Diffstat (limited to 'drivers')
-rw-r--r--drivers/Makefile17
-rw-r--r--drivers/baseband/Makefile (renamed from drivers/bbdev/Makefile)0
-rw-r--r--drivers/baseband/null/Makefile (renamed from drivers/bbdev/null/Makefile)0
-rw-r--r--drivers/baseband/null/bbdev_null.c (renamed from drivers/bbdev/null/bbdev_null.c)6
-rw-r--r--drivers/baseband/null/rte_pmd_bbdev_null_version.map (renamed from drivers/bbdev/null/rte_pmd_bbdev_null_version.map)0
-rw-r--r--drivers/baseband/turbo_sw/Makefile (renamed from drivers/bbdev/turbo_sw/Makefile)0
-rw-r--r--drivers/baseband/turbo_sw/bbdev_turbo_software.c (renamed from drivers/bbdev/turbo_sw/bbdev_turbo_software.c)280
-rw-r--r--drivers/baseband/turbo_sw/rte_pmd_bbdev_turbo_sw_version.map (renamed from drivers/bbdev/turbo_sw/rte_pmd_bbdev_turbo_sw_version.map)0
-rw-r--r--drivers/bus/Makefile3
-rw-r--r--drivers/bus/dpaa/base/fman/fman.c6
-rw-r--r--drivers/bus/dpaa/base/qbman/bman_driver.c2
-rw-r--r--drivers/bus/dpaa/base/qbman/qman.c19
-rw-r--r--drivers/bus/dpaa/base/qbman/qman_driver.c10
-rw-r--r--drivers/bus/dpaa/dpaa_bus.c130
-rw-r--r--drivers/bus/dpaa/include/compat.h30
-rw-r--r--drivers/bus/dpaa/include/fsl_qman.h24
-rw-r--r--drivers/bus/dpaa/meson.build29
-rw-r--r--drivers/bus/dpaa/rte_dpaa_bus.h37
-rw-r--r--drivers/bus/dpaa/rte_dpaa_logs.h11
-rw-r--r--drivers/bus/fslmc/Makefile19
-rw-r--r--drivers/bus/fslmc/fslmc_bus.c181
-rw-r--r--drivers/bus/fslmc/fslmc_logs.h69
-rw-r--r--drivers/bus/fslmc/fslmc_vfio.c384
-rw-r--r--drivers/bus/fslmc/fslmc_vfio.h2
-rw-r--r--drivers/bus/fslmc/mc/dpdmai.c429
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpdmai.h189
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h107
-rw-r--r--drivers/bus/fslmc/mc/fsl_mc_cmd.h2
-rw-r--r--drivers/bus/fslmc/meson.build27
-rw-r--r--drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c12
-rw-r--r--drivers/bus/fslmc/portal/dpaa2_hw_dpci.c100
-rw-r--r--drivers/bus/fslmc/portal/dpaa2_hw_dpio.c123
-rw-r--r--drivers/bus/fslmc/portal/dpaa2_hw_dpio.h8
-rw-r--r--drivers/bus/fslmc/portal/dpaa2_hw_pvt.h94
-rw-r--r--drivers/bus/fslmc/qbman/include/fsl_qbman_base.h2
-rw-r--r--drivers/bus/fslmc/qbman/qbman_portal.c14
-rw-r--r--drivers/bus/fslmc/qbman/qbman_sys.h30
-rw-r--r--drivers/bus/fslmc/qbman/qbman_sys_decl.h23
-rw-r--r--drivers/bus/fslmc/rte_bus_fslmc_version.map17
-rw-r--r--drivers/bus/fslmc/rte_fslmc.h6
-rw-r--r--drivers/bus/ifpga/Makefile32
-rw-r--r--drivers/bus/ifpga/ifpga_bus.c467
-rw-r--r--drivers/bus/ifpga/ifpga_common.c88
-rw-r--r--drivers/bus/ifpga/ifpga_common.h18
-rw-r--r--drivers/bus/ifpga/ifpga_logs.h31
-rw-r--r--drivers/bus/ifpga/meson.build8
-rw-r--r--drivers/bus/ifpga/rte_bus_ifpga.h150
-rw-r--r--drivers/bus/ifpga/rte_bus_ifpga_version.map10
-rw-r--r--drivers/bus/meson.build2
-rw-r--r--drivers/bus/pci/Makefile36
-rw-r--r--drivers/bus/pci/bsd/Makefile32
-rw-r--r--drivers/bus/pci/linux/Makefile32
-rw-r--r--drivers/bus/pci/linux/pci.c35
-rw-r--r--drivers/bus/pci/meson.build3
-rw-r--r--drivers/bus/pci/pci_common.c27
-rw-r--r--drivers/bus/pci/rte_bus_pci.h3
-rw-r--r--drivers/bus/vdev/Makefile1
-rw-r--r--drivers/bus/vdev/meson.build2
-rw-r--r--drivers/bus/vdev/rte_bus_vdev.h35
-rw-r--r--drivers/bus/vdev/vdev.c262
-rw-r--r--drivers/common/Makefile11
-rw-r--r--drivers/common/meson.build7
-rw-r--r--drivers/common/octeontx/Makefile24
-rw-r--r--drivers/common/octeontx/meson.build5
-rw-r--r--drivers/common/octeontx/octeontx_mbox.c (renamed from drivers/mempool/octeontx/octeontx_mbox.c)67
-rw-r--r--drivers/common/octeontx/octeontx_mbox.h37
-rw-r--r--drivers/common/octeontx/rte_common_octeontx_version.map7
-rw-r--r--drivers/compress/Makefile8
-rw-r--r--drivers/compress/isal/Makefile31
-rw-r--r--drivers/compress/isal/isal_compress_pmd.c471
-rw-r--r--drivers/compress/isal/isal_compress_pmd_ops.c343
-rw-r--r--drivers/compress/isal/isal_compress_pmd_private.h57
-rw-r--r--drivers/compress/isal/meson.build14
-rw-r--r--drivers/compress/isal/rte_pmd_isal_version.map3
-rw-r--r--drivers/compress/meson.build8
-rw-r--r--drivers/crypto/Makefile8
-rw-r--r--drivers/crypto/aesni_gcm/Makefile10
-rw-r--r--drivers/crypto/aesni_gcm/aesni_gcm_ops.h3
-rw-r--r--drivers/crypto/aesni_gcm/aesni_gcm_pmd.c6
-rw-r--r--drivers/crypto/aesni_mb/Makefile10
-rw-r--r--drivers/crypto/aesni_mb/aesni_mb_ops.h31
-rw-r--r--drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c43
-rw-r--r--drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c23
-rw-r--r--drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h20
-rw-r--r--drivers/crypto/armv8/rte_armv8_pmd.c4
-rw-r--r--drivers/crypto/armv8/rte_armv8_pmd_ops.c8
-rw-r--r--drivers/crypto/ccp/Makefile35
-rw-r--r--drivers/crypto/ccp/ccp_crypto.c2951
-rw-r--r--drivers/crypto/ccp/ccp_crypto.h388
-rw-r--r--drivers/crypto/ccp/ccp_dev.c810
-rw-r--r--drivers/crypto/ccp/ccp_dev.h495
-rw-r--r--drivers/crypto/ccp/ccp_pci.c236
-rw-r--r--drivers/crypto/ccp/ccp_pci.h27
-rw-r--r--drivers/crypto/ccp/ccp_pmd_ops.c848
-rw-r--r--drivers/crypto/ccp/ccp_pmd_private.h108
-rw-r--r--drivers/crypto/ccp/meson.build21
-rw-r--r--drivers/crypto/ccp/rte_ccp_pmd.c411
-rw-r--r--drivers/crypto/ccp/rte_pmd_ccp_version.map4
-rw-r--r--drivers/crypto/dpaa2_sec/Makefile5
-rw-r--r--drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c514
-rw-r--r--drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h62
-rw-r--r--drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h26
-rw-r--r--drivers/crypto/dpaa2_sec/meson.build14
-rw-r--r--drivers/crypto/dpaa_sec/Makefile5
-rw-r--r--drivers/crypto/dpaa_sec/dpaa_sec.c314
-rw-r--r--drivers/crypto/dpaa_sec/dpaa_sec.h35
-rw-r--r--drivers/crypto/dpaa_sec/dpaa_sec_log.h65
-rw-r--r--drivers/crypto/dpaa_sec/meson.build13
-rw-r--r--drivers/crypto/kasumi/rte_kasumi_pmd.c6
-rw-r--r--drivers/crypto/meson.build4
-rw-r--r--drivers/crypto/mrvl/Makefile67
-rw-r--r--drivers/crypto/mrvl/rte_mrvl_compat.h51
-rw-r--r--drivers/crypto/mvsam/Makefile42
-rw-r--r--drivers/crypto/mvsam/meson.build21
-rw-r--r--drivers/crypto/mvsam/rte_mrvl_compat.h23
-rw-r--r--drivers/crypto/mvsam/rte_mrvl_pmd.c (renamed from drivers/crypto/mrvl/rte_mrvl_pmd.c)38
-rw-r--r--drivers/crypto/mvsam/rte_mrvl_pmd_ops.c (renamed from drivers/crypto/mrvl/rte_mrvl_pmd_ops.c)36
-rw-r--r--drivers/crypto/mvsam/rte_mrvl_pmd_private.h (renamed from drivers/crypto/mrvl/rte_mrvl_pmd_private.h)38
-rw-r--r--drivers/crypto/mvsam/rte_pmd_mvsam_version.map (renamed from drivers/crypto/mrvl/rte_pmd_mrvl_version.map)0
-rw-r--r--drivers/crypto/null/null_crypto_pmd.c2
-rw-r--r--drivers/crypto/openssl/rte_openssl_pmd.c6
-rw-r--r--drivers/crypto/qat/Makefile2
-rw-r--r--drivers/crypto/qat/meson.build2
-rw-r--r--drivers/crypto/qat/qat_adf/adf_transport_access_macros.h47
-rw-r--r--drivers/crypto/qat/qat_adf/icp_qat_fw.h47
-rw-r--r--drivers/crypto/qat/qat_adf/icp_qat_fw_la.h47
-rw-r--r--drivers/crypto/qat/qat_adf/icp_qat_hw.h47
-rw-r--r--drivers/crypto/qat/qat_adf/qat_algs.h47
-rw-r--r--drivers/crypto/qat/qat_adf/qat_algs_build_desc.c48
-rw-r--r--drivers/crypto/qat/qat_crypto.c2
-rw-r--r--drivers/crypto/qat/qat_crypto.h2
-rw-r--r--drivers/crypto/qat/qat_crypto_capabilities.h2
-rw-r--r--drivers/crypto/qat/qat_logs.h2
-rw-r--r--drivers/crypto/qat/qat_qp.c25
-rw-r--r--drivers/crypto/qat/rte_qat_cryptodev.c6
-rw-r--r--drivers/crypto/scheduler/rte_cryptodev_scheduler.c8
-rw-r--r--drivers/crypto/scheduler/rte_cryptodev_scheduler.h2
-rw-r--r--drivers/crypto/scheduler/scheduler_multicore.c53
-rw-r--r--drivers/crypto/scheduler/scheduler_pkt_size_distr.c4
-rw-r--r--drivers/crypto/scheduler/scheduler_pmd.c70
-rw-r--r--drivers/crypto/scheduler/scheduler_pmd_ops.c9
-rw-r--r--drivers/crypto/scheduler/scheduler_pmd_private.h2
-rw-r--r--drivers/crypto/snow3g/rte_snow3g_pmd.c6
-rw-r--r--drivers/crypto/virtio/Makefile35
-rw-r--r--drivers/crypto/virtio/meson.build8
-rw-r--r--drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map3
-rw-r--r--drivers/crypto/virtio/virtio_crypto_algs.h28
-rw-r--r--drivers/crypto/virtio/virtio_crypto_capabilities.h51
-rw-r--r--drivers/crypto/virtio/virtio_cryptodev.c1504
-rw-r--r--drivers/crypto/virtio/virtio_cryptodev.h61
-rw-r--r--drivers/crypto/virtio/virtio_logs.h91
-rw-r--r--drivers/crypto/virtio/virtio_pci.c462
-rw-r--r--drivers/crypto/virtio/virtio_pci.h253
-rw-r--r--drivers/crypto/virtio/virtio_ring.h137
-rw-r--r--drivers/crypto/virtio/virtio_rxtx.c515
-rw-r--r--drivers/crypto/virtio/virtqueue.c43
-rw-r--r--drivers/crypto/virtio/virtqueue.h171
-rw-r--r--drivers/crypto/zuc/rte_zuc_pmd.c115
-rw-r--r--drivers/event/Makefile4
-rw-r--r--drivers/event/dpaa/dpaa_eventdev.c4
-rw-r--r--drivers/event/dpaa/dpaa_eventdev.h2
-rw-r--r--drivers/event/dpaa/meson.build10
-rw-r--r--drivers/event/dpaa2/Makefile6
-rw-r--r--drivers/event/dpaa2/dpaa2_eventdev.c86
-rw-r--r--drivers/event/dpaa2/dpaa2_eventdev.h1
-rw-r--r--drivers/event/dpaa2/dpaa2_eventdev_logs.h10
-rw-r--r--drivers/event/dpaa2/dpaa2_hw_dpcon.c15
-rw-r--r--drivers/event/dpaa2/meson.build11
-rw-r--r--drivers/event/meson.build2
-rw-r--r--drivers/event/octeontx/Makefile12
-rw-r--r--drivers/event/octeontx/meson.build9
-rw-r--r--drivers/event/octeontx/ssovf_evdev.c59
-rw-r--r--drivers/event/octeontx/ssovf_evdev.h17
-rw-r--r--drivers/event/octeontx/ssovf_evdev_selftest.c36
-rw-r--r--drivers/event/octeontx/ssovf_probe.c (renamed from drivers/mempool/octeontx/octeontx_ssovf.c)25
-rw-r--r--drivers/event/octeontx/ssovf_worker.c17
-rw-r--r--drivers/event/octeontx/timvf_evdev.c407
-rw-r--r--drivers/event/octeontx/timvf_evdev.h225
-rw-r--r--drivers/event/octeontx/timvf_probe.c148
-rw-r--r--drivers/event/octeontx/timvf_worker.c199
-rw-r--r--drivers/event/octeontx/timvf_worker.h443
-rw-r--r--drivers/event/opdl/opdl_evdev.c2
-rw-r--r--drivers/event/opdl/opdl_evdev_init.c3
-rw-r--r--drivers/event/opdl/opdl_ring.c97
-rw-r--r--drivers/event/opdl/opdl_ring.h16
-rw-r--r--drivers/event/skeleton/skeleton_eventdev.c2
-rw-r--r--drivers/event/sw/sw_evdev.c33
-rw-r--r--drivers/event/sw/sw_evdev_scheduler.c17
-rw-r--r--drivers/event/sw/sw_evdev_worker.c6
-rw-r--r--drivers/mempool/Makefile5
-rw-r--r--drivers/mempool/bucket/Makefile27
-rw-r--r--drivers/mempool/bucket/meson.build9
-rw-r--r--drivers/mempool/bucket/rte_mempool_bucket.c628
-rw-r--r--drivers/mempool/bucket/rte_mempool_bucket_version.map4
-rw-r--r--drivers/mempool/dpaa/Makefile3
-rw-r--r--drivers/mempool/dpaa/dpaa_mempool.c54
-rw-r--r--drivers/mempool/dpaa/dpaa_mempool.h2
-rw-r--r--drivers/mempool/dpaa/meson.build12
-rw-r--r--drivers/mempool/dpaa/rte_mempool_dpaa_version.map1
-rw-r--r--drivers/mempool/dpaa2/Makefile11
-rw-r--r--drivers/mempool/dpaa2/dpaa2_hw_mempool.c141
-rw-r--r--drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h38
-rw-r--r--drivers/mempool/dpaa2/meson.build12
-rw-r--r--drivers/mempool/dpaa2/rte_dpaa2_mempool.h53
-rw-r--r--drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map9
-rw-r--r--drivers/mempool/meson.build2
-rw-r--r--drivers/mempool/octeontx/Makefile5
-rw-r--r--drivers/mempool/octeontx/meson.build6
-rw-r--r--drivers/mempool/octeontx/octeontx_fpavf.c14
-rw-r--r--drivers/mempool/octeontx/octeontx_mbox.h36
-rw-r--r--drivers/mempool/octeontx/octeontx_pool_logs.h9
-rw-r--r--drivers/mempool/octeontx/rte_mempool_octeontx.c64
-rw-r--r--drivers/mempool/octeontx/rte_mempool_octeontx_version.map6
-rw-r--r--drivers/meson.build13
-rw-r--r--drivers/net/Makefile12
-rw-r--r--drivers/net/af_packet/Makefile37
-rw-r--r--drivers/net/af_packet/rte_eth_af_packet.c175
-rw-r--r--drivers/net/ark/ark_ethdev.c13
-rw-r--r--drivers/net/avf/avf_ethdev.c34
-rw-r--r--drivers/net/avf/avf_rxtx.c9
-rw-r--r--drivers/net/avf/avf_rxtx.h10
-rw-r--r--drivers/net/avp/Makefile33
-rw-r--r--drivers/net/avp/avp_ethdev.c62
-rw-r--r--drivers/net/avp/avp_logs.h32
-rw-r--r--drivers/net/avp/rte_avp_common.h57
-rw-r--r--drivers/net/avp/rte_avp_fifo.h57
-rw-r--r--drivers/net/axgbe/Makefile35
-rw-r--r--drivers/net/axgbe/axgbe_common.h1710
-rw-r--r--drivers/net/axgbe/axgbe_dev.c1103
-rw-r--r--drivers/net/axgbe/axgbe_ethdev.c772
-rw-r--r--drivers/net/axgbe/axgbe_ethdev.h586
-rw-r--r--drivers/net/axgbe/axgbe_i2c.c331
-rw-r--r--drivers/net/axgbe/axgbe_logs.h26
-rw-r--r--drivers/net/axgbe/axgbe_mdio.c1066
-rw-r--r--drivers/net/axgbe/axgbe_phy.h192
-rw-r--r--drivers/net/axgbe/axgbe_phy_impl.c2191
-rw-r--r--drivers/net/axgbe/axgbe_rxtx.c674
-rw-r--r--drivers/net/axgbe/axgbe_rxtx.h186
-rw-r--r--drivers/net/axgbe/axgbe_rxtx_vec_sse.c93
-rw-r--r--drivers/net/axgbe/meson.build19
-rw-r--r--drivers/net/axgbe/rte_pmd_axgbe_version.map4
-rw-r--r--drivers/net/bnx2x/LICENSE.bnx2x_pmd29
-rw-r--r--drivers/net/bnx2x/Makefile9
-rw-r--r--drivers/net/bnx2x/bnx2x.c36
-rw-r--r--drivers/net/bnx2x/bnx2x.h4
-rw-r--r--drivers/net/bnx2x/bnx2x_ethdev.c34
-rw-r--r--drivers/net/bnx2x/bnx2x_ethdev.h4
-rw-r--r--drivers/net/bnx2x/bnx2x_logs.h4
-rw-r--r--drivers/net/bnx2x/bnx2x_rxtx.c15
-rw-r--r--drivers/net/bnx2x/bnx2x_rxtx.h4
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.c6
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.h6
-rw-r--r--drivers/net/bnx2x/bnx2x_vfpf.c4
-rw-r--r--drivers/net/bnx2x/bnx2x_vfpf.h4
-rw-r--r--drivers/net/bnx2x/ecore_fw_defs.h6
-rw-r--r--drivers/net/bnx2x/ecore_hsi.h6
-rw-r--r--drivers/net/bnx2x/ecore_init.h6
-rw-r--r--drivers/net/bnx2x/ecore_init_ops.h6
-rw-r--r--drivers/net/bnx2x/ecore_mfw_req.h6
-rw-r--r--drivers/net/bnx2x/ecore_reg.h6
-rw-r--r--drivers/net/bnx2x/ecore_sp.c6
-rw-r--r--drivers/net/bnx2x/ecore_sp.h6
-rw-r--r--drivers/net/bnx2x/elink.c350
-rw-r--r--drivers/net/bnx2x/elink.h6
-rw-r--r--drivers/net/bnxt/Makefile37
-rw-r--r--drivers/net/bnxt/bnxt.h42
-rw-r--r--drivers/net/bnxt/bnxt_cpr.c119
-rw-r--r--drivers/net/bnxt/bnxt_cpr.h39
-rw-r--r--drivers/net/bnxt/bnxt_ethdev.c301
-rw-r--r--drivers/net/bnxt/bnxt_filter.c171
-rw-r--r--drivers/net/bnxt/bnxt_filter.h36
-rw-r--r--drivers/net/bnxt/bnxt_hwrm.c254
-rw-r--r--drivers/net/bnxt/bnxt_hwrm.h40
-rw-r--r--drivers/net/bnxt/bnxt_irq.c62
-rw-r--r--drivers/net/bnxt/bnxt_irq.h34
-rw-r--r--drivers/net/bnxt/bnxt_nvm_defs.h11
-rw-r--r--drivers/net/bnxt/bnxt_ring.c122
-rw-r--r--drivers/net/bnxt/bnxt_ring.h39
-rw-r--r--drivers/net/bnxt/bnxt_rxq.c59
-rw-r--r--drivers/net/bnxt/bnxt_rxq.h36
-rw-r--r--drivers/net/bnxt/bnxt_rxr.c75
-rw-r--r--drivers/net/bnxt/bnxt_rxr.h56
-rw-r--r--drivers/net/bnxt/bnxt_stats.c59
-rw-r--r--drivers/net/bnxt/bnxt_stats.h34
-rw-r--r--drivers/net/bnxt/bnxt_txq.c49
-rw-r--r--drivers/net/bnxt/bnxt_txq.h36
-rw-r--r--drivers/net/bnxt/bnxt_txr.c38
-rw-r--r--drivers/net/bnxt/bnxt_txr.h34
-rw-r--r--drivers/net/bnxt/bnxt_vnic.c44
-rw-r--r--drivers/net/bnxt/bnxt_vnic.h35
-rw-r--r--drivers/net/bnxt/hsi_struct_def_dpdk.h32478
-rw-r--r--drivers/net/bnxt/rte_pmd_bnxt.c34
-rw-r--r--drivers/net/bnxt/rte_pmd_bnxt.h34
-rw-r--r--drivers/net/bonding/Makefile1
-rw-r--r--drivers/net/bonding/meson.build3
-rw-r--r--drivers/net/bonding/rte_eth_bond_8023ad.c126
-rw-r--r--drivers/net/bonding/rte_eth_bond_alb.c4
-rw-r--r--drivers/net/bonding/rte_eth_bond_api.c79
-rw-r--r--drivers/net/bonding/rte_eth_bond_args.c11
-rw-r--r--drivers/net/bonding/rte_eth_bond_flow.c228
-rw-r--r--drivers/net/bonding/rte_eth_bond_pmd.c414
-rw-r--r--drivers/net/bonding/rte_eth_bond_private.h32
-rw-r--r--drivers/net/bonding/rte_pmd_bond_version.map1
-rw-r--r--drivers/net/cxgbe/Makefile44
-rw-r--r--drivers/net/cxgbe/base/adapter.h75
-rw-r--r--drivers/net/cxgbe/base/common.h168
-rw-r--r--drivers/net/cxgbe/base/t4_chip_type.h34
-rw-r--r--drivers/net/cxgbe/base/t4_hw.c783
-rw-r--r--drivers/net/cxgbe/base/t4_hw.h34
-rw-r--r--drivers/net/cxgbe/base/t4_msg.h34
-rw-r--r--drivers/net/cxgbe/base/t4_pci_id_tbl.h34
-rw-r--r--drivers/net/cxgbe/base/t4_regs.h141
-rw-r--r--drivers/net/cxgbe/base/t4_regs_values.h34
-rw-r--r--drivers/net/cxgbe/base/t4fw_interface.h401
-rw-r--r--drivers/net/cxgbe/base/t4vf_hw.c874
-rw-r--r--drivers/net/cxgbe/base/t4vf_hw.h15
-rw-r--r--drivers/net/cxgbe/cxgbe.h51
-rw-r--r--drivers/net/cxgbe/cxgbe_compat.h34
-rw-r--r--drivers/net/cxgbe/cxgbe_ethdev.c358
-rw-r--r--drivers/net/cxgbe/cxgbe_main.c332
-rw-r--r--drivers/net/cxgbe/cxgbe_pfvf.h43
-rw-r--r--drivers/net/cxgbe/cxgbevf_ethdev.c198
-rw-r--r--drivers/net/cxgbe/cxgbevf_main.c311
-rw-r--r--drivers/net/cxgbe/sge.c426
-rw-r--r--drivers/net/dpaa/Makefile3
-rw-r--r--drivers/net/dpaa/dpaa_ethdev.c112
-rw-r--r--drivers/net/dpaa/dpaa_ethdev.h16
-rw-r--r--drivers/net/dpaa/dpaa_rxtx.c21
-rw-r--r--drivers/net/dpaa/meson.build14
-rw-r--r--drivers/net/dpaa/rte_pmd_dpaa_version.map4
-rw-r--r--drivers/net/dpaa2/Makefile10
-rw-r--r--drivers/net/dpaa2/base/dpaa2_hw_dpni.c30
-rw-r--r--drivers/net/dpaa2/dpaa2_ethdev.c504
-rw-r--r--drivers/net/dpaa2/dpaa2_ethdev.h6
-rw-r--r--drivers/net/dpaa2/dpaa2_pmd_logs.h41
-rw-r--r--drivers/net/dpaa2/dpaa2_rxtx.c165
-rw-r--r--drivers/net/dpaa2/meson.build18
-rw-r--r--drivers/net/e1000/Makefile4
-rw-r--r--drivers/net/e1000/base/e1000_82575.c5
-rw-r--r--drivers/net/e1000/base/e1000_defines.h1
-rw-r--r--drivers/net/e1000/base/e1000_phy.h8
-rw-r--r--drivers/net/e1000/e1000_ethdev.h27
-rw-r--r--drivers/net/e1000/e1000_logs.c26
-rw-r--r--drivers/net/e1000/e1000_logs.h6
-rw-r--r--drivers/net/e1000/em_ethdev.c146
-rw-r--r--drivers/net/e1000/em_rxtx.c103
-rw-r--r--drivers/net/e1000/igb_ethdev.c245
-rw-r--r--drivers/net/e1000/igb_flow.c110
-rw-r--r--drivers/net/e1000/igb_rxtx.c171
-rw-r--r--drivers/net/e1000/meson.build1
-rw-r--r--drivers/net/ena/Makefile3
-rw-r--r--drivers/net/ena/base/ena_plat_dpdk.h9
-rw-r--r--drivers/net/ena/ena_ethdev.c79
-rw-r--r--drivers/net/enic/base/vnic_dev.c72
-rw-r--r--drivers/net/enic/base/vnic_dev.h8
-rw-r--r--drivers/net/enic/base/vnic_devcmd.h15
-rw-r--r--drivers/net/enic/base/vnic_enet.h4
-rw-r--r--drivers/net/enic/base/vnic_nic.h2
-rw-r--r--drivers/net/enic/base/vnic_rq.h2
-rw-r--r--drivers/net/enic/base/vnic_wq.h1
-rw-r--r--drivers/net/enic/enic.h132
-rw-r--r--drivers/net/enic/enic_clsf.c21
-rw-r--r--drivers/net/enic/enic_ethdev.c323
-rw-r--r--drivers/net/enic/enic_flow.c87
-rw-r--r--drivers/net/enic/enic_main.c502
-rw-r--r--drivers/net/enic/enic_res.c85
-rw-r--r--drivers/net/enic/enic_res.h6
-rw-r--r--drivers/net/enic/enic_rxtx.c106
-rw-r--r--drivers/net/enic/meson.build19
-rw-r--r--drivers/net/failsafe/Makefile33
-rw-r--r--drivers/net/failsafe/failsafe.c53
-rw-r--r--drivers/net/failsafe/failsafe_args.c9
-rw-r--r--drivers/net/failsafe/failsafe_eal.c61
-rw-r--r--drivers/net/failsafe/failsafe_ether.c55
-rw-r--r--drivers/net/failsafe/failsafe_flow.c6
-rw-r--r--drivers/net/failsafe/failsafe_intr.c2
-rw-r--r--drivers/net/failsafe/failsafe_ops.c151
-rw-r--r--drivers/net/failsafe/failsafe_private.h18
-rw-r--r--drivers/net/failsafe/failsafe_rxtx.c2
-rw-r--r--drivers/net/fm10k/Makefile3
-rw-r--r--drivers/net/fm10k/fm10k.h3
-rw-r--r--drivers/net/fm10k/fm10k_ethdev.c101
-rw-r--r--drivers/net/fm10k/fm10k_rxtx_vec.c6
-rw-r--r--drivers/net/i40e/Makefile5
-rw-r--r--drivers/net/i40e/base/i40e_register.h24
-rw-r--r--drivers/net/i40e/i40e_ethdev.c678
-rw-r--r--drivers/net/i40e/i40e_ethdev.h48
-rw-r--r--drivers/net/i40e/i40e_ethdev_vf.c172
-rw-r--r--drivers/net/i40e/i40e_fdir.c2
-rw-r--r--drivers/net/i40e/i40e_flow.c217
-rw-r--r--drivers/net/i40e/i40e_rxtx.c238
-rw-r--r--drivers/net/i40e/i40e_rxtx.h3
-rw-r--r--drivers/net/i40e/i40e_rxtx_vec_common.h4
-rw-r--r--drivers/net/i40e/i40e_rxtx_vec_neon.c35
-rw-r--r--drivers/net/i40e/i40e_vf_representor.c531
-rw-r--r--drivers/net/i40e/meson.build12
-rw-r--r--drivers/net/i40e/rte_pmd_i40e.c88
-rw-r--r--drivers/net/i40e/rte_pmd_i40e.h18
-rw-r--r--drivers/net/ifc/Makefile35
-rw-r--r--drivers/net/ifc/base/ifcvf.c298
-rw-r--r--drivers/net/ifc/base/ifcvf.h154
-rw-r--r--drivers/net/ifc/base/ifcvf_osdep.h52
-rw-r--r--drivers/net/ifc/ifcvf_vdpa.c792
-rw-r--r--drivers/net/ifc/rte_ifcvf_version.map4
-rw-r--r--drivers/net/ixgbe/Makefile6
-rw-r--r--drivers/net/ixgbe/ixgbe_ethdev.c737
-rw-r--r--drivers/net/ixgbe/ixgbe_ethdev.h32
-rw-r--r--drivers/net/ixgbe/ixgbe_flow.c197
-rw-r--r--drivers/net/ixgbe/ixgbe_ipsec.c13
-rw-r--r--drivers/net/ixgbe/ixgbe_pf.c12
-rw-r--r--drivers/net/ixgbe/ixgbe_rxtx.c282
-rw-r--r--drivers/net/ixgbe/ixgbe_rxtx.h24
-rw-r--r--drivers/net/ixgbe/ixgbe_rxtx_vec_common.h5
-rw-r--r--drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_vf_representor.c231
-rw-r--r--drivers/net/ixgbe/meson.build3
-rw-r--r--drivers/net/ixgbe/rte_pmd_ixgbe.c230
-rw-r--r--drivers/net/ixgbe/rte_pmd_ixgbe.h84
-rw-r--r--drivers/net/ixgbe/rte_pmd_ixgbe_version.map10
-rw-r--r--drivers/net/kni/Makefile1
-rw-r--r--drivers/net/kni/rte_eth_kni.c66
-rw-r--r--drivers/net/liquidio/lio_ethdev.c72
-rw-r--r--drivers/net/meson.build8
-rw-r--r--drivers/net/mlx4/Makefile43
-rw-r--r--drivers/net/mlx4/mlx4.c202
-rw-r--r--drivers/net/mlx4/mlx4.h51
-rw-r--r--drivers/net/mlx4/mlx4_ethdev.c208
-rw-r--r--drivers/net/mlx4/mlx4_flow.c240
-rw-r--r--drivers/net/mlx4/mlx4_flow.h6
-rw-r--r--drivers/net/mlx4/mlx4_glue.c2
-rw-r--r--drivers/net/mlx4/mlx4_glue.h2
-rw-r--r--drivers/net/mlx4/mlx4_intr.c2
-rw-r--r--drivers/net/mlx4/mlx4_mr.c1247
-rw-r--r--drivers/net/mlx4/mlx4_mr.h122
-rw-r--r--drivers/net/mlx4/mlx4_prm.h2
-rw-r--r--drivers/net/mlx4/mlx4_rxq.c99
-rw-r--r--drivers/net/mlx4/mlx4_rxtx.c44
-rw-r--r--drivers/net/mlx4/mlx4_rxtx.h92
-rw-r--r--drivers/net/mlx4/mlx4_txq.c122
-rw-r--r--drivers/net/mlx4/mlx4_utils.c2
-rw-r--r--drivers/net/mlx4/mlx4_utils.h2
-rw-r--r--drivers/net/mlx5/Makefile67
-rw-r--r--drivers/net/mlx5/mlx5.c796
-rw-r--r--drivers/net/mlx5/mlx5.h315
-rw-r--r--drivers/net/mlx5/mlx5_defs.h48
-rw-r--r--drivers/net/mlx5/mlx5_ethdev.c1039
-rw-r--r--drivers/net/mlx5/mlx5_flow.c2215
-rw-r--r--drivers/net/mlx5/mlx5_glue.c40
-rw-r--r--drivers/net/mlx5/mlx5_glue.h18
-rw-r--r--drivers/net/mlx5/mlx5_mac.c159
-rw-r--r--drivers/net/mlx5/mlx5_mr.c1325
-rw-r--r--drivers/net/mlx5/mlx5_mr.h117
-rw-r--r--drivers/net/mlx5/mlx5_nl.c627
-rw-r--r--drivers/net/mlx5/mlx5_prm.h41
-rw-r--r--drivers/net/mlx5/mlx5_rss.c178
-rw-r--r--drivers/net/mlx5/mlx5_rxmode.c38
-rw-r--r--drivers/net/mlx5/mlx5_rxq.c1354
-rw-r--r--drivers/net/mlx5/mlx5_rxtx.c668
-rw-r--r--drivers/net/mlx5/mlx5_rxtx.h457
-rw-r--r--drivers/net/mlx5/mlx5_rxtx_vec.c40
-rw-r--r--drivers/net/mlx5/mlx5_rxtx_vec.h11
-rw-r--r--drivers/net/mlx5/mlx5_rxtx_vec_neon.h37
-rw-r--r--drivers/net/mlx5/mlx5_rxtx_vec_sse.h21
-rw-r--r--drivers/net/mlx5/mlx5_socket.c173
-rw-r--r--drivers/net/mlx5/mlx5_stats.c246
-rw-r--r--drivers/net/mlx5/mlx5_trigger.c274
-rw-r--r--drivers/net/mlx5/mlx5_txq.c422
-rw-r--r--drivers/net/mlx5/mlx5_utils.h31
-rw-r--r--drivers/net/mlx5/mlx5_vlan.c114
-rw-r--r--drivers/net/mrvl/Makefile68
-rw-r--r--drivers/net/mvpp2/Makefile42
-rw-r--r--drivers/net/mvpp2/meson.build25
-rw-r--r--drivers/net/mvpp2/mrvl_ethdev.c (renamed from drivers/net/mrvl/mrvl_ethdev.c)510
-rw-r--r--drivers/net/mvpp2/mrvl_ethdev.h (renamed from drivers/net/mrvl/mrvl_ethdev.h)47
-rw-r--r--drivers/net/mvpp2/mrvl_flow.c2779
-rw-r--r--drivers/net/mvpp2/mrvl_qos.c (renamed from drivers/net/mrvl/mrvl_qos.c)340
-rw-r--r--drivers/net/mvpp2/mrvl_qos.h (renamed from drivers/net/mrvl/mrvl_qos.h)58
-rw-r--r--drivers/net/mvpp2/rte_pmd_mvpp2_version.map (renamed from drivers/net/mrvl/rte_pmd_mrvl_version.map)0
-rw-r--r--drivers/net/nfp/Makefile17
-rw-r--r--drivers/net/nfp/nfp_net.c850
-rw-r--r--drivers/net/nfp/nfp_net_ctrl.h6
-rw-r--r--drivers/net/nfp/nfp_net_eth.h82
-rw-r--r--drivers/net/nfp/nfp_net_logs.h9
-rw-r--r--drivers/net/nfp/nfp_net_pmd.h46
-rw-r--r--drivers/net/nfp/nfp_nfpu.c108
-rw-r--r--drivers/net/nfp/nfp_nfpu.h55
-rw-r--r--drivers/net/nfp/nfp_nspu.c642
-rw-r--r--drivers/net/nfp/nfp_nspu.h83
-rw-r--r--drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h722
-rw-r--r--drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h36
-rw-r--r--drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h592
-rw-r--r--drivers/net/nfp/nfpcore/nfp6000/nfp6000.h40
-rw-r--r--drivers/net/nfp/nfpcore/nfp6000/nfp_xpb.h26
-rw-r--r--drivers/net/nfp/nfpcore/nfp_cpp.h779
-rw-r--r--drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c941
-rw-r--r--drivers/net/nfp/nfpcore/nfp_cppcore.c857
-rw-r--r--drivers/net/nfp/nfpcore/nfp_crc.c49
-rw-r--r--drivers/net/nfp/nfpcore/nfp_crc.h19
-rw-r--r--drivers/net/nfp/nfpcore/nfp_hwinfo.c199
-rw-r--r--drivers/net/nfp/nfpcore/nfp_hwinfo.h85
-rw-r--r--drivers/net/nfp/nfpcore/nfp_mip.c154
-rw-r--r--drivers/net/nfp/nfpcore/nfp_mip.h21
-rw-r--r--drivers/net/nfp/nfpcore/nfp_mutex.c424
-rw-r--r--drivers/net/nfp/nfpcore/nfp_nffw.c235
-rw-r--r--drivers/net/nfp/nfpcore/nfp_nffw.h86
-rw-r--r--drivers/net/nfp/nfpcore/nfp_nsp.c427
-rw-r--r--drivers/net/nfp/nfpcore/nfp_nsp.h304
-rw-r--r--drivers/net/nfp/nfpcore/nfp_nsp_cmds.c109
-rw-r--r--drivers/net/nfp/nfpcore/nfp_nsp_eth.c665
-rw-r--r--drivers/net/nfp/nfpcore/nfp_resource.c266
-rw-r--r--drivers/net/nfp/nfpcore/nfp_resource.h52
-rw-r--r--drivers/net/nfp/nfpcore/nfp_rtsym.c327
-rw-r--r--drivers/net/nfp/nfpcore/nfp_rtsym.h61
-rw-r--r--drivers/net/nfp/nfpcore/nfp_target.h579
-rw-r--r--drivers/net/null/meson.build1
-rw-r--r--drivers/net/null/rte_eth_null.c82
-rw-r--r--drivers/net/octeontx/Makefile3
-rw-r--r--drivers/net/octeontx/base/octeontx_bgx.c22
-rw-r--r--drivers/net/octeontx/base/octeontx_pkivf.c10
-rw-r--r--drivers/net/octeontx/base/octeontx_pkivf.h10
-rw-r--r--drivers/net/octeontx/octeontx_ethdev.c102
-rw-r--r--drivers/net/octeontx/octeontx_ethdev.h4
-rw-r--r--drivers/net/pcap/meson.build20
-rw-r--r--drivers/net/pcap/rte_eth_pcap.c75
-rw-r--r--drivers/net/qede/LICENSE.qede_pmd29
-rw-r--r--drivers/net/qede/Makefile7
-rw-r--r--drivers/net/qede/base/bcm_osal.c15
-rw-r--r--drivers/net/qede/base/bcm_osal.h4
-rw-r--r--drivers/net/qede/base/common_hsi.h12
-rw-r--r--drivers/net/qede/base/ecore.h32
-rw-r--r--drivers/net/qede/base/ecore_attn_values.h4
-rw-r--r--drivers/net/qede/base/ecore_chain.h53
-rw-r--r--drivers/net/qede/base/ecore_cxt.c34
-rw-r--r--drivers/net/qede/base/ecore_cxt.h8
-rw-r--r--drivers/net/qede/base/ecore_cxt_api.h4
-rw-r--r--drivers/net/qede/base/ecore_dcbx.c11
-rw-r--r--drivers/net/qede/base/ecore_dcbx.h4
-rw-r--r--drivers/net/qede/base/ecore_dcbx_api.h5
-rw-r--r--drivers/net/qede/base/ecore_dev.c330
-rw-r--r--drivers/net/qede/base/ecore_dev_api.h24
-rw-r--r--drivers/net/qede/base/ecore_gtt_reg_addr.h4
-rw-r--r--drivers/net/qede/base/ecore_gtt_values.h4
-rw-r--r--drivers/net/qede/base/ecore_hsi_common.h110
-rw-r--r--drivers/net/qede/base/ecore_hsi_debug_tools.h112
-rw-r--r--drivers/net/qede/base/ecore_hsi_eth.h33
-rw-r--r--drivers/net/qede/base/ecore_hsi_init_func.h40
-rw-r--r--drivers/net/qede/base/ecore_hsi_init_tool.h111
-rw-r--r--drivers/net/qede/base/ecore_hw.c126
-rw-r--r--drivers/net/qede/base/ecore_hw.h8
-rw-r--r--drivers/net/qede/base/ecore_hw_defs.h4
-rw-r--r--drivers/net/qede/base/ecore_init_fw_funcs.c214
-rw-r--r--drivers/net/qede/base/ecore_init_fw_funcs.h37
-rw-r--r--drivers/net/qede/base/ecore_init_ops.c36
-rw-r--r--drivers/net/qede/base/ecore_init_ops.h4
-rw-r--r--drivers/net/qede/base/ecore_int.c38
-rw-r--r--drivers/net/qede/base/ecore_int.h7
-rw-r--r--drivers/net/qede/base/ecore_int_api.h4
-rw-r--r--drivers/net/qede/base/ecore_iov_api.h15
-rw-r--r--drivers/net/qede/base/ecore_iro.h4
-rw-r--r--drivers/net/qede/base/ecore_iro_values.h68
-rw-r--r--drivers/net/qede/base/ecore_l2.c69
-rw-r--r--drivers/net/qede/base/ecore_l2.h4
-rw-r--r--drivers/net/qede/base/ecore_l2_api.h6
-rw-r--r--drivers/net/qede/base/ecore_mcp.c224
-rw-r--r--drivers/net/qede/base/ecore_mcp.h4
-rw-r--r--drivers/net/qede/base/ecore_mcp_api.h50
-rw-r--r--drivers/net/qede/base/ecore_mng_tlv.c8
-rw-r--r--drivers/net/qede/base/ecore_proto_if.h7
-rw-r--r--drivers/net/qede/base/ecore_rt_defs.h595
-rw-r--r--drivers/net/qede/base/ecore_sp_api.h4
-rw-r--r--drivers/net/qede/base/ecore_sp_commands.c33
-rw-r--r--drivers/net/qede/base/ecore_sp_commands.h4
-rw-r--r--drivers/net/qede/base/ecore_spq.c40
-rw-r--r--drivers/net/qede/base/ecore_spq.h17
-rw-r--r--drivers/net/qede/base/ecore_sriov.c48
-rw-r--r--drivers/net/qede/base/ecore_sriov.h4
-rw-r--r--drivers/net/qede/base/ecore_status.h4
-rw-r--r--drivers/net/qede/base/ecore_utils.h4
-rw-r--r--drivers/net/qede/base/ecore_vf.c9
-rw-r--r--drivers/net/qede/base/ecore_vf.h4
-rw-r--r--drivers/net/qede/base/ecore_vf_api.h4
-rw-r--r--drivers/net/qede/base/ecore_vfpf_if.h12
-rw-r--r--drivers/net/qede/base/eth_common.h7
-rw-r--r--drivers/net/qede/base/mcp_public.h7
-rw-r--r--drivers/net/qede/base/nvm_cfg.h4
-rw-r--r--drivers/net/qede/base/reg_addr.h14
-rw-r--r--drivers/net/qede/qede_ethdev.c392
-rw-r--r--drivers/net/qede/qede_ethdev.h13
-rw-r--r--drivers/net/qede/qede_fdir.c8
-rw-r--r--drivers/net/qede/qede_if.h4
-rw-r--r--drivers/net/qede/qede_logs.h4
-rw-r--r--drivers/net/qede/qede_main.c13
-rw-r--r--drivers/net/qede/qede_rxtx.c24
-rw-r--r--drivers/net/qede/qede_rxtx.h9
-rw-r--r--drivers/net/ring/meson.build1
-rw-r--r--drivers/net/ring/rte_eth_ring.c66
-rw-r--r--drivers/net/sfc/Makefile7
-rw-r--r--drivers/net/sfc/base/ef10_ev.c112
-rw-r--r--drivers/net/sfc/base/ef10_filter.c163
-rw-r--r--drivers/net/sfc/base/ef10_image.c885
-rw-r--r--drivers/net/sfc/base/ef10_impl.h74
-rw-r--r--drivers/net/sfc/base/ef10_intr.c13
-rw-r--r--drivers/net/sfc/base/ef10_mac.c193
-rw-r--r--drivers/net/sfc/base/ef10_mcdi.c25
-rw-r--r--drivers/net/sfc/base/ef10_nic.c923
-rw-r--r--drivers/net/sfc/base/ef10_nvram.c33
-rw-r--r--drivers/net/sfc/base/ef10_phy.c126
-rw-r--r--drivers/net/sfc/base/ef10_rx.c184
-rw-r--r--drivers/net/sfc/base/ef10_signed_image_layout.h62
-rw-r--r--drivers/net/sfc/base/ef10_tlv_layout.h115
-rw-r--r--drivers/net/sfc/base/ef10_tx.c67
-rw-r--r--drivers/net/sfc/base/ef10_vpd.c37
-rw-r--r--drivers/net/sfc/base/efx.h425
-rw-r--r--drivers/net/sfc/base/efx_bootcfg.c89
-rw-r--r--drivers/net/sfc/base/efx_check.h122
-rw-r--r--drivers/net/sfc/base/efx_ev.c10
-rw-r--r--drivers/net/sfc/base/efx_filter.c71
-rw-r--r--drivers/net/sfc/base/efx_impl.h134
-rw-r--r--drivers/net/sfc/base/efx_intr.c21
-rw-r--r--drivers/net/sfc/base/efx_lic.c26
-rw-r--r--drivers/net/sfc/base/efx_mac.c38
-rw-r--r--drivers/net/sfc/base/efx_mcdi.c95
-rw-r--r--drivers/net/sfc/base/efx_mcdi.h4
-rw-r--r--drivers/net/sfc/base/efx_mon.c6
-rw-r--r--drivers/net/sfc/base/efx_nic.c304
-rw-r--r--drivers/net/sfc/base/efx_nvram.c10
-rw-r--r--drivers/net/sfc/base/efx_phy.c14
-rw-r--r--drivers/net/sfc/base/efx_port.c5
-rw-r--r--drivers/net/sfc/base/efx_regs_ef10.h230
-rw-r--r--drivers/net/sfc/base/efx_regs_mcdi.h9375
-rw-r--r--drivers/net/sfc/base/efx_regs_mcdi_aoe.h2914
-rw-r--r--drivers/net/sfc/base/efx_rx.c245
-rw-r--r--drivers/net/sfc/base/efx_sram.c14
-rw-r--r--drivers/net/sfc/base/efx_tunnel.c32
-rw-r--r--drivers/net/sfc/base/efx_tx.c56
-rw-r--r--drivers/net/sfc/base/efx_types.h38
-rw-r--r--drivers/net/sfc/base/efx_vpd.c10
-rw-r--r--drivers/net/sfc/base/hunt_nic.c172
-rw-r--r--drivers/net/sfc/base/mcdi_mon.c9
-rw-r--r--drivers/net/sfc/base/medford2_impl.h35
-rw-r--r--drivers/net/sfc/base/medford2_nic.c162
-rw-r--r--drivers/net/sfc/base/medford_nic.c240
-rw-r--r--drivers/net/sfc/base/meson.build4
-rw-r--r--drivers/net/sfc/base/siena_flash.h9
-rw-r--r--drivers/net/sfc/base/siena_mac.c31
-rw-r--r--drivers/net/sfc/base/siena_mcdi.c12
-rw-r--r--drivers/net/sfc/base/siena_nic.c24
-rw-r--r--drivers/net/sfc/base/siena_nvram.c17
-rw-r--r--drivers/net/sfc/base/siena_phy.c9
-rw-r--r--drivers/net/sfc/base/siena_vpd.c25
-rw-r--r--drivers/net/sfc/efsys.h17
-rw-r--r--drivers/net/sfc/meson.build5
-rw-r--r--drivers/net/sfc/sfc.c350
-rw-r--r--drivers/net/sfc/sfc.h41
-rw-r--r--drivers/net/sfc/sfc_dp.c5
-rw-r--r--drivers/net/sfc/sfc_dp.h9
-rw-r--r--drivers/net/sfc/sfc_dp_rx.h29
-rw-r--r--drivers/net/sfc/sfc_dp_tx.h2
-rw-r--r--drivers/net/sfc/sfc_ef10.h34
-rw-r--r--drivers/net/sfc/sfc_ef10_essb_rx.c700
-rw-r--r--drivers/net/sfc/sfc_ef10_rx.c181
-rw-r--r--drivers/net/sfc/sfc_ef10_rx_ev.h169
-rw-r--r--drivers/net/sfc/sfc_ef10_tx.c2
-rw-r--r--drivers/net/sfc/sfc_ethdev.c223
-rw-r--r--drivers/net/sfc/sfc_ev.c56
-rw-r--r--drivers/net/sfc/sfc_flow.c1243
-rw-r--r--drivers/net/sfc/sfc_flow.h23
-rw-r--r--drivers/net/sfc/sfc_intr.c6
-rw-r--r--drivers/net/sfc/sfc_kvargs.c4
-rw-r--r--drivers/net/sfc/sfc_kvargs.h24
-rw-r--r--drivers/net/sfc/sfc_log.h77
-rw-r--r--drivers/net/sfc/sfc_mcdi.c25
-rw-r--r--drivers/net/sfc/sfc_port.c68
-rw-r--r--drivers/net/sfc/sfc_rx.c398
-rw-r--r--drivers/net/sfc/sfc_rx.h11
-rw-r--r--drivers/net/sfc/sfc_tso.c3
-rw-r--r--drivers/net/sfc/sfc_tweak.h8
-rw-r--r--drivers/net/sfc/sfc_tx.c73
-rw-r--r--drivers/net/softnic/rte_eth_softnic.c45
-rw-r--r--drivers/net/softnic/rte_eth_softnic_tm.c23
-rw-r--r--drivers/net/szedata2/Makefile33
-rw-r--r--drivers/net/szedata2/rte_eth_szedata2.c930
-rw-r--r--drivers/net/szedata2/rte_eth_szedata2.h37
-rw-r--r--drivers/net/szedata2/szedata2_iobuf.c203
-rw-r--r--drivers/net/szedata2/szedata2_iobuf.h356
-rw-r--r--drivers/net/szedata2/szedata2_logs.h22
-rw-r--r--drivers/net/tap/rte_eth_tap.c645
-rw-r--r--drivers/net/tap/rte_eth_tap.h13
-rw-r--r--drivers/net/tap/tap_bpf.h2
-rw-r--r--drivers/net/tap/tap_bpf_api.c2
-rw-r--r--drivers/net/tap/tap_bpf_insns.h2
-rw-r--r--drivers/net/tap/tap_bpf_program.c4
-rw-r--r--drivers/net/tap/tap_flow.c126
-rw-r--r--drivers/net/tap/tap_flow.h2
-rw-r--r--drivers/net/tap/tap_intr.c4
-rw-r--r--drivers/net/tap/tap_log.h10
-rw-r--r--drivers/net/tap/tap_netlink.c20
-rw-r--r--drivers/net/tap/tap_netlink.h2
-rw-r--r--drivers/net/tap/tap_rss.h8
-rw-r--r--drivers/net/tap/tap_tcmsgs.c11
-rw-r--r--drivers/net/tap/tap_tcmsgs.h2
-rw-r--r--drivers/net/thunderx/base/nicvf_hw_defs.h5
-rw-r--r--drivers/net/thunderx/nicvf_ethdev.c141
-rw-r--r--drivers/net/vdev_netvsc/Makefile2
-rw-r--r--drivers/net/vdev_netvsc/vdev_netvsc.c275
-rw-r--r--drivers/net/vhost/rte_eth_vhost.c396
-rw-r--r--drivers/net/vhost/rte_eth_vhost.h35
-rw-r--r--drivers/net/virtio/meson.build27
-rw-r--r--drivers/net/virtio/virtio_ethdev.c166
-rw-r--r--drivers/net/virtio/virtio_ethdev.h8
-rw-r--r--drivers/net/virtio/virtio_rxtx.c9
-rw-r--r--drivers/net/virtio/virtio_user/vhost_kernel.c86
-rw-r--r--drivers/net/virtio/virtio_user/vhost_kernel_tap.c14
-rw-r--r--drivers/net/virtio/virtio_user/vhost_kernel_tap.h3
-rw-r--r--drivers/net/virtio/virtio_user/vhost_user.c76
-rw-r--r--drivers/net/virtio/virtio_user/virtio_user_dev.c175
-rw-r--r--drivers/net/virtio/virtio_user/virtio_user_dev.h8
-rw-r--r--drivers/net/virtio/virtio_user_ethdev.c131
-rw-r--r--drivers/net/vmxnet3/Makefile4
-rw-r--r--drivers/net/vmxnet3/base/upt1_defs.h7
-rw-r--r--drivers/net/vmxnet3/base/vmxnet3_defs.h34
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethdev.c150
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethdev.h1
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ring.h2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_rxtx.c212
-rw-r--r--drivers/raw/Makefile5
-rw-r--r--drivers/raw/dpaa2_cmdif/Makefile36
-rw-r--r--drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c300
-rw-r--r--drivers/raw/dpaa2_cmdif/dpaa2_cmdif_logs.h46
-rw-r--r--drivers/raw/dpaa2_cmdif/meson.build9
-rw-r--r--drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif.h35
-rw-r--r--drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif_version.map4
-rw-r--r--drivers/raw/dpaa2_qdma/Makefile37
-rw-r--r--drivers/raw/dpaa2_qdma/dpaa2_qdma.c1002
-rw-r--r--drivers/raw/dpaa2_qdma/dpaa2_qdma.h150
-rw-r--r--drivers/raw/dpaa2_qdma/dpaa2_qdma_logs.h46
-rw-r--r--drivers/raw/dpaa2_qdma/meson.build9
-rw-r--r--drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h286
-rw-r--r--drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma_version.map20
-rw-r--r--drivers/raw/ifpga_rawdev/Makefile36
-rw-r--r--drivers/raw/ifpga_rawdev/base/Makefile26
-rw-r--r--drivers/raw/ifpga_rawdev/base/README31
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_api.c294
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_api.h28
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_compat.h58
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_defines.h1663
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c821
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_enumerate.h11
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.c253
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h168
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_fme.c734
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_fme_dperf.c301
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_fme_error.c381
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_fme_iperf.c715
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c352
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_hw.h127
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_port.c388
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_port_error.c144
-rw-r--r--drivers/raw/ifpga_rawdev/base/meson.build34
-rw-r--r--drivers/raw/ifpga_rawdev/base/opae_debug.c99
-rw-r--r--drivers/raw/ifpga_rawdev/base/opae_debug.h19
-rw-r--r--drivers/raw/ifpga_rawdev/base/opae_hw_api.c381
-rw-r--r--drivers/raw/ifpga_rawdev/base/opae_hw_api.h253
-rw-r--r--drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.c145
-rw-r--r--drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.h279
-rw-r--r--drivers/raw/ifpga_rawdev/base/opae_osdep.h79
-rw-r--r--drivers/raw/ifpga_rawdev/base/osdep_raw/osdep_generic.h75
-rw-r--r--drivers/raw/ifpga_rawdev/base/osdep_rte/osdep_generic.h45
-rw-r--r--drivers/raw/ifpga_rawdev/ifpga_rawdev.c619
-rw-r--r--drivers/raw/ifpga_rawdev/ifpga_rawdev.h37
-rw-r--r--drivers/raw/ifpga_rawdev/meson.build15
-rw-r--r--drivers/raw/ifpga_rawdev/rte_pmd_ifpga_rawdev_version.map4
-rw-r--r--drivers/raw/meson.build7
-rw-r--r--drivers/raw/skeleton_rawdev/meson.build8
-rw-r--r--drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c3
775 files changed, 116156 insertions, 30313 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index ee65c87b..c88638c8 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -3,18 +3,21 @@
include $(RTE_SDK)/mk/rte.vars.mk
+DIRS-y += common
DIRS-y += bus
DIRS-y += mempool
-DEPDIRS-mempool := bus
+DEPDIRS-mempool := common bus
DIRS-y += net
-DEPDIRS-net := bus mempool
-DIRS-$(CONFIG_RTE_LIBRTE_BBDEV) += bbdev
-DEPDIRS-bbdev := bus mempool
+DEPDIRS-net := common bus mempool
+DIRS-$(CONFIG_RTE_LIBRTE_BBDEV) += baseband
+DEPDIRS-baseband := common bus mempool
DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += crypto
-DEPDIRS-crypto := bus mempool
+DEPDIRS-crypto := common bus mempool
+DIRS-$(CONFIG_RTE_LIBRTE_COMPRESSDEV) += compress
+DEPDIRS-compress := bus mempool
DIRS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += event
-DEPDIRS-event := bus mempool net
+DEPDIRS-event := common bus mempool net
DIRS-$(CONFIG_RTE_LIBRTE_RAWDEV) += raw
-DEPDIRS-raw := bus mempool net event
+DEPDIRS-raw := common bus mempool net event
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/bbdev/Makefile b/drivers/baseband/Makefile
index 4ec83b0a..4ec83b0a 100644
--- a/drivers/bbdev/Makefile
+++ b/drivers/baseband/Makefile
diff --git a/drivers/bbdev/null/Makefile b/drivers/baseband/null/Makefile
index f885a97b..f885a97b 100644
--- a/drivers/bbdev/null/Makefile
+++ b/drivers/baseband/null/Makefile
diff --git a/drivers/bbdev/null/bbdev_null.c b/drivers/baseband/null/bbdev_null.c
index 6bc84917..76fc9d7a 100644
--- a/drivers/bbdev/null/bbdev_null.c
+++ b/drivers/baseband/null/bbdev_null.c
@@ -13,7 +13,7 @@
#include <rte_bbdev.h>
#include <rte_bbdev_pmd.h>
-#define DRIVER_NAME bbdev_null
+#define DRIVER_NAME baseband_null
/* NULL BBDev logging ID */
static int bbdev_null_logtype;
@@ -71,7 +71,8 @@ info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info)
dev_info->max_num_queues = internals->max_nb_queues;
dev_info->queue_size_lim = RTE_BBDEV_QUEUE_SIZE_LIMIT;
dev_info->hardware_accelerated = false;
- dev_info->max_queue_priority = 0;
+ dev_info->max_dl_queue_priority = 0;
+ dev_info->max_ul_queue_priority = 0;
dev_info->default_queue_conf = default_queue_conf;
dev_info->capabilities = bbdev_capabilities;
dev_info->cpu_flag_reqs = NULL;
@@ -345,6 +346,7 @@ RTE_PMD_REGISTER_VDEV(DRIVER_NAME, bbdev_null_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME,
BBDEV_NULL_MAX_NB_QUEUES_ARG"=<int> "
BBDEV_NULL_SOCKET_ID_ARG"=<int>");
+RTE_PMD_REGISTER_ALIAS(DRIVER_NAME, bbdev_null);
RTE_INIT(null_bbdev_init_log);
static void
diff --git a/drivers/bbdev/null/rte_pmd_bbdev_null_version.map b/drivers/baseband/null/rte_pmd_bbdev_null_version.map
index 58b94270..58b94270 100644
--- a/drivers/bbdev/null/rte_pmd_bbdev_null_version.map
+++ b/drivers/baseband/null/rte_pmd_bbdev_null_version.map
diff --git a/drivers/bbdev/turbo_sw/Makefile b/drivers/baseband/turbo_sw/Makefile
index 79eb5547..79eb5547 100644
--- a/drivers/bbdev/turbo_sw/Makefile
+++ b/drivers/baseband/turbo_sw/Makefile
diff --git a/drivers/bbdev/turbo_sw/bbdev_turbo_software.c b/drivers/baseband/turbo_sw/bbdev_turbo_software.c
index 302abf5c..05e95ed9 100644
--- a/drivers/bbdev/turbo_sw/bbdev_turbo_software.c
+++ b/drivers/baseband/turbo_sw/bbdev_turbo_software.c
@@ -9,6 +9,7 @@
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_kvargs.h>
+#include <rte_cycles.h>
#include <rte_bbdev.h>
#include <rte_bbdev_pmd.h>
@@ -18,7 +19,7 @@
#include <phy_rate_match.h>
#include <divide.h>
-#define DRIVER_NAME turbo_sw
+#define DRIVER_NAME baseband_turbo_sw
/* Turbo SW PMD logging ID */
static int bbdev_turbo_sw_logtype;
@@ -32,11 +33,9 @@ static int bbdev_turbo_sw_logtype;
rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
##__VA_ARGS__)
-/* Number of columns in sub-block interleaver (36.212, section 5.1.4.1.1) */
-#define C_SUBBLOCK (32)
-#define MAX_TB_SIZE (391656)
-#define MAX_CB_SIZE (6144)
-#define MAX_KW (18528)
+#define DEINT_INPUT_BUF_SIZE (((RTE_BBDEV_MAX_CB_SIZE >> 3) + 1) * 48)
+#define DEINT_OUTPUT_BUF_SIZE (DEINT_INPUT_BUF_SIZE * 6)
+#define ADAPTER_OUTPUT_BUF_SIZE ((RTE_BBDEV_MAX_CB_SIZE + 4) * 48)
/* private data structure */
struct bbdev_private {
@@ -90,7 +89,7 @@ compute_idx(uint16_t k)
{
int32_t result = 0;
- if (k < 40 || k > MAX_CB_SIZE)
+ if (k < RTE_BBDEV_MIN_CB_SIZE || k > RTE_BBDEV_MAX_CB_SIZE)
return -1;
if (k > 2048) {
@@ -140,7 +139,9 @@ info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info)
RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN |
RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN |
RTE_BBDEV_TURBO_CRC_TYPE_24B |
+ RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP |
RTE_BBDEV_TURBO_EARLY_TERMINATION,
+ .max_llr_modulus = 16,
.num_buffers_src = RTE_BBDEV_MAX_CODE_BLOCKS,
.num_buffers_hard_out =
RTE_BBDEV_MAX_CODE_BLOCKS,
@@ -174,7 +175,8 @@ info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info)
dev_info->max_num_queues = internals->max_nb_queues;
dev_info->queue_size_lim = RTE_BBDEV_QUEUE_SIZE_LIMIT;
dev_info->hardware_accelerated = false;
- dev_info->max_queue_priority = 0;
+ dev_info->max_dl_queue_priority = 0;
+ dev_info->max_ul_queue_priority = 0;
dev_info->default_queue_conf = default_queue_conf;
dev_info->capabilities = bbdev_capabilities;
dev_info->cpu_flag_reqs = &cpu_flag;
@@ -225,7 +227,7 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
}
/* Allocate memory for encoder output. */
- ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_enc_out%u:%u",
+ ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_enc_o%u:%u",
dev->data->dev_id, q_id);
if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
rte_bbdev_log(ERR,
@@ -234,7 +236,8 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
return -ENAMETOOLONG;
}
q->enc_out = rte_zmalloc_socket(name,
- ((MAX_TB_SIZE >> 3) + 3) * sizeof(*q->enc_out) * 3,
+ ((RTE_BBDEV_MAX_TB_SIZE >> 3) + 3) *
+ sizeof(*q->enc_out) * 3,
RTE_CACHE_LINE_SIZE, queue_conf->socket);
if (q->enc_out == NULL) {
rte_bbdev_log(ERR,
@@ -244,7 +247,7 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
/* Allocate memory for rate matching output. */
ret = snprintf(name, RTE_RING_NAMESIZE,
- RTE_STR(DRIVER_NAME)"_enc_in%u:%u", dev->data->dev_id,
+ RTE_STR(DRIVER_NAME)"_enc_i%u:%u", dev->data->dev_id,
q_id);
if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
rte_bbdev_log(ERR,
@@ -253,7 +256,7 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
return -ENAMETOOLONG;
}
q->enc_in = rte_zmalloc_socket(name,
- (MAX_CB_SIZE >> 3) * sizeof(*q->enc_in),
+ (RTE_BBDEV_MAX_CB_SIZE >> 3) * sizeof(*q->enc_in),
RTE_CACHE_LINE_SIZE, queue_conf->socket);
if (q->enc_in == NULL) {
rte_bbdev_log(ERR,
@@ -271,7 +274,7 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
return -ENAMETOOLONG;
}
q->ag = rte_zmalloc_socket(name,
- MAX_CB_SIZE * 10 * sizeof(*q->ag),
+ RTE_BBDEV_MAX_CB_SIZE * 10 * sizeof(*q->ag),
RTE_CACHE_LINE_SIZE, queue_conf->socket);
if (q->ag == NULL) {
rte_bbdev_log(ERR,
@@ -289,7 +292,7 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
return -ENAMETOOLONG;
}
q->code_block = rte_zmalloc_socket(name,
- (6144 >> 3) * sizeof(*q->code_block),
+ RTE_BBDEV_MAX_CB_SIZE * sizeof(*q->code_block),
RTE_CACHE_LINE_SIZE, queue_conf->socket);
if (q->code_block == NULL) {
rte_bbdev_log(ERR,
@@ -299,7 +302,7 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
/* Allocate memory for Deinterleaver input. */
ret = snprintf(name, RTE_RING_NAMESIZE,
- RTE_STR(DRIVER_NAME)"_deint_input%u:%u",
+ RTE_STR(DRIVER_NAME)"_de_i%u:%u",
dev->data->dev_id, q_id);
if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
rte_bbdev_log(ERR,
@@ -308,7 +311,7 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
return -ENAMETOOLONG;
}
q->deint_input = rte_zmalloc_socket(name,
- MAX_KW * sizeof(*q->deint_input),
+ DEINT_INPUT_BUF_SIZE * sizeof(*q->deint_input),
RTE_CACHE_LINE_SIZE, queue_conf->socket);
if (q->deint_input == NULL) {
rte_bbdev_log(ERR,
@@ -318,7 +321,7 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
/* Allocate memory for Deinterleaver output. */
ret = snprintf(name, RTE_RING_NAMESIZE,
- RTE_STR(DRIVER_NAME)"_deint_output%u:%u",
+ RTE_STR(DRIVER_NAME)"_de_o%u:%u",
dev->data->dev_id, q_id);
if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
rte_bbdev_log(ERR,
@@ -327,7 +330,7 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
return -ENAMETOOLONG;
}
q->deint_output = rte_zmalloc_socket(NULL,
- MAX_KW * sizeof(*q->deint_output),
+ DEINT_OUTPUT_BUF_SIZE * sizeof(*q->deint_output),
RTE_CACHE_LINE_SIZE, queue_conf->socket);
if (q->deint_output == NULL) {
rte_bbdev_log(ERR,
@@ -337,7 +340,7 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
/* Allocate memory for Adapter output. */
ret = snprintf(name, RTE_RING_NAMESIZE,
- RTE_STR(DRIVER_NAME)"_adapter_output%u:%u",
+ RTE_STR(DRIVER_NAME)"_ada_o%u:%u",
dev->data->dev_id, q_id);
if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
rte_bbdev_log(ERR,
@@ -346,7 +349,7 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
return -ENAMETOOLONG;
}
q->adapter_output = rte_zmalloc_socket(NULL,
- MAX_CB_SIZE * 6 * sizeof(*q->adapter_output),
+ ADAPTER_OUTPUT_BUF_SIZE * sizeof(*q->adapter_output),
RTE_CACHE_LINE_SIZE, queue_conf->socket);
if (q->adapter_output == NULL) {
rte_bbdev_log(ERR,
@@ -414,9 +417,9 @@ is_enc_input_valid(const uint16_t k, const int32_t k_idx,
return -1;
}
- if (k > MAX_CB_SIZE) {
+ if (k > RTE_BBDEV_MAX_CB_SIZE) {
rte_bbdev_log(ERR, "CB size (%u) is too big, max: %d",
- k, MAX_CB_SIZE);
+ k, RTE_BBDEV_MAX_CB_SIZE);
return -1;
}
@@ -441,9 +444,9 @@ is_dec_input_valid(int32_t k_idx, int16_t kw, int16_t in_length)
return -1;
}
- if (kw > MAX_KW) {
+ if (kw > RTE_BBDEV_MAX_KW) {
rte_bbdev_log(ERR, "Input length (%u) is too big, max: %d",
- kw, MAX_KW);
+ kw, RTE_BBDEV_MAX_KW);
return -1;
}
@@ -452,20 +455,28 @@ is_dec_input_valid(int32_t k_idx, int16_t kw, int16_t in_length)
static inline void
process_enc_cb(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
- uint8_t cb_idx, uint8_t c, uint16_t k, uint16_t ncb,
+ uint8_t r, uint8_t c, uint16_t k, uint16_t ncb,
uint32_t e, struct rte_mbuf *m_in, struct rte_mbuf *m_out,
- uint16_t in_offset, uint16_t out_offset, uint16_t total_left)
+ uint16_t in_offset, uint16_t out_offset, uint16_t total_left,
+ struct rte_bbdev_stats *q_stats)
{
int ret;
int16_t k_idx;
uint16_t m;
uint8_t *in, *out0, *out1, *out2, *tmp_out, *rm_out;
+ uint64_t first_3_bytes = 0;
struct rte_bbdev_op_turbo_enc *enc = &op->turbo_enc;
struct bblib_crc_request crc_req;
+ struct bblib_crc_response crc_resp;
struct bblib_turbo_encoder_request turbo_req;
struct bblib_turbo_encoder_response turbo_resp;
struct bblib_rate_match_dl_request rm_req;
struct bblib_rate_match_dl_response rm_resp;
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ uint64_t start_time;
+#else
+ RTE_SET_USED(q_stats);
+#endif
k_idx = compute_idx(k);
in = rte_pktmbuf_mtod_offset(m_in, uint8_t *, in_offset);
@@ -478,18 +489,31 @@ process_enc_cb(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
op->status |= 1 << RTE_BBDEV_DATA_ERROR;
return;
}
- /* copy the input to the temporary buffer to be able to extend
- * it by 3 CRC bytes
- */
- rte_memcpy(q->enc_in, in, (k - 24) >> 3);
- crc_req.data = q->enc_in;
+ crc_req.data = in;
crc_req.len = (k - 24) >> 3;
- if (bblib_lte_crc24a_gen(&crc_req) == -1) {
- op->status |= 1 << RTE_BBDEV_CRC_ERROR;
- rte_bbdev_log(ERR, "CRC24a generation failed");
- return;
+ /* Check if there is a room for CRC bits. If not use
+ * the temporary buffer.
+ */
+ if (rte_pktmbuf_append(m_in, 3) == NULL) {
+ rte_memcpy(q->enc_in, in, (k - 24) >> 3);
+ in = q->enc_in;
+ } else {
+ /* Store 3 first bytes of next CB as they will be
+ * overwritten by CRC bytes. If it is the last CB then
+ * there is no point to store 3 next bytes and this
+ * if..else branch will be omitted.
+ */
+ first_3_bytes = *((uint64_t *)&in[(k - 32) >> 3]);
}
- in = q->enc_in;
+
+ crc_resp.data = in;
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ start_time = rte_rdtsc_precise();
+#endif
+ bblib_lte_crc24a_gen(&crc_req, &crc_resp);
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ q_stats->offload_time += rte_rdtsc_precise() - start_time;
+#endif
} else if (enc->op_flags & RTE_BBDEV_TURBO_CRC_24B_ATTACH) {
/* CRC24B */
ret = is_enc_input_valid(k - 24, k_idx, total_left);
@@ -497,18 +521,31 @@ process_enc_cb(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
op->status |= 1 << RTE_BBDEV_DATA_ERROR;
return;
}
- /* copy the input to the temporary buffer to be able to extend
- * it by 3 CRC bytes
- */
- rte_memcpy(q->enc_in, in, (k - 24) >> 3);
- crc_req.data = q->enc_in;
+ crc_req.data = in;
crc_req.len = (k - 24) >> 3;
- if (bblib_lte_crc24b_gen(&crc_req) == -1) {
- op->status |= 1 << RTE_BBDEV_CRC_ERROR;
- rte_bbdev_log(ERR, "CRC24b generation failed");
- return;
+ /* Check if there is a room for CRC bits. If this is the last
+ * CB in TB. If not use temporary buffer.
+ */
+ if ((c - r == 1) && (rte_pktmbuf_append(m_in, 3) == NULL)) {
+ rte_memcpy(q->enc_in, in, (k - 24) >> 3);
+ in = q->enc_in;
+ } else if (c - r > 1) {
+ /* Store 3 first bytes of next CB as they will be
+ * overwritten by CRC bytes. If it is the last CB then
+ * there is no point to store 3 next bytes and this
+ * if..else branch will be omitted.
+ */
+ first_3_bytes = *((uint64_t *)&in[(k - 32) >> 3]);
}
- in = q->enc_in;
+
+ crc_resp.data = in;
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ start_time = rte_rdtsc_precise();
+#endif
+ bblib_lte_crc24b_gen(&crc_req, &crc_resp);
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ q_stats->offload_time += rte_rdtsc_precise() - start_time;
+#endif
} else {
ret = is_enc_input_valid(k, k_idx, total_left);
if (ret != 0) {
@@ -522,10 +559,32 @@ process_enc_cb(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
/* Each bit layer output from turbo encoder is (k+4) bits long, i.e.
* input length + 4 tail bits. That's (k/8) + 1 bytes after rounding up.
* So dst_data's length should be 3*(k/8) + 3 bytes.
+ * In Rate-matching bypass case outputs pointers passed to encoder
+ * (out0, out1 and out2) can directly point to addresses of output from
+ * turbo_enc entity.
*/
- out0 = q->enc_out;
- out1 = RTE_PTR_ADD(out0, (k >> 3) + 1);
- out2 = RTE_PTR_ADD(out1, (k >> 3) + 1);
+ if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH) {
+ out0 = q->enc_out;
+ out1 = RTE_PTR_ADD(out0, (k >> 3) + 1);
+ out2 = RTE_PTR_ADD(out1, (k >> 3) + 1);
+ } else {
+ out0 = (uint8_t *)rte_pktmbuf_append(m_out, (k >> 3) * 3 + 2);
+ if (out0 == NULL) {
+ op->status |= 1 << RTE_BBDEV_DATA_ERROR;
+ rte_bbdev_log(ERR,
+ "Too little space in output mbuf");
+ return;
+ }
+ enc->output.length += (k >> 3) * 3 + 2;
+ /* rte_bbdev_op_data.offset can be different than the
+ * offset of the appended bytes
+ */
+ out0 = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset);
+ out1 = rte_pktmbuf_mtod_offset(m_out, uint8_t *,
+ out_offset + (k >> 3) + 1);
+ out2 = rte_pktmbuf_mtod_offset(m_out, uint8_t *,
+ out_offset + 2 * ((k >> 3) + 1));
+ }
turbo_req.case_id = k_idx;
turbo_req.input_win = in;
@@ -533,16 +592,37 @@ process_enc_cb(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
turbo_resp.output_win_0 = out0;
turbo_resp.output_win_1 = out1;
turbo_resp.output_win_2 = out2;
+
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ start_time = rte_rdtsc_precise();
+#endif
+
if (bblib_turbo_encoder(&turbo_req, &turbo_resp) != 0) {
op->status |= 1 << RTE_BBDEV_DRV_ERROR;
rte_bbdev_log(ERR, "Turbo Encoder failed");
return;
}
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ q_stats->offload_time += rte_rdtsc_precise() - start_time;
+#endif
+
+ /* Restore 3 first bytes of next CB if they were overwritten by CRC*/
+ if (first_3_bytes != 0)
+ *((uint64_t *)&in[(k - 32) >> 3]) = first_3_bytes;
+
/* Rate-matching */
if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH) {
+ uint8_t mask_id;
+ /* Integer round up division by 8 */
+ uint16_t out_len = (e + 7) >> 3;
+ /* The mask array is indexed using E%8. E is an even number so
+ * there are only 4 possible values.
+ */
+ const uint8_t mask_out[] = {0xFF, 0xC0, 0xF0, 0xFC};
+
/* get output data starting address */
- rm_out = (uint8_t *)rte_pktmbuf_append(m_out, (e >> 3));
+ rm_out = (uint8_t *)rte_pktmbuf_append(m_out, out_len);
if (rm_out == NULL) {
op->status |= 1 << RTE_BBDEV_DATA_ERROR;
rte_bbdev_log(ERR,
@@ -555,7 +635,7 @@ process_enc_cb(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
rm_out = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset);
/* index of current code block */
- rm_req.r = cb_idx;
+ rm_req.r = r;
/* total number of code block */
rm_req.C = c;
/* For DL - 1, UL - 0 */
@@ -582,17 +662,32 @@ process_enc_cb(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
rm_req.tin1 = out1;
rm_req.tin2 = out2;
rm_resp.output = rm_out;
- rm_resp.OutputLen = (e >> 3);
+ rm_resp.OutputLen = out_len;
if (enc->op_flags & RTE_BBDEV_TURBO_RV_INDEX_BYPASS)
rm_req.bypass_rvidx = 1;
else
rm_req.bypass_rvidx = 0;
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ start_time = rte_rdtsc_precise();
+#endif
+
if (bblib_rate_match_dl(&rm_req, &rm_resp) != 0) {
op->status |= 1 << RTE_BBDEV_DRV_ERROR;
rte_bbdev_log(ERR, "Rate matching failed");
return;
}
+
+ /* SW fills an entire last byte even if E%8 != 0. Clear the
+ * superfluous data bits for consistency with HW device.
+ */
+ mask_id = (e & 7) >> 1;
+ rm_out[out_len - 1] &= mask_out[mask_id];
+
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ q_stats->offload_time += rte_rdtsc_precise() - start_time;
+#endif
+
enc->output.length += rm_resp.OutputLen;
} else {
/* Rate matching is bypassed */
@@ -616,28 +711,12 @@ process_enc_cb(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
tmp_out++;
}
*tmp_out = 0;
-
- /* copy shifted output to turbo_enc entity */
- out0 = (uint8_t *)rte_pktmbuf_append(m_out,
- (k >> 3) * 3 + 2);
- if (out0 == NULL) {
- op->status |= 1 << RTE_BBDEV_DATA_ERROR;
- rte_bbdev_log(ERR,
- "Too little space in output mbuf");
- return;
- }
- enc->output.length += (k >> 3) * 3 + 2;
- /* rte_bbdev_op_data.offset can be different than the
- * offset of the appended bytes
- */
- out0 = rte_pktmbuf_mtod_offset(m_out, uint8_t *,
- out_offset);
- rte_memcpy(out0, q->enc_out, (k >> 3) * 3 + 2);
}
}
static inline void
-enqueue_enc_one_op(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op)
+enqueue_enc_one_op(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
+ struct rte_bbdev_stats *queue_stats)
{
uint8_t c, r, crc24_bits = 0;
uint16_t k, ncb;
@@ -652,9 +731,9 @@ enqueue_enc_one_op(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op)
/* Clear op status */
op->status = 0;
- if (total_left > MAX_TB_SIZE >> 3) {
+ if (total_left > RTE_BBDEV_MAX_TB_SIZE >> 3) {
rte_bbdev_log(ERR, "TB size (%u) is too big, max: %d",
- total_left, MAX_TB_SIZE);
+ total_left, RTE_BBDEV_MAX_TB_SIZE);
op->status = 1 << RTE_BBDEV_DATA_ERROR;
return;
}
@@ -692,7 +771,8 @@ enqueue_enc_one_op(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op)
}
process_enc_cb(q, op, r, c, k, ncb, e, m_in,
- m_out, in_offset, out_offset, total_left);
+ m_out, in_offset, out_offset, total_left,
+ queue_stats);
/* Update total_left */
total_left -= (k - crc24_bits) >> 3;
/* Update offsets for next CBs (if exist) */
@@ -714,12 +794,15 @@ enqueue_enc_one_op(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op)
static inline uint16_t
enqueue_enc_all_ops(struct turbo_sw_queue *q, struct rte_bbdev_enc_op **ops,
- uint16_t nb_ops)
+ uint16_t nb_ops, struct rte_bbdev_stats *queue_stats)
{
uint16_t i;
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ queue_stats->offload_time = 0;
+#endif
for (i = 0; i < nb_ops; ++i)
- enqueue_enc_one_op(q, ops[i]);
+ enqueue_enc_one_op(q, ops[i], queue_stats);
return rte_ring_enqueue_burst(q->processed_pkts, (void **)ops, nb_ops,
NULL);
@@ -739,11 +822,11 @@ remove_nulls_from_circular_buf(const uint8_t *in, uint8_t *out, uint16_t k,
const uint32_t d = k + 4;
const uint32_t kw = (ncb / 3);
const uint32_t nd = kw - d;
- const uint32_t r_subblock = kw / C_SUBBLOCK;
+ const uint32_t r_subblock = kw / RTE_BBDEV_C_SUBBLOCK;
/* Inter-column permutation pattern */
- const uint32_t P[C_SUBBLOCK] = {0, 16, 8, 24, 4, 20, 12, 28, 2, 18, 10,
- 26, 6, 22, 14, 30, 1, 17, 9, 25, 5, 21, 13, 29, 3, 19,
- 11, 27, 7, 23, 15, 31};
+ const uint32_t P[RTE_BBDEV_C_SUBBLOCK] = {0, 16, 8, 24, 4, 20, 12, 28,
+ 2, 18, 10, 26, 6, 22, 14, 30, 1, 17, 9, 25, 5, 21, 13,
+ 29, 3, 19, 11, 27, 7, 23, 15, 31};
in_idx = 0;
out_idx = 0;
@@ -786,7 +869,7 @@ remove_nulls_from_circular_buf(const uint8_t *in, uint8_t *out, uint16_t k,
}
/* Last interlaced row is different - its last byte is the only padding
- * byte. We can have from 2 up to 26 padding bytes (Nd) per sub-block.
+ * byte. We can have from 4 up to 28 padding bytes (Nd) per sub-block.
* After interlacing the 1st and 2nd parity sub-blocks we can have 0, 1
* or 2 padding bytes each time we make a step of 2 * R_SUBBLOCK bytes
* (moving to another column). 2nd parity sub-block uses the same
@@ -797,10 +880,10 @@ remove_nulls_from_circular_buf(const uint8_t *in, uint8_t *out, uint16_t k,
* 32nd (31+1) byte, then 64th etc. (step is C_SUBBLOCK == 32) and the
* last byte will be the first byte from the sub-block:
* (32 + 32 * (R_SUBBLOCK-1)) % Kw == Kw % Kw == 0. Nd can't be smaller
- * than 2 so we know that bytes with ids 0 and 1 must be the padding
- * bytes. The bytes from the 1st parity sub-block are the bytes from the
- * 31st column - Nd can't be greater than 26 so we are sure that there
- * are no padding bytes in 31st column.
+ * than 4 so we know that bytes with ids 0, 1, 2 and 3 must be the
+ * padding bytes. The bytes from the 1st parity sub-block are the bytes
+ * from the 31st column - Nd can't be greater than 28 so we are sure
+ * that there are no padding bytes in 31st column.
*/
rte_memcpy(&out[out_idx], &in[in_idx], 2 * r_subblock - 1);
}
@@ -815,14 +898,14 @@ move_padding_bytes(const uint8_t *in, uint8_t *out, uint16_t k,
rte_memcpy(&out[nd], in, d);
rte_memcpy(&out[nd + kpi + 64], &in[kpi], d);
- rte_memcpy(&out[nd + 2 * (kpi + 64)], &in[2 * kpi], d);
+ rte_memcpy(&out[(nd - 1) + 2 * (kpi + 64)], &in[2 * kpi], d);
}
static inline void
process_dec_cb(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op,
uint8_t c, uint16_t k, uint16_t kw, struct rte_mbuf *m_in,
struct rte_mbuf *m_out, uint16_t in_offset, uint16_t out_offset,
- bool check_crc_24b, uint16_t total_left)
+ bool check_crc_24b, uint16_t crc24_overlap, uint16_t total_left)
{
int ret;
int32_t k_idx;
@@ -880,7 +963,7 @@ process_dec_cb(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op,
adapter_resp.pharqout = q->adapter_output;
bblib_turbo_adapter_ul(&adapter_req, &adapter_resp);
- out = (uint8_t *)rte_pktmbuf_append(m_out, (k >> 3));
+ out = (uint8_t *)rte_pktmbuf_append(m_out, ((k - crc24_overlap) >> 3));
if (out == NULL) {
op->status |= 1 << RTE_BBDEV_DATA_ERROR;
rte_bbdev_log(ERR, "Too little space in output mbuf");
@@ -898,6 +981,8 @@ process_dec_cb(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op,
turbo_req.k = k;
turbo_req.k_idx = k_idx;
turbo_req.max_iter_num = dec->iter_max;
+ turbo_req.early_term_disable = !check_bit(dec->op_flags,
+ RTE_BBDEV_TURBO_EARLY_TERMINATION);
turbo_resp.ag_buf = q->ag;
turbo_resp.cb_buf = q->code_block;
turbo_resp.output = out;
@@ -920,6 +1005,7 @@ enqueue_dec_one_op(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op)
{
uint8_t c, r = 0;
uint16_t kw, k = 0;
+ uint16_t crc24_overlap = 0;
struct rte_bbdev_op_turbo_dec *dec = &op->turbo_dec;
struct rte_mbuf *m_in = dec->input.data;
struct rte_mbuf *m_out = dec->hard_output.data;
@@ -943,6 +1029,10 @@ enqueue_dec_one_op(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op)
c = 1;
}
+ if ((c > 1) && !check_bit(dec->op_flags,
+ RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP))
+ crc24_overlap = 24;
+
while (total_left > 0) {
if (dec->code_block_mode == 0)
k = (r < dec->tb_params.c_neg) ?
@@ -958,21 +1048,22 @@ enqueue_dec_one_op(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op)
* where D is the size of each output from turbo encoder block
* (k + 4).
*/
- kw = RTE_ALIGN_CEIL(k + 4, C_SUBBLOCK) * 3;
+ kw = RTE_ALIGN_CEIL(k + 4, RTE_BBDEV_C_SUBBLOCK) * 3;
process_dec_cb(q, op, c, k, kw, m_in, m_out, in_offset,
out_offset, check_bit(dec->op_flags,
- RTE_BBDEV_TURBO_CRC_TYPE_24B), total_left);
- /* As a result of decoding we get Code Block with included
- * decoded CRC24 at the end of Code Block. Type of CRC24 is
- * specified by flag.
+ RTE_BBDEV_TURBO_CRC_TYPE_24B), crc24_overlap,
+ total_left);
+ /* To keep CRC24 attached to end of Code block, use
+ * RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP flag, as it is
+ * removed by default once verified.
*/
/* Update total_left */
total_left -= kw;
/* Update offsets for next CBs (if exist) */
in_offset += kw;
- out_offset += (k >> 3);
+ out_offset += ((k - crc24_overlap) >> 3);
r++;
}
if (total_left != 0) {
@@ -1004,7 +1095,7 @@ enqueue_enc_ops(struct rte_bbdev_queue_data *q_data,
struct turbo_sw_queue *q = queue;
uint16_t nb_enqueued = 0;
- nb_enqueued = enqueue_enc_all_ops(q, ops, nb_ops);
+ nb_enqueued = enqueue_enc_all_ops(q, ops, nb_ops, &q_data->queue_stats);
q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
q_data->queue_stats.enqueued_count += nb_enqueued;
@@ -1206,6 +1297,7 @@ RTE_PMD_REGISTER_VDEV(DRIVER_NAME, bbdev_turbo_sw_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME,
TURBO_SW_MAX_NB_QUEUES_ARG"=<int> "
TURBO_SW_SOCKET_ID_ARG"=<int>");
+RTE_PMD_REGISTER_ALIAS(DRIVER_NAME, turbo_sw);
RTE_INIT(null_bbdev_init_log);
static void
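For context, the RTE_BBDEV_OFFLOAD_COST additions all follow one pattern: the per-burst accumulator is zeroed once in enqueue_enc_all_ops() and each call into the SDK kernels is then bracketed by TSC reads. A minimal sketch of that pattern, assuming q_stats points at the queue's rte_bbdev_stats:

#ifdef RTE_BBDEV_OFFLOAD_COST
	uint64_t start_time;

	q_stats->offload_time = 0;                /* once per enqueue burst */
#endif
	/* ... for each code block ... */
#ifdef RTE_BBDEV_OFFLOAD_COST
	start_time = rte_rdtsc_precise();         /* just before the SDK call */
#endif
	/* bblib_rate_match_dl(&rm_req, &rm_resp), etc. */
#ifdef RTE_BBDEV_OFFLOAD_COST
	q_stats->offload_time += rte_rdtsc_precise() - start_time;
#endif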
diff --git a/drivers/bbdev/turbo_sw/rte_pmd_bbdev_turbo_sw_version.map b/drivers/baseband/turbo_sw/rte_pmd_bbdev_turbo_sw_version.map
index 58b94270..58b94270 100644
--- a/drivers/bbdev/turbo_sw/rte_pmd_bbdev_turbo_sw_version.map
+++ b/drivers/baseband/turbo_sw/rte_pmd_bbdev_turbo_sw_version.map
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 7ef25939..ef7f2475 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -4,7 +4,10 @@
include $(RTE_SDK)/mk/rte.vars.mk
DIRS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += dpaa
+ifeq ($(CONFIG_RTE_EAL_VFIO),y)
DIRS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += fslmc
+endif
+DIRS-$(CONFIG_RTE_LIBRTE_IFPGA_BUS) += ifpga
DIRS-$(CONFIG_RTE_LIBRTE_PCI_BUS) += pci
DIRS-$(CONFIG_RTE_LIBRTE_VDEV_BUS) += vdev
diff --git a/drivers/bus/dpaa/base/fman/fman.c b/drivers/bus/dpaa/base/fman/fman.c
index bda62e01..bdb70042 100644
--- a/drivers/bus/dpaa/base/fman/fman.c
+++ b/drivers/bus/dpaa/base/fman/fman.c
@@ -300,7 +300,7 @@ fman_if_init(const struct device_node *dpa_node)
_errno = fman_get_mac_index(regs_addr_host, &__if->__if.mac_idx);
if (_errno) {
- FMAN_ERR(-EINVAL, "Invalid register address: %lu",
+ FMAN_ERR(-EINVAL, "Invalid register address: %" PRIx64,
regs_addr_host);
goto err;
}
@@ -442,6 +442,7 @@ fman_if_init(const struct device_node *dpa_node)
if (!pool_node) {
FMAN_ERR(-ENXIO, "%s: bad fsl,bman-buffer-pools\n",
dname);
+ free(bpool);
goto err;
}
pname = pool_node->full_name;
@@ -449,6 +450,7 @@ fman_if_init(const struct device_node *dpa_node)
prop = of_get_property(pool_node, "fsl,bpid", &proplen);
if (!prop) {
FMAN_ERR(-EINVAL, "%s: no fsl,bpid\n", pname);
+ free(bpool);
goto err;
}
assert(proplen == sizeof(*prop));
@@ -502,7 +504,7 @@ fman_if_init(const struct device_node *dpa_node)
/* Parsing of the network interface is complete, add it to the list */
DPAA_BUS_LOG(DEBUG, "Found %s, Tx Channel = %x, FMAN = %x,"
- "Port ID = %x\n",
+ "Port ID = %x",
dname, __if->__if.tx_channel_id, __if->__if.fman_idx,
__if->__if.mac_idx);
diff --git a/drivers/bus/dpaa/base/qbman/bman_driver.c b/drivers/bus/dpaa/base/qbman/bman_driver.c
index a1ef3921..1381da36 100644
--- a/drivers/bus/dpaa/base/qbman/bman_driver.c
+++ b/drivers/bus/dpaa/base/qbman/bman_driver.c
@@ -161,7 +161,7 @@ int bman_init_ccsr(const struct device_node *node)
PROT_WRITE, MAP_SHARED, ccsr_map_fd, phys_addr);
if (bman_ccsr_map == MAP_FAILED) {
pr_err("Can not map BMan CCSR base Bman: "
- "0x%x Phys: 0x%lx size 0x%lx",
+ "0x%x Phys: 0x%" PRIx64 " size 0x%" PRIu64,
*bman_addr, phys_addr, regs_size);
return -EINVAL;
}
diff --git a/drivers/bus/dpaa/base/qbman/qman.c b/drivers/bus/dpaa/base/qbman/qman.c
index 2b97671b..27d98cc1 100644
--- a/drivers/bus/dpaa/base/qbman/qman.c
+++ b/drivers/bus/dpaa/base/qbman/qman.c
@@ -314,9 +314,9 @@ loop:
if (!msg)
return 0;
}
- if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
+ if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
/* We aren't draining anything but FQRNIs */
- pr_err("Found verb 0x%x in MR\n", msg->verb);
+ pr_err("Found verb 0x%x in MR\n", msg->ern.verb);
return -1;
}
qm_mr_next(p);
@@ -483,7 +483,7 @@ static inline void qm_mr_pvb_update(struct qm_portal *portal)
/* when accessing 'verb', use __raw_readb() to ensure that compiler
* inlining doesn't try to optimise out "excess reads".
*/
- if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
+ if ((__raw_readb(&res->ern.verb) & QM_MR_VERB_VBIT) == mr->vbit) {
mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
if (!mr->pi)
mr->vbit ^= QM_MR_VERB_VBIT;
@@ -832,7 +832,7 @@ mr_loop:
goto mr_done;
swapped_msg = *msg;
hw_fd_to_cpu(&swapped_msg.ern.fd);
- verb = msg->verb & QM_MR_VERB_TYPE_MASK;
+ verb = msg->ern.verb & QM_MR_VERB_TYPE_MASK;
/* The message is a software ERN iff the 0x20 bit is set */
if (verb & 0x20) {
switch (verb) {
@@ -1087,7 +1087,7 @@ unsigned int qman_portal_poll_rx(unsigned int poll_limit,
shadow[rx_number]->fd.opaque =
be32_to_cpu(dq[rx_number]->fd.opaque);
#else
- shadow = dq;
+ shadow[rx_number] = dq[rx_number];
#endif
/* SDQCR: context_b points to the FQ */
@@ -1095,7 +1095,8 @@ unsigned int qman_portal_poll_rx(unsigned int poll_limit,
fq[rx_number] = qman_fq_lookup_table[be32_to_cpu(
dq[rx_number]->contextB)];
#else
- fq[rx_number] = (void *)(uintptr_t)be32_to_cpu(dq->contextB);
+ fq[rx_number] = (void *)be32_to_cpu(
+ dq[rx_number]->contextB);
#endif
fq[rx_number]->cb.dqrr_prepare(shadow[rx_number],
&bufs[rx_number]);
@@ -1665,7 +1666,7 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags)
*/
struct qm_mr_entry msg;
- msg.verb = QM_MR_VERB_FQRNI;
+ msg.ern.verb = QM_MR_VERB_FQRNI;
msg.fq.fqs = mcr->alterfq.fqs;
msg.fq.fqid = fq->fqid;
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
@@ -2642,7 +2643,7 @@ int qman_shutdown_fq(u32 fqid)
qm_mr_pvb_update(low_p);
msg = qm_mr_current(low_p);
while (msg) {
- if ((msg->verb &
+ if ((msg->ern.verb &
QM_MR_VERB_TYPE_MASK)
== QM_MR_VERB_FQRN)
found_fqrn = 1;
@@ -2710,7 +2711,7 @@ int qman_shutdown_fq(u32 fqid)
qm_mr_pvb_update(low_p);
msg = qm_mr_current(low_p);
while (msg) {
- if ((msg->verb & QM_MR_VERB_TYPE_MASK) ==
+ if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) ==
QM_MR_VERB_FQRL)
orl_empty = 1;
qm_mr_next(low_p);
diff --git a/drivers/bus/dpaa/base/qbman/qman_driver.c b/drivers/bus/dpaa/base/qbman/qman_driver.c
index 7cfa8ee4..07b29d55 100644
--- a/drivers/bus/dpaa/base/qbman/qman_driver.c
+++ b/drivers/bus/dpaa/base/qbman/qman_driver.c
@@ -160,6 +160,7 @@ struct qman_portal *fsl_qman_portal_create(void)
&cpuset);
if (ret) {
error(0, ret, "pthread_getaffinity_np()");
+ kfree(q_pcfg);
return NULL;
}
@@ -168,12 +169,14 @@ struct qman_portal *fsl_qman_portal_create(void)
if (CPU_ISSET(loop, &cpuset)) {
if (q_pcfg->cpu != -1) {
pr_err("Thread is not affine to 1 cpu\n");
+ kfree(q_pcfg);
return NULL;
}
q_pcfg->cpu = loop;
}
if (q_pcfg->cpu == -1) {
pr_err("Bug in getaffinity handling!\n");
+ kfree(q_pcfg);
return NULL;
}
@@ -183,6 +186,7 @@ struct qman_portal *fsl_qman_portal_create(void)
ret = process_portal_map(&q_map);
if (ret) {
error(0, ret, "process_portal_map()");
+ kfree(q_pcfg);
return NULL;
}
q_pcfg->channel = q_map.channel;
@@ -217,6 +221,7 @@ err2:
close(q_fd);
err1:
process_portal_unmap(&q_map.addr);
+ kfree(q_pcfg);
return NULL;
}
@@ -246,7 +251,6 @@ int fsl_qman_portal_destroy(struct qman_portal *qp)
int qman_global_init(void)
{
const struct device_node *dt_node;
- int ret = 0;
size_t lenp;
const u32 *chanid;
static int ccsr_map_fd;
@@ -352,9 +356,7 @@ int qman_global_init(void)
qman_clk = be32_to_cpu(*clk);
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
- ret = qman_setup_fq_lookup_table(CONFIG_FSL_QMAN_FQ_LOOKUP_MAX);
- if (ret)
- return ret;
+ return qman_setup_fq_lookup_table(CONFIG_FSL_QMAN_FQ_LOOKUP_MAX);
#endif
return 0;
}
diff --git a/drivers/bus/dpaa/dpaa_bus.c b/drivers/bus/dpaa/dpaa_bus.c
index f2bb3b15..20462065 100644
--- a/drivers/bus/dpaa/dpaa_bus.c
+++ b/drivers/bus/dpaa/dpaa_bus.c
@@ -19,7 +19,6 @@
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
-#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
@@ -55,6 +54,8 @@ pthread_key_t dpaa_portal_key;
unsigned int dpaa_svr_family;
+#define FSL_DPAA_BUS_NAME dpaa_bus
+
RTE_DEFINE_PER_LCORE(bool, dpaa_io);
RTE_DEFINE_PER_LCORE(struct dpaa_portal_dqrr, held_bufs);
@@ -130,6 +131,22 @@ dpaa_sec_available(void)
static void dpaa_clean_device_list(void);
+static struct rte_devargs *
+dpaa_devargs_lookup(struct rte_dpaa_device *dev)
+{
+ struct rte_devargs *devargs;
+ char dev_name[32];
+
+ RTE_EAL_DEVARGS_FOREACH("dpaa_bus", devargs) {
+ devargs->bus->parse(devargs->name, &dev_name);
+ if (strcmp(dev_name, dev->device.name) == 0) {
+ DPAA_BUS_INFO("**Devargs matched %s", dev_name);
+ return devargs;
+ }
+ }
+ return NULL;
+}
+
static int
dpaa_create_device_list(void)
{
@@ -161,8 +178,9 @@ dpaa_create_device_list(void)
memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
sprintf(dev->name, "fm%d-mac%d", (fman_intf->fman_idx + 1),
fman_intf->mac_idx);
- DPAA_BUS_LOG(DEBUG, "Device added: %s", dev->name);
+ DPAA_BUS_LOG(INFO, "%s netdev added", dev->name);
dev->device.name = dev->name;
+ dev->device.devargs = dpaa_devargs_lookup(dev);
dpaa_add_to_device_list(dev);
}
@@ -198,7 +216,9 @@ dpaa_create_device_list(void)
*/
memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
sprintf(dev->name, "dpaa-sec%d", i);
- DPAA_BUS_LOG(DEBUG, "Device added: %s", dev->name);
+ DPAA_BUS_LOG(INFO, "%s cryptodev added", dev->name);
+ dev->device.name = dev->name;
+ dev->device.devargs = dpaa_devargs_lookup(dev);
dpaa_add_to_device_list(dev);
}
@@ -235,7 +255,7 @@ int rte_dpaa_portal_init(void *arg)
BUS_INIT_FUNC_TRACE();
- if ((uint64_t)arg == 1 || cpu == LCORE_ID_ANY)
+ if ((size_t)arg == 1 || cpu == LCORE_ID_ANY)
cpu = rte_get_master_lcore();
/* if the core id is not supported */
else
@@ -309,9 +329,15 @@ rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq)
/* Affine above created portal with channel*/
u32 sdqcr;
struct qman_portal *qp;
+ int ret;
- if (unlikely(!RTE_PER_LCORE(dpaa_io)))
- rte_dpaa_portal_init(arg);
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ ret = rte_dpaa_portal_init(arg);
+ if (ret < 0) {
+ DPAA_BUS_LOG(ERR, "portal initialization failure");
+ return ret;
+ }
+ }
/* Initialise qman specific portals */
qp = fsl_qman_portal_create();
@@ -352,6 +378,51 @@ dpaa_portal_finish(void *arg)
RTE_PER_LCORE(dpaa_io) = false;
}
+static int
+rte_dpaa_bus_parse(const char *name, void *out_name)
+{
+ int i, j;
+ int max_fman = 2, max_macs = 16;
+ char *sep = strchr(name, ':');
+
+ if (strncmp(name, RTE_STR(FSL_DPAA_BUS_NAME),
+ strlen(RTE_STR(FSL_DPAA_BUS_NAME)))) {
+ return -EINVAL;
+ }
+
+ if (!sep) {
+ DPAA_BUS_ERR("Incorrect device name observed");
+ return -EINVAL;
+ }
+
+ sep = (char *) (sep + 1);
+
+ for (i = 0; i < max_fman; i++) {
+ for (j = 0; j < max_macs; j++) {
+ char fm_name[16];
+ snprintf(fm_name, 16, "fm%d-mac%d", i, j);
+ if (strcmp(fm_name, sep) == 0) {
+ if (out_name)
+ strcpy(out_name, sep);
+ return 0;
+ }
+ }
+ }
+
+ for (i = 0; i < RTE_LIBRTE_DPAA_MAX_CRYPTODEV; i++) {
+ char sec_name[16];
+
+ snprintf(sec_name, 16, "dpaa-sec%d", i);
+ if (strcmp(sec_name, sep) == 0) {
+ if (out_name)
+ strcpy(out_name, sep);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
#define DPAA_DEV_PATH1 "/sys/devices/platform/soc/soc:fsl,dpaa"
#define DPAA_DEV_PATH2 "/sys/devices/platform/fsl,dpaa"
@@ -390,14 +461,11 @@ rte_dpaa_bus_scan(void)
return 0;
}
- DPAA_BUS_LOG(DEBUG, "Bus: Address of netcfg=%p, Ethports=%d",
- dpaa_netcfg, dpaa_netcfg->num_ethports);
-
#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
dump_netcfg(dpaa_netcfg);
#endif
- DPAA_BUS_LOG(DEBUG, "Number of devices = %d\n",
+ DPAA_BUS_LOG(DEBUG, "Number of ethernet devices = %d",
dpaa_netcfg->num_ethports);
ret = dpaa_create_device_list();
if (ret) {
@@ -415,9 +483,6 @@ rte_dpaa_bus_scan(void)
return ret;
}
- DPAA_BUS_LOG(DEBUG, "dpaa_portal_key=%u, ret=%d\n",
- (unsigned int)dpaa_portal_key, ret);
-
return 0;
}
@@ -453,22 +518,15 @@ static int
rte_dpaa_device_match(struct rte_dpaa_driver *drv,
struct rte_dpaa_device *dev)
{
- int ret = -1;
-
- BUS_INIT_FUNC_TRACE();
-
if (!drv || !dev) {
DPAA_BUS_DEBUG("Invalid drv or dev received.");
- return ret;
+ return -1;
}
- if (drv->drv_type == dev->device_type) {
- DPAA_BUS_INFO("Device: %s matches for driver: %s",
- dev->name, drv->driver.name);
- ret = 0; /* Found a match */
- }
+ if (drv->drv_type == dev->device_type)
+ return 0;
- return ret;
+ return -1;
}
static int
@@ -479,8 +537,7 @@ rte_dpaa_bus_probe(void)
struct rte_dpaa_driver *drv;
FILE *svr_file = NULL;
unsigned int svr_ver;
-
- BUS_INIT_FUNC_TRACE();
+ int probe_all = rte_dpaa_bus.bus.conf.scan_mode != RTE_BUS_SCAN_WHITELIST;
/* For each registered driver, and device, call the driver->probe */
TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
@@ -489,13 +546,19 @@ rte_dpaa_bus_probe(void)
if (ret)
continue;
- if (!drv->probe)
+ if (!drv->probe ||
+ (dev->device.devargs &&
+ dev->device.devargs->policy == RTE_DEV_BLACKLISTED))
continue;
- ret = drv->probe(drv, dev);
- if (ret)
- DPAA_BUS_ERR("Unable to probe.\n");
-
+ if (probe_all ||
+ (dev->device.devargs &&
+ dev->device.devargs->policy ==
+ RTE_DEV_WHITELISTED)) {
+ ret = drv->probe(drv, dev);
+ if (ret)
+ DPAA_BUS_ERR("Unable to probe.\n");
+ }
break;
}
}
@@ -552,6 +615,7 @@ struct rte_dpaa_bus rte_dpaa_bus = {
.bus = {
.scan = rte_dpaa_bus_scan,
.probe = rte_dpaa_bus_probe,
+ .parse = rte_dpaa_bus_parse,
.find_device = rte_dpaa_find_device,
.get_iommu_class = rte_dpaa_get_iommu_class,
},
@@ -574,11 +638,11 @@ dpaa_init_log(void)
if (dpaa_logtype_mempool >= 0)
rte_log_set_level(dpaa_logtype_mempool, RTE_LOG_NOTICE);
- dpaa_logtype_pmd = rte_log_register("pmd.dpaa");
+ dpaa_logtype_pmd = rte_log_register("pmd.net.dpaa");
if (dpaa_logtype_pmd >= 0)
rte_log_set_level(dpaa_logtype_pmd, RTE_LOG_NOTICE);
- dpaa_logtype_eventdev = rte_log_register("eventdev.dpaa");
+ dpaa_logtype_eventdev = rte_log_register("pmd.event.dpaa");
if (dpaa_logtype_eventdev >= 0)
rte_log_set_level(dpaa_logtype_eventdev, RTE_LOG_NOTICE);
}
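With the parse callback above, a DPAA device is addressed as "dpaa_bus:fmX-macY" (network interfaces) or "dpaa_bus:dpaa-secN" (crypto devices); dpaa_devargs_lookup() matches such devargs against the scanned devices, and the probe loop then applies the white/blacklist policy. A condensed sketch of that decision as a hypothetical helper, using the same fields as the patch:

static int
dpaa_should_probe(const struct rte_dpaa_device *dev, int probe_all)
{
	const struct rte_devargs *da = dev->device.devargs;

	if (da != NULL && da->policy == RTE_DEV_BLACKLISTED)
		return 0;                     /* explicitly blacklisted: skip */

	/* probe everything unless whitelist mode restricts the set */
	return probe_all || (da != NULL && da->policy == RTE_DEV_WHITELISTED);
}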
diff --git a/drivers/bus/dpaa/include/compat.h b/drivers/bus/dpaa/include/compat.h
index 53707bb4..e4b57021 100644
--- a/drivers/bus/dpaa/include/compat.h
+++ b/drivers/bus/dpaa/include/compat.h
@@ -39,6 +39,7 @@
#include <rte_spinlock.h>
#include <rte_common.h>
#include <rte_debug.h>
+#include <rte_cycles.h>
/* The following definitions are primarily to allow the single-source driver
* interfaces to be included by arbitrary program code. Ie. for interfaces that
@@ -127,13 +128,15 @@ static inline void out_be32(volatile void *__p, u32 val)
*p = rte_cpu_to_be_32(val);
}
+#define hwsync() rte_rmb()
+#define lwsync() rte_wmb()
+
#define dcbt_ro(p) __builtin_prefetch(p, 0)
#define dcbt_rw(p) __builtin_prefetch(p, 1)
+#if defined(RTE_ARCH_ARM64)
#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); }
#define dcbz_64(p) dcbz(p)
-#define hwsync() rte_rmb()
-#define lwsync() rte_wmb()
#define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); }
#define dcbf_64(p) dcbf(p)
#define dccivac(p) { asm volatile("dc civac, %0" : : "r"(p) : "memory"); }
@@ -144,9 +147,27 @@ static inline void out_be32(volatile void *__p, u32 val)
asm volatile("prfm pldl1keep, [%0, #64]" : : "r" (p)); \
} while (0)
+#elif defined(RTE_ARCH_ARM)
+#define dcbz(p) memset((p), 0, 32)
+#define dcbz_64(p) memset((p), 0, 64)
+#define dcbf(p) RTE_SET_USED(p)
+#define dcbf_64(p) dcbf(p)
+#define dccivac(p) RTE_SET_USED(p)
+#define dcbit_ro(p) RTE_SET_USED(p)
+
+#else
+#define dcbz(p) RTE_SET_USED(p)
+#define dcbz_64(p) dcbz(p)
+#define dcbf(p) RTE_SET_USED(p)
+#define dcbf_64(p) dcbf(p)
+#define dccivac(p) RTE_SET_USED(p)
+#define dcbit_ro(p) RTE_SET_USED(p)
+#endif
+
#define barrier() { asm volatile ("" : : : "memory"); }
#define cpu_relax barrier
+#if defined(RTE_ARCH_ARM64)
static inline uint64_t mfatb(void)
{
uint64_t ret, ret_new, timeout = 200;
@@ -160,6 +181,11 @@ static inline uint64_t mfatb(void)
DPAA_BUG_ON(!timeout && (ret != ret_new));
return ret * 64;
}
+#else
+
+#define mfatb rte_rdtsc
+
+#endif
/* Spin for a few cycles without bothering the bus */
static inline void cpu_spin(int cycles)
diff --git a/drivers/bus/dpaa/include/fsl_qman.h b/drivers/bus/dpaa/include/fsl_qman.h
index e9793f30..e4ad7ae4 100644
--- a/drivers/bus/dpaa/include/fsl_qman.h
+++ b/drivers/bus/dpaa/include/fsl_qman.h
@@ -284,20 +284,20 @@ static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
} while (0)
/* See 1.5.8.1: "Enqueue Command" */
-struct qm_eqcr_entry {
+struct __rte_aligned(8) qm_eqcr_entry {
u8 __dont_write_directly__verb;
u8 dca;
u16 seqnum;
u32 orp; /* 24-bit */
u32 fqid; /* 24-bit */
u32 tag;
- struct qm_fd fd;
+ struct qm_fd fd; /* this has alignment 8 */
u8 __reserved3[32];
} __packed;
/* "Frame Dequeue Response" */
-struct qm_dqrr_entry {
+struct __rte_aligned(8) qm_dqrr_entry {
u8 verb;
u8 stat;
u16 seqnum; /* 15-bit */
@@ -305,7 +305,7 @@ struct qm_dqrr_entry {
u8 __reserved2[3];
u32 fqid; /* 24-bit */
u32 contextB;
- struct qm_fd fd;
+ struct qm_fd fd; /* this has alignment 8 */
u8 __reserved4[32];
};
@@ -323,18 +323,19 @@ struct qm_dqrr_entry {
/* "ERN Message Response" */
/* "FQ State Change Notification" */
struct qm_mr_entry {
- u8 verb;
union {
struct {
+ u8 verb;
u8 dca;
u16 seqnum;
u8 rc; /* Rejection Code */
u32 orp:24;
u32 fqid; /* 24-bit */
u32 tag;
- struct qm_fd fd;
- } __packed ern;
+ struct qm_fd fd; /* this has alignment 8 */
+ } __packed __rte_aligned(8) ern;
struct {
+ u8 verb;
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
u8 __reserved1:4;
@@ -349,18 +350,19 @@ struct qm_mr_entry {
u32 __reserved3:24;
u32 fqid; /* 24-bit */
u32 tag;
- struct qm_fd fd;
- } __packed dcern;
+ struct qm_fd fd; /* this has alignment 8 */
+ } __packed __rte_aligned(8) dcern;
struct {
+ u8 verb;
u8 fqs; /* Frame Queue Status */
u8 __reserved1[6];
u32 fqid; /* 24-bit */
u32 contextB;
u8 __reserved2[16];
- } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */
+ } __packed __rte_aligned(8) fq; /* FQRN/FQRNI/FQRL/FQPN */
};
u8 __reserved2[32];
-} __packed;
+} __packed __rte_aligned(8);
#define QM_MR_VERB_VBIT 0x80
/*
* ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb
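The layout change above moves the verb byte inside each union member (ern, dcern, fq) so that the 8-byte alignment of the embedded struct qm_fd can be expressed per member. Because verb still sits at offset 0 of every member, it can be read through any of them, which is why qman.c switches from msg->verb to msg->ern.verb. A small sketch of the resulting access pattern, assuming portal is a struct qm_portal * with pending MR entries:

	const struct qm_mr_entry *msg = qm_mr_current(portal);

	if (msg != NULL) {
		uint8_t verb = msg->ern.verb & QM_MR_VERB_TYPE_MASK;

		if (verb == QM_MR_VERB_FQRNI) {
			/* FQ retirement notification: msg->fq.fqid is valid */
		}
	}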
diff --git a/drivers/bus/dpaa/meson.build b/drivers/bus/dpaa/meson.build
new file mode 100644
index 00000000..d10b62c0
--- /dev/null
+++ b/drivers/bus/dpaa/meson.build
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['eventdev']
+sources = files('base/fman/fman.c',
+ 'base/fman/fman_hw.c',
+ 'base/fman/netcfg_layer.c',
+ 'base/fman/of.c',
+ 'base/qbman/bman.c',
+ 'base/qbman/bman_driver.c',
+ 'base/qbman/dpaa_alloc.c',
+ 'base/qbman/dpaa_sys.c',
+ 'base/qbman/process.c',
+ 'base/qbman/qman.c',
+ 'base/qbman/qman_driver.c',
+ 'dpaa_bus.c')
+
+allow_experimental_apis = true
+
+if cc.has_argument('-Wno-cast-qual')
+ cflags += '-Wno-cast-qual'
+endif
+
+includes += include_directories('include', 'base/qbman')
+cflags += ['-D_GNU_SOURCE']
diff --git a/drivers/bus/dpaa/rte_dpaa_bus.h b/drivers/bus/dpaa/rte_dpaa_bus.h
index 718701b1..8573bd6e 100644
--- a/drivers/bus/dpaa/rte_dpaa_bus.h
+++ b/drivers/bus/dpaa/rte_dpaa_bus.h
@@ -15,8 +15,6 @@
#include <of.h>
#include <netcfg.h>
-#define FSL_DPAA_BUS_NAME "FSL_DPAA_BUS"
-
#define DPAA_MEMPOOL_OPS_NAME "dpaa"
#define DEV_TO_DPAA_DEVICE(ptr) \
@@ -95,20 +93,35 @@ struct dpaa_portal {
uint64_t tid;/**< Parent Thread id for this portal */
};
-/* TODO - this is costly, need to write a fast coversion routine */
+/* Various structures representing contiguous memory maps */
+struct dpaa_memseg {
+ TAILQ_ENTRY(dpaa_memseg) next;
+ char *vaddr;
+ rte_iova_t iova;
+ size_t len;
+};
+
+TAILQ_HEAD(dpaa_memseg_list, dpaa_memseg);
+extern struct dpaa_memseg_list rte_dpaa_memsegs;
+
+/* Either iterate over the list of internal memseg references or fall back to
+ * the EAL memseg-based iova2virt lookup.
+ */
static inline void *rte_dpaa_mem_ptov(phys_addr_t paddr)
{
- const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
- int i;
-
- for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr != NULL; i++) {
- if (paddr >= memseg[i].iova && paddr <
- memseg[i].iova + memseg[i].len)
- return (uint8_t *)(memseg[i].addr) +
- (paddr - memseg[i].iova);
+ struct dpaa_memseg *ms;
+
+ /* Check if the address is already part of the memseg list internally
+ * maintained by the dpaa driver.
+ */
+ TAILQ_FOREACH(ms, &rte_dpaa_memsegs, next) {
+ if (paddr >= ms->iova && paddr <
+ ms->iova + ms->len)
+ return RTE_PTR_ADD(ms->vaddr, (uintptr_t)(paddr - ms->iova));
}
- return NULL;
+ /* If not, fall back to searching the full EAL memseg list */
+ return rte_mem_iova2virt(paddr);
}
/**
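rte_dpaa_mem_ptov() now checks the driver-maintained rte_dpaa_memsegs list first and only falls back to the generic rte_mem_iova2virt() lookup, so translations for areas the driver itself tracked stay cheap. A hedged sketch of how an entry might be cached on that list (vaddr, iova and len are assumed locals describing a contiguous area the caller has just obtained):

	struct dpaa_memseg *ms;

	ms = rte_zmalloc(NULL, sizeof(struct dpaa_memseg), 0);
	if (ms != NULL) {
		ms->vaddr = vaddr;            /* virtual base of the area */
		ms->iova = iova;              /* its IOVA (bus/physical) base */
		ms->len = len;
		TAILQ_INSERT_HEAD(&rte_dpaa_memsegs, ms, next);
	}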
diff --git a/drivers/bus/dpaa/rte_dpaa_logs.h b/drivers/bus/dpaa/rte_dpaa_logs.h
index 0fd70bbf..e4143543 100644
--- a/drivers/bus/dpaa/rte_dpaa_logs.h
+++ b/drivers/bus/dpaa/rte_dpaa_logs.h
@@ -15,10 +15,7 @@ extern int dpaa_logtype_pmd;
extern int dpaa_logtype_eventdev;
#define DPAA_BUS_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, dpaa_logtype_bus, "%s(): " fmt "\n", \
- __func__, ##args)
-
-#define BUS_INIT_FUNC_TRACE() DPAA_BUS_LOG(DEBUG, " >>")
+ rte_log(RTE_LOG_ ## level, dpaa_logtype_bus, "dpaa: " fmt "\n", ##args)
#ifdef RTE_LIBRTE_DPAA_DEBUG_BUS
#define DPAA_BUS_HWWARN(cond, fmt, args...) \
@@ -31,7 +28,11 @@ extern int dpaa_logtype_eventdev;
#endif
#define DPAA_BUS_DEBUG(fmt, args...) \
- DPAA_BUS_LOG(DEBUG, fmt, ## args)
+ rte_log(RTE_LOG_DEBUG, dpaa_logtype_bus, "dpaa: %s(): " fmt "\n", \
+ __func__, ##args)
+
+#define BUS_INIT_FUNC_TRACE() DPAA_BUS_DEBUG(" >>")
+
#define DPAA_BUS_INFO(fmt, args...) \
DPAA_BUS_LOG(INFO, fmt, ## args)
#define DPAA_BUS_ERR(fmt, args...) \
diff --git a/drivers/bus/fslmc/Makefile b/drivers/bus/fslmc/Makefile
index de237f0f..515d0f53 100644
--- a/drivers/bus/fslmc/Makefile
+++ b/drivers/bus/fslmc/Makefile
@@ -9,23 +9,13 @@ include $(RTE_SDK)/mk/rte.vars.mk
#
LIB = librte_bus_fslmc.a
-ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_PMD),y)
-CONFIG_RTE_LIBRTE_FSLMC_BUS = $(CONFIG_RTE_LIBRTE_DPAA2_PMD)
-endif
-
CFLAGS += -DALLOW_EXPERIMENTAL_API
-ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT),y)
-CFLAGS += -O0 -g
-CFLAGS += "-Wno-error"
-else
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
-endif
CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc
CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
-CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev
@@ -42,11 +32,12 @@ SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += \
SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += \
mc/dpmng.c \
- mc/dpbp.c \
- mc/dpio.c \
- mc/mc_sys.c \
+ mc/dpbp.c \
+ mc/dpio.c \
+ mc/mc_sys.c \
mc/dpcon.c \
- mc/dpci.c
+ mc/dpci.c \
+ mc/dpdmai.c
SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += portal/dpaa2_hw_dpio.c
SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += portal/dpaa2_hw_dpbp.c
diff --git a/drivers/bus/fslmc/fslmc_bus.c b/drivers/bus/fslmc/fslmc_bus.c
index 5ee0beb8..c6301b20 100644
--- a/drivers/bus/fslmc/fslmc_bus.c
+++ b/drivers/bus/fslmc/fslmc_bus.c
@@ -18,11 +18,12 @@
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
+#include "fslmc_logs.h"
-#define FSLMC_BUS_LOG(level, fmt, args...) \
- RTE_LOG(level, EAL, fmt "\n", ##args)
+int dpaa2_logtype_bus;
#define VFIO_IOMMU_GROUP_PATH "/sys/kernel/iommu_groups"
+#define FSLMC_BUS_NAME fslmc
struct rte_fslmc_bus rte_fslmc_bus;
uint8_t dpaa2_virt_mode;
@@ -93,6 +94,41 @@ insert_in_device_list(struct rte_dpaa2_device *newdev)
TAILQ_INSERT_TAIL(&rte_fslmc_bus.device_list, newdev, next);
}
+static struct rte_devargs *
+fslmc_devargs_lookup(struct rte_dpaa2_device *dev)
+{
+ struct rte_devargs *devargs;
+ char dev_name[32];
+
+ RTE_EAL_DEVARGS_FOREACH("fslmc", devargs) {
+ devargs->bus->parse(devargs->name, &dev_name);
+ if (strcmp(dev_name, dev->device.name) == 0) {
+ DPAA2_BUS_INFO("**Devargs matched %s", dev_name);
+ return devargs;
+ }
+ }
+ return NULL;
+}
+
+static void
+dump_device_list(void)
+{
+ struct rte_dpaa2_device *dev;
+ uint32_t global_log_level;
+ int local_log_level;
+
+ /* Print the device list only if the log level is set to debug */
+ global_log_level = rte_log_get_global_level();
+ local_log_level = rte_log_get_level(dpaa2_logtype_bus);
+ if (global_log_level == RTE_LOG_DEBUG ||
+ local_log_level == RTE_LOG_DEBUG) {
+ DPAA2_BUS_LOG(DEBUG, "List of devices scanned on bus:");
+ TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) {
+ DPAA2_BUS_LOG(DEBUG, "\t\t%s", dev->device.name);
+ }
+ }
+}
+
static int
scan_one_fslmc_device(char *dev_name)
{
@@ -109,7 +145,7 @@ scan_one_fslmc_device(char *dev_name)
/* Creating a temporary copy to perform cut-parse over string */
dup_dev_name = strdup(dev_name);
if (!dup_dev_name) {
- FSLMC_BUS_LOG(ERR, "Out of memory.");
+ DPAA2_BUS_ERR("Unable to allocate device name memory");
return -ENOMEM;
}
@@ -120,7 +156,7 @@ scan_one_fslmc_device(char *dev_name)
*/
dev = calloc(1, sizeof(struct rte_dpaa2_device));
if (!dev) {
- FSLMC_BUS_LOG(ERR, "Out of memory.");
+ DPAA2_BUS_ERR("Unable to allocate device object");
free(dup_dev_name);
return -ENOMEM;
}
@@ -128,7 +164,7 @@ scan_one_fslmc_device(char *dev_name)
/* Parse the device name and ID */
t_ptr = strtok(dup_dev_name, ".");
if (!t_ptr) {
- FSLMC_BUS_LOG(ERR, "Incorrect device string observed.");
+ DPAA2_BUS_ERR("Incorrect device name observed");
goto cleanup;
}
if (!strncmp("dpni", t_ptr, 4))
@@ -145,6 +181,8 @@ scan_one_fslmc_device(char *dev_name)
dev->dev_type = DPAA2_CI;
else if (!strncmp("dpmcp", t_ptr, 5))
dev->dev_type = DPAA2_MPORTAL;
+ else if (!strncmp("dpdmai", t_ptr, 6))
+ dev->dev_type = DPAA2_QDMA;
else
dev->dev_type = DPAA2_UNKNOWN;
@@ -153,17 +191,17 @@ scan_one_fslmc_device(char *dev_name)
t_ptr = strtok(NULL, ".");
if (!t_ptr) {
- FSLMC_BUS_LOG(ERR, "Incorrect device string observed (%s).",
- t_ptr);
+ DPAA2_BUS_ERR("Incorrect device string observed (%s)", dev_name);
goto cleanup;
}
sscanf(t_ptr, "%hu", &dev->object_id);
dev->device.name = strdup(dev_name);
if (!dev->device.name) {
- FSLMC_BUS_LOG(ERR, "Out of memory.");
+ DPAA2_BUS_ERR("Unable to clone device name. Out of memory");
goto cleanup;
}
+ dev->device.devargs = fslmc_devargs_lookup(dev);
/* Add device in the fslmc device list */
insert_in_device_list(dev);
@@ -182,6 +220,54 @@ cleanup:
}
static int
+rte_fslmc_parse(const char *name, void *addr)
+{
+ uint16_t dev_id;
+ char *t_ptr;
+ char *sep = strchr(name, ':');
+
+ if (strncmp(name, RTE_STR(FSLMC_BUS_NAME),
+ strlen(RTE_STR(FSLMC_BUS_NAME)))) {
+ return -EINVAL;
+ }
+
+ if (!sep) {
+ DPAA2_BUS_ERR("Incorrect device name observed");
+ return -EINVAL;
+ }
+
+ t_ptr = (char *)(sep + 1);
+
+ if (strncmp("dpni", t_ptr, 4) &&
+ strncmp("dpseci", t_ptr, 6) &&
+ strncmp("dpcon", t_ptr, 5) &&
+ strncmp("dpbp", t_ptr, 4) &&
+ strncmp("dpio", t_ptr, 4) &&
+ strncmp("dpci", t_ptr, 4) &&
+ strncmp("dpmcp", t_ptr, 5) &&
+ strncmp("dpdmai", t_ptr, 6)) {
+ DPAA2_BUS_ERR("Unknown or unsupported device");
+ return -EINVAL;
+ }
+
+ t_ptr = strchr(name, '.');
+ if (!t_ptr) {
+ DPAA2_BUS_ERR("Incorrect device string observed (%s)", name);
+ return -EINVAL;
+ }
+
+ t_ptr = (char *)(t_ptr + 1);
+ if (sscanf(t_ptr, "%hu", &dev_id) <= 0) {
+ DPAA2_BUS_ERR("Incorrect device string observed (%s)", t_ptr);
+ return -EINVAL;
+ }
+
+ if (addr)
+ strcpy(addr, (char *)(sep + 1));
+ return 0;
+}
+
+static int
rte_fslmc_scan(void)
{
int ret;
@@ -193,8 +279,7 @@ rte_fslmc_scan(void)
int groupid;
if (process_once) {
- FSLMC_BUS_LOG(DEBUG,
- "Fslmc bus already scanned. Not rescanning");
+ DPAA2_BUS_DEBUG("Fslmc bus already scanned. Not rescanning");
return 0;
}
process_once = 1;
@@ -208,7 +293,7 @@ rte_fslmc_scan(void)
groupid);
dir = opendir(fslmc_dirpath);
if (!dir) {
- FSLMC_BUS_LOG(ERR, "Unable to open VFIO group dir.");
+ DPAA2_BUS_ERR("Unable to open VFIO group directory");
goto scan_fail;
}
@@ -224,9 +309,12 @@ rte_fslmc_scan(void)
device_count += 1;
}
- FSLMC_BUS_LOG(INFO, "fslmc: Bus scan completed");
-
closedir(dir);
+
+ DPAA2_BUS_INFO("FSLMC Bus scan completed");
+ /* If debugging is enabled, device list is dumped to log output */
+ dump_device_list();
+
return 0;
scan_fail_cleanup:
@@ -235,7 +323,7 @@ scan_fail_cleanup:
/* Remove all devices in the list */
cleanup_fslmc_device_list();
scan_fail:
- FSLMC_BUS_LOG(DEBUG, "FSLMC Bus Not Available. Skipping.");
+ DPAA2_BUS_DEBUG("FSLMC Bus Not Available. Skipping");
/* Irrespective of failure, scan only return success */
return 0;
}
@@ -254,6 +342,8 @@ static int
rte_fslmc_probe(void)
{
int ret = 0;
+ int probe_all;
+
struct rte_dpaa2_device *dev;
struct rte_dpaa2_driver *drv;
@@ -262,16 +352,29 @@ rte_fslmc_probe(void)
ret = fslmc_vfio_setup_group();
if (ret) {
- FSLMC_BUS_LOG(ERR, "Unable to setup VFIO %d", ret);
+ DPAA2_BUS_ERR("Unable to setup VFIO %d", ret);
+ return 0;
+ }
+
+ /* Map the existing memory segments and, in case of hotpluggable memory,
+ * install a callback handler.
+ */
+ ret = rte_fslmc_vfio_dmamap();
+ if (ret) {
+ DPAA2_BUS_ERR("Unable to DMA map existing VAs: (%d)", ret);
+ /* Not continuing ahead */
+ DPAA2_BUS_ERR("FSLMC VFIO Mapping failed");
return 0;
}
ret = fslmc_vfio_process_group();
if (ret) {
- FSLMC_BUS_LOG(ERR, "Unable to setup devices %d", ret);
+ DPAA2_BUS_ERR("Unable to setup devices %d", ret);
return 0;
}
+ probe_all = rte_fslmc_bus.bus.conf.scan_mode != RTE_BUS_SCAN_WHITELIST;
+
TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) {
TAILQ_FOREACH(drv, &rte_fslmc_bus.driver_list, next) {
ret = rte_fslmc_match(drv, dev);
@@ -281,9 +384,21 @@ rte_fslmc_probe(void)
if (!drv->probe)
continue;
- ret = drv->probe(drv, dev);
- if (ret)
- FSLMC_BUS_LOG(ERR, "Unable to probe.\n");
+ if (dev->device.devargs &&
+ dev->device.devargs->policy == RTE_DEV_BLACKLISTED) {
+ DPAA2_BUS_LOG(DEBUG, "%s Blacklisted, skipping",
+ dev->device.name);
+ continue;
+ }
+
+ if (probe_all ||
+ (dev->device.devargs &&
+ dev->device.devargs->policy ==
+ RTE_DEV_WHITELISTED)) {
+ ret = drv->probe(drv, dev);
+ if (ret)
+ DPAA2_BUS_ERR("Unable to probe");
+ }
break;
}
}
@@ -298,16 +413,19 @@ static struct rte_device *
rte_fslmc_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
const void *data)
{
+ const struct rte_dpaa2_device *dstart;
struct rte_dpaa2_device *dev;
- TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) {
- if (start && &dev->device == start) {
- start = NULL; /* starting point found */
- continue;
- }
-
+ if (start != NULL) {
+ dstart = RTE_DEV_TO_FSLMC_CONST(start);
+ dev = TAILQ_NEXT(dstart, next);
+ } else {
+ dev = TAILQ_FIRST(&rte_fslmc_bus.device_list);
+ }
+ while (dev != NULL) {
if (cmp(&dev->device, data) == 0)
return &dev->device;
+ dev = TAILQ_NEXT(dev, next);
}
return NULL;
@@ -390,6 +508,7 @@ struct rte_fslmc_bus rte_fslmc_bus = {
.bus = {
.scan = rte_fslmc_scan,
.probe = rte_fslmc_probe,
+ .parse = rte_fslmc_parse,
.find_device = rte_fslmc_find_device,
.get_iommu_class = rte_dpaa2_get_iommu_class,
},
@@ -398,4 +517,14 @@ struct rte_fslmc_bus rte_fslmc_bus = {
.device_count = {0},
};
-RTE_REGISTER_BUS(fslmc, rte_fslmc_bus.bus);
+RTE_REGISTER_BUS(FSLMC_BUS_NAME, rte_fslmc_bus.bus);
+
+RTE_INIT(fslmc_init_log);
+static void
+fslmc_init_log(void)
+{
+ /* Bus level logs */
+ dpaa2_logtype_bus = rte_log_register("bus.fslmc");
+ if (dpaa2_logtype_bus >= 0)
+ rte_log_set_level(dpaa2_logtype_bus, RTE_LOG_NOTICE);
+}
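rte_fslmc_parse() accepts names of the form "fslmc:<object>.<id>" for the object types listed above (dpni, dpseci, dpcon, dpbp, dpio, dpci, dpmcp, dpdmai) and copies the "<object>.<id>" part into the caller's buffer. A small sketch of exercising it through the registered bus (the object names shown are purely illustrative):

	struct rte_bus *bus = rte_bus_find_by_name("fslmc");
	char addr[32];

	if (bus != NULL && bus->parse != NULL) {
		if (bus->parse("fslmc:dpni.1", addr) == 0)
			printf("parsed: %s\n", addr);        /* "dpni.1" */

		if (bus->parse("fslmc:foo.1", addr) != 0)
			printf("unknown object type rejected\n");
	}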
diff --git a/drivers/bus/fslmc/fslmc_logs.h b/drivers/bus/fslmc/fslmc_logs.h
index d87b6386..9750b8c8 100644
--- a/drivers/bus/fslmc/fslmc_logs.h
+++ b/drivers/bus/fslmc/fslmc_logs.h
@@ -7,44 +7,35 @@
#ifndef _FSLMC_LOGS_H_
#define _FSLMC_LOGS_H_
-#define PMD_INIT_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
-
-#ifdef RTE_LIBRTE_DPAA2_DEBUG_INIT
-#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
-#else
-#define PMD_INIT_FUNC_TRACE() do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_DPAA2_DEBUG_RX
-#define PMD_RX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_DPAA2_DEBUG_TX
-#define PMD_TX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_DPAA2_DEBUG_TX_FREE
-#define PMD_TX_FREE_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
-#define PMD_DRV_LOG_RAW(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
-#else
-#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
-#endif
-
-#define PMD_DRV_LOG(level, fmt, args...) \
- PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+extern int dpaa2_logtype_bus;
+
+#define DPAA2_BUS_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_logtype_bus, "fslmc: " fmt "\n", \
+ ##args)
+
+/* Debug logs include the function name */
+#define DPAA2_BUS_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_logtype_bus, "fslmc: %s(): " fmt "\n", \
+ __func__, ##args)
+
+#define BUS_INIT_FUNC_TRACE() DPAA2_BUS_LOG(DEBUG, " >>")
+
+#define DPAA2_BUS_INFO(fmt, args...) \
+ DPAA2_BUS_LOG(INFO, fmt, ## args)
+#define DPAA2_BUS_ERR(fmt, args...) \
+ DPAA2_BUS_LOG(ERR, fmt, ## args)
+#define DPAA2_BUS_WARN(fmt, args...) \
+ DPAA2_BUS_LOG(WARNING, fmt, ## args)
+
+/* Datapath logs, compiled out when the level is below the configured level */
+#define DPAA2_BUS_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#define DPAA2_BUS_DP_DEBUG(fmt, args...) \
+ DPAA2_BUS_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_BUS_DP_INFO(fmt, args...) \
+ DPAA2_BUS_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_BUS_DP_WARN(fmt, args...) \
+ DPAA2_BUS_DP_LOG(WARNING, fmt, ## args)
#endif /* _FSLMC_LOGS_H_ */
diff --git a/drivers/bus/fslmc/fslmc_vfio.c b/drivers/bus/fslmc/fslmc_vfio.c
index 1241295b..4c2cd2a8 100644
--- a/drivers/bus/fslmc/fslmc_vfio.c
+++ b/drivers/bus/fslmc/fslmc_vfio.c
@@ -30,17 +30,16 @@
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_bus.h>
+#include <rte_eal_memconfig.h>
#include "rte_fslmc.h"
#include "fslmc_vfio.h"
+#include "fslmc_logs.h"
#include <mc/fsl_dpmng.h>
#include "portal/dpaa2_hw_pvt.h"
#include "portal/dpaa2_hw_dpio.h"
-#define FSLMC_VFIO_LOG(level, fmt, args...) \
- RTE_LOG(level, EAL, fmt "\n", ##args)
-
/** Pathname of FSL-MC devices directory. */
#define SYSFS_FSL_MC_DEVICES "/sys/bus/fsl-mc/devices"
@@ -53,7 +52,6 @@ static int container_device_fd;
static char *g_container;
static uint32_t *msi_intr_vaddr;
void *(*rte_mcp_ptr_list);
-static int is_dma_done;
static struct rte_dpaa2_object_list dpaa2_obj_list =
TAILQ_HEAD_INITIALIZER(dpaa2_obj_list);
@@ -76,33 +74,32 @@ fslmc_get_container_group(int *groupid)
if (!g_container) {
container = getenv("DPRC");
if (container == NULL) {
- RTE_LOG(WARNING, EAL, "DPAA2: DPRC not available\n");
+ DPAA2_BUS_DEBUG("DPAA2: DPRC not available");
return -EINVAL;
}
if (strlen(container) >= FSLMC_CONTAINER_MAX_LEN) {
- FSLMC_VFIO_LOG(ERR, "Invalid container name: %s\n",
- container);
+ DPAA2_BUS_ERR("Invalid container name: %s", container);
return -1;
}
g_container = strdup(container);
if (!g_container) {
- FSLMC_VFIO_LOG(ERR, "Out of memory.");
+ DPAA2_BUS_ERR("Mem alloc failure; Container name");
return -ENOMEM;
}
}
/* get group number */
- ret = vfio_get_group_no(SYSFS_FSL_MC_DEVICES, g_container, groupid);
+ ret = rte_vfio_get_group_num(SYSFS_FSL_MC_DEVICES,
+ g_container, groupid);
if (ret <= 0) {
- FSLMC_VFIO_LOG(ERR, "Unable to find %s IOMMU group",
- g_container);
+ DPAA2_BUS_ERR("Unable to find %s IOMMU group", g_container);
return -1;
}
- FSLMC_VFIO_LOG(DEBUG, "Container: %s has VFIO iommu group id = %d",
- g_container, *groupid);
+ DPAA2_BUS_DEBUG("Container: %s has VFIO iommu group id = %d",
+ g_container, *groupid);
return 0;
}
@@ -113,14 +110,14 @@ vfio_connect_container(void)
int fd, ret;
if (vfio_container.used) {
- FSLMC_VFIO_LOG(DEBUG, "No container available.");
+ DPAA2_BUS_DEBUG("No container available");
return -1;
}
/* Try connecting to vfio container if already created */
if (!ioctl(vfio_group.fd, VFIO_GROUP_SET_CONTAINER,
&vfio_container.fd)) {
- FSLMC_VFIO_LOG(INFO,
+ DPAA2_BUS_DEBUG(
"Container pre-exists with FD[0x%x] for this group",
vfio_container.fd);
vfio_group.container = &vfio_container;
@@ -128,9 +125,9 @@ vfio_connect_container(void)
}
/* Opens main vfio file descriptor which represents the "container" */
- fd = vfio_get_container_fd();
+ fd = rte_vfio_get_container_fd();
if (fd < 0) {
- FSLMC_VFIO_LOG(ERR, "Failed to open VFIO container");
+ DPAA2_BUS_ERR("Failed to open VFIO container");
return -errno;
}
@@ -139,19 +136,19 @@ vfio_connect_container(void)
/* Connect group to container */
ret = ioctl(vfio_group.fd, VFIO_GROUP_SET_CONTAINER, &fd);
if (ret) {
- FSLMC_VFIO_LOG(ERR, "Failed to setup group container");
+ DPAA2_BUS_ERR("Failed to setup group container");
close(fd);
return -errno;
}
ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
if (ret) {
- FSLMC_VFIO_LOG(ERR, "Failed to setup VFIO iommu");
+ DPAA2_BUS_ERR("Failed to setup VFIO iommu");
close(fd);
return -errno;
}
} else {
- FSLMC_VFIO_LOG(ERR, "No supported IOMMU available");
+ DPAA2_BUS_ERR("No supported IOMMU available");
close(fd);
return -EINVAL;
}
@@ -179,7 +176,7 @@ static int vfio_map_irq_region(struct fslmc_vfio_group *group)
vaddr = (unsigned long *)mmap(NULL, 0x1000, PROT_WRITE |
PROT_READ, MAP_SHARED, container_device_fd, 0x6030000);
if (vaddr == MAP_FAILED) {
- FSLMC_VFIO_LOG(ERR, "Unable to map region (errno = %d)", errno);
+ DPAA2_BUS_ERR("Unable to map region (errno = %d)", errno);
return -errno;
}
@@ -189,88 +186,195 @@ static int vfio_map_irq_region(struct fslmc_vfio_group *group)
if (ret == 0)
return 0;
- FSLMC_VFIO_LOG(ERR, "VFIO_IOMMU_MAP_DMA fails (errno = %d)", errno);
+ DPAA2_BUS_ERR("Unable to map DMA address (errno = %d)", errno);
return -errno;
}
-int rte_fslmc_vfio_dmamap(void)
+static int fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len);
+static int fslmc_unmap_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len);
+
+static void
+fslmc_memevent_cb(enum rte_mem_event type, const void *addr, size_t len,
+ void *arg __rte_unused)
{
+ struct rte_memseg_list *msl;
+ struct rte_memseg *ms;
+ size_t cur_len = 0, map_len = 0;
+ uint64_t virt_addr;
+ rte_iova_t iova_addr;
int ret;
+
+ msl = rte_mem_virt2memseg_list(addr);
+
+ while (cur_len < len) {
+ const void *va = RTE_PTR_ADD(addr, cur_len);
+
+ ms = rte_mem_virt2memseg(va, msl);
+ iova_addr = ms->iova;
+ virt_addr = ms->addr_64;
+ map_len = ms->len;
+
+ DPAA2_BUS_DEBUG("Request for %s, va=%p, "
+ "virt_addr=0x%" PRIx64 ", "
+ "iova=0x%" PRIx64 ", map_len=%zu",
+ type == RTE_MEM_EVENT_ALLOC ?
+ "alloc" : "dealloc",
+ va, virt_addr, iova_addr, map_len);
+
+ if (type == RTE_MEM_EVENT_ALLOC)
+ ret = fslmc_map_dma(virt_addr, iova_addr, map_len);
+ else
+ ret = fslmc_unmap_dma(virt_addr, iova_addr, map_len);
+
+ if (ret != 0) {
+ DPAA2_BUS_ERR("DMA Mapping/Unmapping failed. "
+ "Map=%d, addr=%p, len=%zu, err:(%d)",
+ type, va, map_len, ret);
+ return;
+ }
+
+ cur_len += map_len;
+ }
+
+ if (type == RTE_MEM_EVENT_ALLOC)
+ DPAA2_BUS_DEBUG("Total Mapped: addr=%p, len=%zu",
+ addr, len);
+ else
+ DPAA2_BUS_DEBUG("Total Unmapped: addr=%p, len=%zu",
+ addr, len);
+}
+
+static int
+fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr __rte_unused, size_t len)
+{
struct fslmc_vfio_group *group;
struct vfio_iommu_type1_dma_map dma_map = {
.argsz = sizeof(struct vfio_iommu_type1_dma_map),
.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
};
+ int ret;
- int i;
- const struct rte_memseg *memseg;
+ dma_map.size = len;
+ dma_map.vaddr = vaddr;
- if (is_dma_done)
- return 0;
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+ dma_map.iova = iovaddr;
+#else
+ dma_map.iova = dma_map.vaddr;
+#endif
- memseg = rte_eal_get_physmem_layout();
- if (memseg == NULL) {
- FSLMC_VFIO_LOG(ERR, "Cannot get physical layout.");
- return -ENODEV;
+ /* SET DMA MAP for IOMMU */
+ group = &vfio_group;
+
+ if (!group->container) {
+ DPAA2_BUS_ERR("Container is not connected ");
+ return -1;
}
- for (i = 0; i < RTE_MAX_MEMSEG; i++) {
- if (memseg[i].addr == NULL && memseg[i].len == 0) {
- FSLMC_VFIO_LOG(DEBUG, "Total %d segments found.", i);
- break;
- }
+ DPAA2_BUS_DEBUG("--> Map address: 0x%"PRIx64", size: %"PRIu64"",
+ (uint64_t)dma_map.vaddr, (uint64_t)dma_map.size);
+ ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &dma_map);
+ if (ret) {
+ DPAA2_BUS_ERR("VFIO_IOMMU_MAP_DMA API(errno = %d)",
+ errno);
+ return -1;
+ }
- dma_map.size = memseg[i].len;
- dma_map.vaddr = memseg[i].addr_64;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
- if (rte_eal_iova_mode() == RTE_IOVA_VA)
- dma_map.iova = dma_map.vaddr;
- else
- dma_map.iova = memseg[i].iova;
-#else
- dma_map.iova = dma_map.vaddr;
-#endif
+ return 0;
+}
- /* SET DMA MAP for IOMMU */
- group = &vfio_group;
+static int
+fslmc_unmap_dma(uint64_t vaddr, uint64_t iovaddr __rte_unused, size_t len)
+{
+ struct fslmc_vfio_group *group;
+ struct vfio_iommu_type1_dma_unmap dma_unmap = {
+ .argsz = sizeof(struct vfio_iommu_type1_dma_unmap),
+ .flags = 0,
+ };
+ int ret;
- if (!group->container) {
- FSLMC_VFIO_LOG(ERR, "Container is not connected ");
- return -1;
- }
+ dma_unmap.size = len;
+ dma_unmap.iova = vaddr;
- FSLMC_VFIO_LOG(DEBUG, "-->Initial SHM Virtual ADDR %llX",
- dma_map.vaddr);
- FSLMC_VFIO_LOG(DEBUG, "-----> DMA size 0x%llX", dma_map.size);
- ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA,
- &dma_map);
- if (ret) {
- FSLMC_VFIO_LOG(ERR, "VFIO_IOMMU_MAP_DMA API(errno = %d)",
- errno);
- return ret;
- }
+ /* SET DMA MAP for IOMMU */
+ group = &vfio_group;
+
+ if (!group->container) {
+ DPAA2_BUS_ERR("Container is not connected ");
+ return -1;
}
- /* Verifying that at least single segment is available */
- if (i <= 0) {
- FSLMC_VFIO_LOG(ERR, "No Segments found for VFIO Mapping");
+ DPAA2_BUS_DEBUG("--> Unmap address: 0x%"PRIx64", size: %"PRIu64"",
+ (uint64_t)dma_unmap.iova, (uint64_t)dma_unmap.size);
+ ret = ioctl(group->container->fd, VFIO_IOMMU_UNMAP_DMA, &dma_unmap);
+ if (ret) {
+ DPAA2_BUS_ERR("VFIO_IOMMU_UNMAP_DMA API(errno = %d)",
+ errno);
return -1;
}
+ return 0;
+}
+
+static int
+fslmc_dmamap_seg(const struct rte_memseg_list *msl __rte_unused,
+ const struct rte_memseg *ms, void *arg)
+{
+ int *n_segs = arg;
+ int ret;
+
+ ret = fslmc_map_dma(ms->addr_64, ms->iova, ms->len);
+ if (ret)
+ DPAA2_BUS_ERR("Unable to VFIO map (addr=%p, len=%zu)",
+ ms->addr, ms->len);
+ else
+ (*n_segs)++;
+
+ return ret;
+}
+
+int rte_fslmc_vfio_dmamap(void)
+{
+ int i = 0, ret;
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ rte_rwlock_t *mem_lock = &mcfg->memory_hotplug_lock;
+
+ /* Lock before parsing and registering callback to memory subsystem */
+ rte_rwlock_read_lock(mem_lock);
+
+ if (rte_memseg_walk(fslmc_dmamap_seg, &i) < 0) {
+ rte_rwlock_read_unlock(mem_lock);
+ return -1;
+ }
+
+ ret = rte_mem_event_callback_register("fslmc_memevent_clb",
+ fslmc_memevent_cb, NULL);
+ if (ret && rte_errno == ENOTSUP)
+ DPAA2_BUS_DEBUG("Memory event callbacks not supported");
+ else if (ret)
+ DPAA2_BUS_DEBUG("Unable to install memory handler");
+ else
+ DPAA2_BUS_DEBUG("Installed memory callback handler");
+
+ DPAA2_BUS_DEBUG("Total %d segments found.", i);
+
/* TODO - This is a W.A. as VFIO currently does not add the mapping of
* the interrupt region to SMMU. This should be removed once the
* support is added in the Kernel.
*/
- vfio_map_irq_region(group);
+ vfio_map_irq_region(&vfio_group);
- is_dma_done = 1;
+ /* Existing segments have been mapped and memory callback for hotplug
+ * has been installed.
+ */
+ rte_rwlock_read_unlock(mem_lock);
return 0;
}
static int64_t vfio_map_mcp_obj(struct fslmc_vfio_group *group, char *mcp_obj)
{
- int64_t v_addr = (int64_t)MAP_FAILED;
+ intptr_t v_addr = (intptr_t)MAP_FAILED;
int32_t ret, mc_fd;
struct vfio_device_info d_info = { .argsz = sizeof(d_info) };
@@ -279,29 +383,26 @@ static int64_t vfio_map_mcp_obj(struct fslmc_vfio_group *group, char *mcp_obj)
/* getting the mcp object's fd*/
mc_fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, mcp_obj);
if (mc_fd < 0) {
- FSLMC_VFIO_LOG(ERR, "error in VFIO get dev %s fd from group %d",
- mcp_obj, group->fd);
+ DPAA2_BUS_ERR("Error in VFIO get dev %s fd from group %d",
+ mcp_obj, group->fd);
return v_addr;
}
/* getting device info*/
ret = ioctl(mc_fd, VFIO_DEVICE_GET_INFO, &d_info);
if (ret < 0) {
- FSLMC_VFIO_LOG(ERR, "error in VFIO getting DEVICE_INFO");
+ DPAA2_BUS_ERR("Error in VFIO getting DEVICE_INFO");
goto MC_FAILURE;
}
/* getting device region info*/
ret = ioctl(mc_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
if (ret < 0) {
- FSLMC_VFIO_LOG(ERR, "error in VFIO getting REGION_INFO");
+ DPAA2_BUS_ERR("Error in VFIO getting REGION_INFO");
goto MC_FAILURE;
}
- FSLMC_VFIO_LOG(DEBUG, "region offset = %llx , region size = %llx",
- reg_info.offset, reg_info.size);
-
- v_addr = (uint64_t)mmap(NULL, reg_info.size,
+ v_addr = (size_t)mmap(NULL, reg_info.size,
PROT_WRITE | PROT_READ, MAP_SHARED,
mc_fd, reg_info.offset);
@@ -334,8 +435,8 @@ int rte_dpaa2_intr_enable(struct rte_intr_handle *intr_handle, int index)
ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
if (ret) {
- RTE_LOG(ERR, EAL, "Error:dpaa2 SET IRQs fd=%d, err = %d(%s)\n",
- intr_handle->fd, errno, strerror(errno));
+ DPAA2_BUS_ERR("Error:dpaa2 SET IRQs fd=%d, err = %d(%s)",
+ intr_handle->fd, errno, strerror(errno));
return ret;
}
@@ -359,8 +460,8 @@ int rte_dpaa2_intr_disable(struct rte_intr_handle *intr_handle, int index)
ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
if (ret)
- RTE_LOG(ERR, EAL,
- "Error disabling dpaa2 interrupts for fd %d\n",
+ DPAA2_BUS_ERR(
+ "Error disabling dpaa2 interrupts for fd %d",
intr_handle->fd);
return ret;
@@ -383,9 +484,8 @@ rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle,
ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
if (ret < 0) {
- FSLMC_VFIO_LOG(ERR,
- "cannot get IRQ(%d) info, error %i (%s)",
- i, errno, strerror(errno));
+ DPAA2_BUS_ERR("Cannot get IRQ(%d) info, error %i (%s)",
+ i, errno, strerror(errno));
return -1;
}
@@ -399,9 +499,8 @@ rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle,
/* set up an eventfd for interrupts */
fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
if (fd < 0) {
- FSLMC_VFIO_LOG(ERR,
- "cannot set up eventfd, error %i (%s)\n",
- errno, strerror(errno));
+ DPAA2_BUS_ERR("Cannot set up eventfd, error %i (%s)",
+ errno, strerror(errno));
return -1;
}
@@ -430,13 +529,14 @@ fslmc_process_iodevices(struct rte_dpaa2_device *dev)
dev_fd = ioctl(vfio_group.fd, VFIO_GROUP_GET_DEVICE_FD,
dev->device.name);
if (dev_fd <= 0) {
- FSLMC_VFIO_LOG(ERR, "Unable to obtain device FD for device:%s",
- dev->device.name);
+ DPAA2_BUS_ERR("Unable to obtain device FD for device:%s",
+ dev->device.name);
return -1;
}
if (ioctl(dev_fd, VFIO_DEVICE_GET_INFO, &device_info)) {
- FSLMC_VFIO_LOG(ERR, "DPAA2 VFIO_DEVICE_GET_INFO fail");
+ DPAA2_BUS_ERR("Unable to obtain information for device:%s",
+ dev->device.name);
return -1;
}
@@ -461,61 +561,74 @@ fslmc_process_iodevices(struct rte_dpaa2_device *dev)
break;
}
- FSLMC_VFIO_LOG(DEBUG, "Device (%s) abstracted from VFIO",
- dev->device.name);
+ DPAA2_BUS_LOG(DEBUG, "Device (%s) abstracted from VFIO",
+ dev->device.name);
return 0;
}
static int
fslmc_process_mcp(struct rte_dpaa2_device *dev)
{
- int64_t v_addr;
- char *dev_name;
+ int ret;
+ intptr_t v_addr;
+ char *dev_name = NULL;
struct fsl_mc_io dpmng = {0};
struct mc_version mc_ver_info = {0};
rte_mcp_ptr_list = malloc(sizeof(void *) * 1);
if (!rte_mcp_ptr_list) {
- FSLMC_VFIO_LOG(ERR, "Out of memory");
- return -ENOMEM;
+ DPAA2_BUS_ERR("Unable to allocate MC portal memory");
+ ret = -ENOMEM;
+ goto cleanup;
}
dev_name = strdup(dev->device.name);
if (!dev_name) {
- FSLMC_VFIO_LOG(ERR, "Out of memory.");
- free(rte_mcp_ptr_list);
- rte_mcp_ptr_list = NULL;
- return -ENOMEM;
+ DPAA2_BUS_ERR("Unable to allocate MC device name memory");
+ ret = -ENOMEM;
+ goto cleanup;
}
v_addr = vfio_map_mcp_obj(&vfio_group, dev_name);
- if (v_addr == (int64_t)MAP_FAILED) {
- FSLMC_VFIO_LOG(ERR, "Error mapping region (errno = %d)",
- errno);
- free(rte_mcp_ptr_list);
- rte_mcp_ptr_list = NULL;
- return -1;
+ if (v_addr == (intptr_t)MAP_FAILED) {
+ DPAA2_BUS_ERR("Error mapping region (errno = %d)", errno);
+ ret = -1;
+ goto cleanup;
}
/* check the MC version compatibility */
dpmng.regs = (void *)v_addr;
- if (mc_get_version(&dpmng, CMD_PRI_LOW, &mc_ver_info))
- RTE_LOG(WARNING, PMD, "\tmc_get_version failed\n");
+ if (mc_get_version(&dpmng, CMD_PRI_LOW, &mc_ver_info)) {
+ DPAA2_BUS_ERR("Unable to obtain MC version");
+ ret = -1;
+ goto cleanup;
+ }
if ((mc_ver_info.major != MC_VER_MAJOR) ||
(mc_ver_info.minor < MC_VER_MINOR)) {
- RTE_LOG(ERR, PMD, "DPAA2 MC version not compatible!"
- " Expected %d.%d.x, Detected %d.%d.%d\n",
- MC_VER_MAJOR, MC_VER_MINOR,
- mc_ver_info.major, mc_ver_info.minor,
- mc_ver_info.revision);
- free(rte_mcp_ptr_list);
- rte_mcp_ptr_list = NULL;
- return -1;
+ DPAA2_BUS_ERR("DPAA2 MC version not compatible!"
+ " Expected %d.%d.x, Detected %d.%d.%d",
+ MC_VER_MAJOR, MC_VER_MINOR,
+ mc_ver_info.major, mc_ver_info.minor,
+ mc_ver_info.revision);
+ ret = -1;
+ goto cleanup;
}
rte_mcp_ptr_list[0] = (void *)v_addr;
+ free(dev_name);
return 0;
+
+cleanup:
+ if (dev_name)
+ free(dev_name);
+
+ if (rte_mcp_ptr_list) {
+ free(rte_mcp_ptr_list);
+ rte_mcp_ptr_list = NULL;
+ }
+
+ return ret;
}
int
@@ -530,7 +643,7 @@ fslmc_vfio_process_group(void)
if (dev->dev_type == DPAA2_MPORTAL) {
ret = fslmc_process_mcp(dev);
if (ret) {
- FSLMC_VFIO_LOG(DEBUG, "Unable to map Portal.");
+ DPAA2_BUS_ERR("Unable to map MC Portal");
return -1;
}
if (!found_mportal)
@@ -547,23 +660,19 @@ fslmc_vfio_process_group(void)
/* Cannot continue if there is not even a single mportal */
if (!found_mportal) {
- FSLMC_VFIO_LOG(DEBUG,
- "No MC Portal device found. Not continuing.");
+ DPAA2_BUS_ERR("No MC Portal device found. Not continuing");
return -1;
}
TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) {
- if (!dev)
- break;
-
switch (dev->dev_type) {
case DPAA2_ETH:
case DPAA2_CRYPTO:
+ case DPAA2_QDMA:
ret = fslmc_process_iodevices(dev);
if (ret) {
- FSLMC_VFIO_LOG(DEBUG,
- "Dev (%s) init failed.",
- dev->device.name);
+ DPAA2_BUS_DEBUG("Dev (%s) init failed",
+ dev->device.name);
return ret;
}
break;
@@ -576,9 +685,8 @@ fslmc_vfio_process_group(void)
*/
ret = fslmc_process_iodevices(dev);
if (ret) {
- FSLMC_VFIO_LOG(DEBUG,
- "Dev (%s) init failed.",
- dev->device.name);
+ DPAA2_BUS_DEBUG("Dev (%s) init failed",
+ dev->device.name);
return -1;
}
@@ -592,8 +700,8 @@ fslmc_vfio_process_group(void)
case DPAA2_UNKNOWN:
default:
/* Unknown - ignore */
- FSLMC_VFIO_LOG(DEBUG, "Found unknown device (%s).",
- dev->device.name);
+ DPAA2_BUS_DEBUG("Found unknown device (%s)",
+ dev->device.name);
TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
free(dev);
dev = NULL;
@@ -622,12 +730,12 @@ fslmc_vfio_setup_group(void)
* processing.
*/
if (vfio_group.groupid == groupid) {
- FSLMC_VFIO_LOG(ERR, "groupid already exists %d", groupid);
+ DPAA2_BUS_ERR("groupid already exists %d", groupid);
return 0;
}
/* Get the actual group fd */
- ret = vfio_get_group_fd(groupid);
+ ret = rte_vfio_get_group_fd(groupid);
if (ret < 0)
return ret;
vfio_group.fd = ret;
@@ -635,14 +743,14 @@ fslmc_vfio_setup_group(void)
/* Check group viability */
ret = ioctl(vfio_group.fd, VFIO_GROUP_GET_STATUS, &status);
if (ret) {
- FSLMC_VFIO_LOG(ERR, "VFIO error getting group status");
+ DPAA2_BUS_ERR("VFIO error getting group status");
close(vfio_group.fd);
rte_vfio_clear_group(vfio_group.fd);
return ret;
}
if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
- FSLMC_VFIO_LOG(ERR, "VFIO group not viable");
+ DPAA2_BUS_ERR("VFIO group not viable");
close(vfio_group.fd);
rte_vfio_clear_group(vfio_group.fd);
return -EPERM;
@@ -655,7 +763,7 @@ fslmc_vfio_setup_group(void)
/* Now connect this IOMMU group to given container */
ret = vfio_connect_container();
if (ret) {
- FSLMC_VFIO_LOG(ERR,
+ DPAA2_BUS_ERR(
"Error connecting container with groupid %d",
groupid);
close(vfio_group.fd);
@@ -667,15 +775,15 @@ fslmc_vfio_setup_group(void)
/* Get Device information */
ret = ioctl(vfio_group.fd, VFIO_GROUP_GET_DEVICE_FD, g_container);
if (ret < 0) {
- FSLMC_VFIO_LOG(ERR, "Error getting device %s fd from group %d",
- g_container, vfio_group.groupid);
+ DPAA2_BUS_ERR("Error getting device %s fd from group %d",
+ g_container, vfio_group.groupid);
close(vfio_group.fd);
rte_vfio_clear_group(vfio_group.fd);
return ret;
}
container_device_fd = ret;
- FSLMC_VFIO_LOG(DEBUG, "VFIO Container FD is [0x%X]",
- container_device_fd);
+ DPAA2_BUS_DEBUG("VFIO Container FD is [0x%X]",
+ container_device_fd);
return 0;
}
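
For readers unfamiliar with the VFIO flow in the hunks above, the following is a minimal sketch (not part of the patch) of the group bring-up steps that fslmc_vfio_setup_group() performs through the public rte_vfio_* API and the standard Linux VFIO ioctls; the container hookup and device-fd retrieval that follow in the real code are left out, and the helper name is hypothetical.

/* Sketch only: check that a VFIO group is present and viable before the
 * fslmc bus connects it to its container (cf. fslmc_vfio_setup_group()).
 * Assumes <sys/ioctl.h>, <linux/vfio.h> and <rte_vfio.h> are included.
 */
static int fslmc_group_ready_sketch(int groupid)
{
	struct vfio_group_status status = { .argsz = sizeof(status) };
	int group_fd;

	group_fd = rte_vfio_get_group_fd(groupid);
	if (group_fd < 0)
		return group_fd;

	if (ioctl(group_fd, VFIO_GROUP_GET_STATUS, &status) ||
	    !(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
		rte_vfio_clear_group(group_fd);
		return -EPERM;	/* all group devices must be bound to vfio */
	}

	return group_fd;	/* caller connects the group to the container */
}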
diff --git a/drivers/bus/fslmc/fslmc_vfio.h b/drivers/bus/fslmc/fslmc_vfio.h
index e8fb3445..9e2c4fee 100644
--- a/drivers/bus/fslmc/fslmc_vfio.h
+++ b/drivers/bus/fslmc/fslmc_vfio.h
@@ -10,8 +10,6 @@
#include <rte_vfio.h>
-#include "eal_vfio.h"
-
#define DPAA2_MC_DPNI_DEVID 7
#define DPAA2_MC_DPSECI_DEVID 3
#define DPAA2_MC_DPCON_DEVID 5
diff --git a/drivers/bus/fslmc/mc/dpdmai.c b/drivers/bus/fslmc/mc/dpdmai.c
new file mode 100644
index 00000000..528889df
--- /dev/null
+++ b/drivers/bus/fslmc/mc/dpdmai.c
@@ -0,0 +1,429 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpdmai.h>
+#include <fsl_dpdmai_cmd.h>
+
+/**
+ * dpdmai_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpdmai_id: DPDMAI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpdmai_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpdmai_id,
+ uint16_t *token)
+{
+ struct dpdmai_cmd_open *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpdmai_cmd_open *)cmd.params;
+ cmd_params->dpdmai_id = cpu_to_le32(dpdmai_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpdmai_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmai_create() - Create the DPDMAI object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: Returned object id
+ *
+ * Create the DPDMAI object, allocate required resources and
+ * perform required initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpdmai_cfg *cfg,
+ uint32_t *obj_id)
+{
+ struct dpdmai_cmd_create *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE,
+ cmd_flags,
+ dprc_token);
+ cmd_params = (struct dpdmai_cmd_create *)cmd.params;
+ cmd_params->priorities[0] = cfg->priorities[0];
+ cmd_params->priorities[1] = cfg->priorities[1];
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *obj_id = mc_cmd_read_object_id(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id: The object id; it must be a valid id within the container that
+ * created this object;
+ *
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpdmai_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id)
+{
+ struct dpdmai_cmd_destroy *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY,
+ cmd_flags,
+ dprc_token);
+ cmd_params = (struct dpdmai_cmd_destroy *)cmd.params;
+ cmd_params->dpdmai_id = cpu_to_le32(object_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmai_is_enabled() - Check if the DPDMAI is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct dpdmai_rsp_is_enabled *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_IS_ENABLED,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpdmai_rsp_is_enabled *)cmd.params;
+ *en = dpdmai_get_field(rsp_params->en, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmai_get_attributes() - Retrieve DPDMAI attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpdmai_attr *attr)
+{
+ struct dpdmai_rsp_get_attr *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpdmai_rsp_get_attr *)cmd.params;
+ attr->id = le32_to_cpu(rsp_params->id);
+ attr->num_of_priorities = rsp_params->num_of_priorities;
+
+ return 0;
+}
+
+/**
+ * dpdmai_set_rx_queue() - Set Rx queue configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @priority: Select the queue relative to number of
+ * priorities configured at DPDMAI creation; use
+ * DPDMAI_ALL_QUEUES to configure all Rx queues
+ * identically.
+ * @cfg: Rx queue configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ const struct dpdmai_rx_queue_cfg *cfg)
+{
+ struct dpdmai_cmd_set_rx_queue *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmai_cmd_set_rx_queue *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->dest_priority = cfg->dest_cfg.priority;
+ cmd_params->priority = priority;
+ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
+ cmd_params->options = cpu_to_le32(cfg->options);
+ dpdmai_set_field(cmd_params->dest_type,
+ DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmai_get_rx_queue() - Retrieve Rx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @priority: Select the queue relative to number of
+ * priorities configured at DPDMAI creation
+ * @attr: Returned Rx queue attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpdmai_rx_queue_attr *attr)
+{
+ struct dpdmai_cmd_get_queue *cmd_params;
+ struct dpdmai_rsp_get_rx_queue *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmai_cmd_get_queue *)cmd.params;
+ cmd_params->priority = priority;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpdmai_rsp_get_rx_queue *)cmd.params;
+ attr->user_ctx = le64_to_cpu(rsp_params->user_ctx);
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
+ attr->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
+ attr->dest_cfg.priority = le32_to_cpu(rsp_params->dest_priority);
+ attr->dest_cfg.dest_type = dpdmai_get_field(rsp_params->dest_type,
+ DEST_TYPE);
+
+ return 0;
+}
+
+/**
+ * dpdmai_get_tx_queue() - Retrieve Tx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @priority: Select the queue relative to number of
+ * priorities configured at DPDMAI creation
+ * @attr: Returned Tx queue attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpdmai_tx_queue_attr *attr)
+{
+ struct dpdmai_cmd_get_queue *cmd_params;
+ struct dpdmai_rsp_get_tx_queue *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmai_cmd_get_queue *)cmd.params;
+ cmd_params->priority = priority;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpdmai_rsp_get_tx_queue *)cmd.params;
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
+
+ return 0;
+}
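
Taken together, the flib calls above form a small control-path API. The fragment below is a minimal usage sketch, not part of the patch: it assumes an already-initialized MC portal (`mc_io`) and an existing DPDMAI object id, and uses `CMD_PRI_LOW` as the command priority flag, as the rest of this bus driver does. A real consumer (e.g. a QDMA rawdev driver) would keep the token and the queue FQIDs around instead of closing the session immediately.

/* Hedged sketch of the DPDMAI control path exposed by dpdmai.c */
static int dpdmai_bringup_sketch(struct fsl_mc_io *mc_io, int dpdmai_id)
{
	struct dpdmai_attr attr;
	struct dpdmai_rx_queue_attr rx_attr;
	struct dpdmai_tx_queue_attr tx_attr;
	uint16_t token;
	int err;

	err = dpdmai_open(mc_io, CMD_PRI_LOW, dpdmai_id, &token);
	if (err)
		return err;

	err = dpdmai_get_attributes(mc_io, CMD_PRI_LOW, token, &attr);
	if (err)
		goto close;

	/* Look up the frame queues backing priority 0 */
	err = dpdmai_get_rx_queue(mc_io, CMD_PRI_LOW, token, 0, &rx_attr);
	if (!err)
		err = dpdmai_get_tx_queue(mc_io, CMD_PRI_LOW, token, 0, &tx_attr);
	if (err)
		goto close;

	err = dpdmai_enable(mc_io, CMD_PRI_LOW, token);

close:
	dpdmai_close(mc_io, CMD_PRI_LOW, token);
	return err;
}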
diff --git a/drivers/bus/fslmc/mc/fsl_dpdmai.h b/drivers/bus/fslmc/mc/fsl_dpdmai.h
new file mode 100644
index 00000000..03e46ec1
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_dpdmai.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __FSL_DPDMAI_H
+#define __FSL_DPDMAI_H
+
+struct fsl_mc_io;
+
+/* Data Path DMA Interface API
+ * Contains initialization APIs and runtime control APIs for DPDMAI
+ */
+
+/* General DPDMAI macros */
+
+/**
+ * Maximum number of Tx/Rx priorities per DPDMAI object
+ */
+#define DPDMAI_PRIO_NUM 2
+
+/**
+ * All queues considered; see dpdmai_set_rx_queue()
+ */
+#define DPDMAI_ALL_QUEUES (uint8_t)(-1)
+
+int dpdmai_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpdmai_id,
+ uint16_t *token);
+
+int dpdmai_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpdmai_cfg - Structure representing DPDMAI configuration
+ * @priorities: Priorities for the DMA hardware processing; valid priorities are
+ * configured with values 1-8; the entry following last valid entry
+ * should be configured with 0
+ */
+struct dpdmai_cfg {
+ uint8_t priorities[DPDMAI_PRIO_NUM];
+};
+
+int dpdmai_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpdmai_cfg *cfg,
+ uint32_t *obj_id);
+
+int dpdmai_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id);
+
+int dpdmai_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpdmai_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+int dpdmai_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpdmai_attr - Structure representing DPDMAI attributes
+ * @id: DPDMAI object ID
+ * @num_of_priorities: number of priorities
+ */
+struct dpdmai_attr {
+ int id;
+ uint8_t num_of_priorities;
+};
+
+int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpdmai_attr *attr);
+
+/**
+ * enum dpdmai_dest - DPDMAI destination types
+ * @DPDMAI_DEST_NONE: Unassigned destination; The queue is set in parked mode
+ * and does not generate FQDAN notifications; user is expected to dequeue
+ * from the queue based on polling or other user-defined method
+ * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ * notifications to the specified DPIO; user is expected to dequeue
+ * from the queue only after notification is received
+ * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ * FQDAN notifications, but is connected to the specified DPCON object;
+ * user is expected to dequeue from the DPCON channel
+ */
+enum dpdmai_dest {
+ DPDMAI_DEST_NONE = 0,
+ DPDMAI_DEST_DPIO = 1,
+ DPDMAI_DEST_DPCON = 2
+};
+
+/**
+ * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
+ * are 0-1 or 0-7, depending on the number of priorities in that
+ * channel; not relevant for 'DPDMAI_DEST_NONE' option
+ */
+struct dpdmai_dest_cfg {
+ enum dpdmai_dest dest_type;
+ int dest_id;
+ uint8_t priority;
+};
+
+/* DPDMAI queue modification options */
+
+/**
+ * Select to modify the user's context associated with the queue
+ */
+#define DPDMAI_QUEUE_OPT_USER_CTX 0x00000001
+
+/**
+ * Select to modify the queue's destination
+ */
+#define DPDMAI_QUEUE_OPT_DEST 0x00000002
+
+/**
+ * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration
+ * @options: Flags representing the suggested modifications to the queue;
+ * Use any combination of 'DPDMAI_QUEUE_OPT_<X>' flags
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame;
+ * valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained in 'options'
+ * @dest_cfg: Queue destination parameters;
+ * valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options'
+ */
+struct dpdmai_rx_queue_cfg {
+ uint32_t options;
+ uint64_t user_ctx;
+ struct dpdmai_dest_cfg dest_cfg;
+
+};
+
+int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ const struct dpdmai_rx_queue_cfg *cfg);
+
+/**
+ * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame
+ * @dest_cfg: Queue destination configuration
+ * @fqid: Virtual FQID value to be used for dequeue operations
+ */
+struct dpdmai_rx_queue_attr {
+ uint64_t user_ctx;
+ struct dpdmai_dest_cfg dest_cfg;
+ uint32_t fqid;
+};
+
+int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpdmai_rx_queue_attr *attr);
+
+/**
+ * struct dpdmai_tx_queue_attr - Structure representing attributes of Tx queues
+ * @fqid: Virtual FQID to be used for sending frames to DMA hardware
+ */
+
+struct dpdmai_tx_queue_attr {
+ uint32_t fqid;
+};
+
+int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpdmai_tx_queue_attr *attr);
+
+#endif /* __FSL_DPDMAI_H */
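
As an illustration of how the queue-configuration structures above compose, here is a hedged fragment (not part of the patch) that steers Rx completions for priority 0 to a DPIO and attaches a user context; `mc_io`, `token` and `dpio_id` are assumed to exist in the caller.

struct dpdmai_rx_queue_cfg cfg = {
	.options  = DPDMAI_QUEUE_OPT_USER_CTX | DPDMAI_QUEUE_OPT_DEST,
	.user_ctx = 0x1234,			/* returned in each dequeued FD */
	.dest_cfg = {
		.dest_type = DPDMAI_DEST_DPIO,	/* FQDAN notifications to a DPIO */
		.dest_id   = dpio_id,		/* hypothetical DPIO object id */
		.priority  = 0,
	},
};
int err = dpdmai_set_rx_queue(mc_io, CMD_PRI_LOW, token, 0, &cfg);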
diff --git a/drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h b/drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h
new file mode 100644
index 00000000..618e19ea
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef _FSL_DPDMAI_CMD_H
+#define _FSL_DPDMAI_CMD_H
+
+/* DPDMAI Version */
+#define DPDMAI_VER_MAJOR 3
+#define DPDMAI_VER_MINOR 2
+
+/* Command versioning */
+#define DPDMAI_CMD_BASE_VERSION 1
+#define DPDMAI_CMD_ID_OFFSET 4
+
+#define DPDMAI_CMD(id) ((id << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPDMAI_CMDID_CLOSE DPDMAI_CMD(0x800)
+#define DPDMAI_CMDID_OPEN DPDMAI_CMD(0x80E)
+#define DPDMAI_CMDID_CREATE DPDMAI_CMD(0x90E)
+#define DPDMAI_CMDID_DESTROY DPDMAI_CMD(0x98E)
+#define DPDMAI_CMDID_GET_API_VERSION DPDMAI_CMD(0xa0E)
+
+#define DPDMAI_CMDID_ENABLE DPDMAI_CMD(0x002)
+#define DPDMAI_CMDID_DISABLE DPDMAI_CMD(0x003)
+#define DPDMAI_CMDID_GET_ATTR DPDMAI_CMD(0x004)
+#define DPDMAI_CMDID_RESET DPDMAI_CMD(0x005)
+#define DPDMAI_CMDID_IS_ENABLED DPDMAI_CMD(0x006)
+
+#define DPDMAI_CMDID_SET_RX_QUEUE DPDMAI_CMD(0x1A0)
+#define DPDMAI_CMDID_GET_RX_QUEUE DPDMAI_CMD(0x1A1)
+#define DPDMAI_CMDID_GET_TX_QUEUE DPDMAI_CMD(0x1A2)
+
+/* Macros for accessing command fields smaller than 1byte */
+#define DPDMAI_MASK(field) \
+ GENMASK(DPDMAI_##field##_SHIFT + DPDMAI_##field##_SIZE - 1, \
+ DPDMAI_##field##_SHIFT)
+#define dpdmai_set_field(var, field, val) \
+ ((var) |= (((val) << DPDMAI_##field##_SHIFT) & DPDMAI_MASK(field)))
+#define dpdmai_get_field(var, field) \
+ (((var) & DPDMAI_MASK(field)) >> DPDMAI_##field##_SHIFT)
+
+#pragma pack(push, 1)
+struct dpdmai_cmd_open {
+ uint32_t dpdmai_id;
+};
+
+struct dpdmai_cmd_create {
+ uint8_t pad;
+ uint8_t priorities[2];
+};
+
+struct dpdmai_cmd_destroy {
+ uint32_t dpdmai_id;
+};
+
+#define DPDMAI_ENABLE_SHIFT 0
+#define DPDMAI_ENABLE_SIZE 1
+
+struct dpdmai_rsp_is_enabled {
+ /* only the LSB bit */
+ uint8_t en;
+};
+
+struct dpdmai_rsp_get_attr {
+ uint32_t id;
+ uint8_t num_of_priorities;
+};
+
+#define DPDMAI_DEST_TYPE_SHIFT 0
+#define DPDMAI_DEST_TYPE_SIZE 4
+
+struct dpdmai_cmd_set_rx_queue {
+ uint32_t dest_id;
+ uint8_t dest_priority;
+ uint8_t priority;
+ /* from LSB: dest_type:4 */
+ uint8_t dest_type;
+ uint8_t pad;
+ uint64_t user_ctx;
+ uint32_t options;
+};
+
+struct dpdmai_cmd_get_queue {
+ uint8_t pad[5];
+ uint8_t priority;
+};
+
+struct dpdmai_rsp_get_rx_queue {
+ uint32_t dest_id;
+ uint8_t dest_priority;
+ uint8_t pad1;
+ /* from LSB: dest_type:4 */
+ uint8_t dest_type;
+ uint8_t pad2;
+ uint64_t user_ctx;
+ uint32_t fqid;
+};
+
+struct dpdmai_rsp_get_tx_queue {
+ uint64_t pad;
+ uint32_t fqid;
+};
+
+#pragma pack(pop)
+#endif /* _FSL_DPDMAI_CMD_H */
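
To make the encoding concrete, two worked expansions of the macros defined above; the values follow directly from the definitions in this header and the snippet is illustrative only.

/* Command id: the base version lands in the low nibble.
 *   DPDMAI_CMDID_OPEN = (0x80E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION
 *                     = 0x80E1
 *
 * Sub-byte fields: DEST_TYPE occupies bits [3:0] of dest_type
 * (SHIFT 0, SIZE 4), so:
 */
uint8_t dest_type = 0;
dpdmai_set_field(dest_type, DEST_TYPE, DPDMAI_DEST_DPIO);	/* dest_type == 0x1 */
int t = dpdmai_get_field(dest_type, DEST_TYPE);			/* t == DPDMAI_DEST_DPIO */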
diff --git a/drivers/bus/fslmc/mc/fsl_mc_cmd.h b/drivers/bus/fslmc/mc/fsl_mc_cmd.h
index a3c3e798..ac919610 100644
--- a/drivers/bus/fslmc/mc/fsl_mc_cmd.h
+++ b/drivers/bus/fslmc/mc/fsl_mc_cmd.h
@@ -27,7 +27,7 @@
#define le32_to_cpu rte_le_to_cpu_32
#define le16_to_cpu rte_le_to_cpu_16
-#define BITS_PER_LONG 64
+#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
#define GENMASK(h, l) \
(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
diff --git a/drivers/bus/fslmc/meson.build b/drivers/bus/fslmc/meson.build
new file mode 100644
index 00000000..22a56a6f
--- /dev/null
+++ b/drivers/bus/fslmc/meson.build
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['eventdev', 'kvargs']
+sources = files('fslmc_bus.c',
+ 'fslmc_vfio.c',
+ 'mc/dpbp.c',
+ 'mc/dpci.c',
+ 'mc/dpcon.c',
+ 'mc/dpdmai.c',
+ 'mc/dpio.c',
+ 'mc/dpmng.c',
+ 'mc/mc_sys.c',
+ 'portal/dpaa2_hw_dpbp.c',
+ 'portal/dpaa2_hw_dpci.c',
+ 'portal/dpaa2_hw_dpio.c',
+ 'qbman/qbman_portal.c',
+ 'qbman/qbman_debug.c')
+
+allow_experimental_apis = true
+
+includes += include_directories('mc', 'qbman/include', 'portal')
+cflags += ['-D_GNU_SOURCE']
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
index f1f14e29..39c5adf9 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
@@ -44,7 +44,7 @@ dpaa2_create_dpbp_device(int vdev_fd __rte_unused,
/* Allocate DPAA2 dpbp handle */
dpbp_node = rte_malloc(NULL, sizeof(struct dpaa2_dpbp_dev), 0);
if (!dpbp_node) {
- PMD_INIT_LOG(ERR, "Memory allocation failed for DPBP Device");
+ DPAA2_BUS_ERR("Memory allocation failed for DPBP Device");
return -1;
}
@@ -53,8 +53,8 @@ dpaa2_create_dpbp_device(int vdev_fd __rte_unused,
ret = dpbp_open(&dpbp_node->dpbp,
CMD_PRI_LOW, dpbp_id, &dpbp_node->token);
if (ret) {
- PMD_INIT_LOG(ERR, "Resource alloc failure with err code: %d",
- ret);
+ DPAA2_BUS_ERR("Unable to open buffer pool object: err(%d)",
+ ret);
rte_free(dpbp_node);
return -1;
}
@@ -62,8 +62,8 @@ dpaa2_create_dpbp_device(int vdev_fd __rte_unused,
/* Clean the device first */
ret = dpbp_reset(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_node->token);
if (ret) {
- PMD_INIT_LOG(ERR, "Failure cleaning dpbp device with"
- " error code %d\n", ret);
+ DPAA2_BUS_ERR("Unable to reset buffer pool device. err(%d)",
+ ret);
dpbp_close(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_node->token);
rte_free(dpbp_node);
return -1;
@@ -74,8 +74,6 @@ dpaa2_create_dpbp_device(int vdev_fd __rte_unused,
TAILQ_INSERT_TAIL(&dpbp_dev_list, dpbp_node, next);
- RTE_LOG(DEBUG, PMD, "DPAA2: Added [dpbp.%d]\n", dpbp_id);
-
if (!register_once) {
rte_mbuf_set_platform_mempool_ops(DPAA2_MEMPOOL_OPS_NAME);
register_once = 1;
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
index fb28e497..5ad0374d 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
@@ -39,13 +39,14 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
struct dpci_attr attr;
struct dpci_rx_queue_cfg rx_queue_cfg;
struct dpci_rx_queue_attr rx_attr;
+ struct dpci_tx_queue_attr tx_attr;
int ret, i;
/* Allocate DPAA2 dpci handle */
dpci_node = rte_malloc(NULL, sizeof(struct dpaa2_dpci_dev), 0);
if (!dpci_node) {
- PMD_INIT_LOG(ERR, "Memory allocation failed for DPCI Device");
- return -1;
+ DPAA2_BUS_ERR("Memory allocation failed for DPCI Device");
+ return -ENOMEM;
}
/* Open the dpci object */
@@ -53,43 +54,57 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
ret = dpci_open(&dpci_node->dpci,
CMD_PRI_LOW, dpci_id, &dpci_node->token);
if (ret) {
- PMD_INIT_LOG(ERR, "Resource alloc failure with err code: %d",
- ret);
- rte_free(dpci_node);
- return -1;
+ DPAA2_BUS_ERR("Resource alloc failure with err code: %d", ret);
+ goto err;
}
/* Get the device attributes */
ret = dpci_get_attributes(&dpci_node->dpci,
CMD_PRI_LOW, dpci_node->token, &attr);
if (ret != 0) {
- PMD_INIT_LOG(ERR, "Reading device failed with err code: %d",
- ret);
- rte_free(dpci_node);
- return -1;
+ DPAA2_BUS_ERR("Reading device failed with err code: %d", ret);
+ goto err;
}
- /* Set up the Rx Queue */
- memset(&rx_queue_cfg, 0, sizeof(struct dpci_rx_queue_cfg));
- ret = dpci_set_rx_queue(&dpci_node->dpci,
- CMD_PRI_LOW,
- dpci_node->token,
- 0, &rx_queue_cfg);
- if (ret) {
- PMD_INIT_LOG(ERR, "Setting Rx queue failed with err code: %d",
- ret);
- rte_free(dpci_node);
- return -1;
+ for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) {
+ struct dpaa2_queue *rxq;
+
+ memset(&rx_queue_cfg, 0, sizeof(struct dpci_rx_queue_cfg));
+ ret = dpci_set_rx_queue(&dpci_node->dpci,
+ CMD_PRI_LOW,
+ dpci_node->token,
+ i, &rx_queue_cfg);
+ if (ret) {
+ DPAA2_BUS_ERR("Setting Rx queue failed with err code: %d",
+ ret);
+ goto err;
+ }
+
+ /* Allocate DQ storage for the DPCI Rx queues */
+ rxq = &(dpci_node->rx_queue[i]);
+ rxq->q_storage = rte_malloc("dq_storage",
+ sizeof(struct queue_storage_info_t),
+ RTE_CACHE_LINE_SIZE);
+ if (!rxq->q_storage) {
+ DPAA2_BUS_ERR("q_storage allocation failed\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
+ ret = dpaa2_alloc_dq_storage(rxq->q_storage);
+ if (ret) {
+ DPAA2_BUS_ERR("dpaa2_alloc_dq_storage failed\n");
+ goto err;
+ }
}
/* Enable the device */
ret = dpci_enable(&dpci_node->dpci,
CMD_PRI_LOW, dpci_node->token);
if (ret != 0) {
- PMD_INIT_LOG(ERR, "Enabling device failed with err code: %d",
- ret);
- rte_free(dpci_node);
- return -1;
+ DPAA2_BUS_ERR("Enabling device failed with err code: %d", ret);
+ goto err;
}
for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) {
@@ -99,14 +114,22 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
dpci_node->token, i,
&rx_attr);
if (ret != 0) {
- PMD_INIT_LOG(ERR,
- "Reading device failed with err code: %d",
- ret);
- rte_free(dpci_node);
- return -1;
+ DPAA2_BUS_ERR("Rx queue fetch failed with err code: %d",
+ ret);
+ goto err;
}
+ dpci_node->rx_queue[i].fqid = rx_attr.fqid;
- dpci_node->queue[i].fqid = rx_attr.fqid;
+ ret = dpci_get_tx_queue(&dpci_node->dpci,
+ CMD_PRI_LOW,
+ dpci_node->token, i,
+ &tx_attr);
+ if (ret != 0) {
+ DPAA2_BUS_ERR("Reading device failed with err code: %d",
+ ret);
+ goto err;
+ }
+ dpci_node->tx_queue[i].fqid = tx_attr.fqid;
}
dpci_node->dpci_id = dpci_id;
@@ -114,9 +137,20 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
TAILQ_INSERT_TAIL(&dpci_dev_list, dpci_node, next);
- RTE_LOG(DEBUG, PMD, "DPAA2: Added [dpci.%d]\n", dpci_id);
-
return 0;
+
+err:
+ for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) {
+ struct dpaa2_queue *rxq = &(dpci_node->rx_queue[i]);
+
+ if (rxq->q_storage) {
+ dpaa2_free_dq_storage(rxq->q_storage);
+ rte_free(rxq->q_storage);
+ }
+ }
+ rte_free(dpci_node);
+
+ return ret;
}
struct dpaa2_dpci_dev *rte_dpaa2_alloc_dpci_dev(void)
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
index eefde155..99f70be1 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
@@ -101,7 +101,7 @@ static void dpaa2_affine_dpio_intr_to_respective_core(int32_t dpio_id)
snprintf(string, STRING_LEN, "dpio.%d", dpio_id);
file = fopen("/proc/interrupts", "r");
if (!file) {
- PMD_DRV_LOG(WARNING, "Failed to open /proc/interrupts file\n");
+ DPAA2_BUS_WARN("Failed to open /proc/interrupts file");
return;
}
while (getline(&temp, &len, file) != -1) {
@@ -112,8 +112,8 @@ static void dpaa2_affine_dpio_intr_to_respective_core(int32_t dpio_id)
}
if (!token) {
- PMD_DRV_LOG(WARNING, "Failed to get interrupt id for dpio.%d\n",
- dpio_id);
+ DPAA2_BUS_WARN("Failed to get interrupt id for dpio.%d",
+ dpio_id);
if (temp)
free(temp);
fclose(file);
@@ -125,10 +125,10 @@ static void dpaa2_affine_dpio_intr_to_respective_core(int32_t dpio_id)
cpu_mask, token);
ret = system(command);
if (ret < 0)
- PMD_DRV_LOG(WARNING,
- "Failed to affine interrupts on respective core\n");
+ DPAA2_BUS_WARN(
+ "Failed to affine interrupts on respective core");
else
- PMD_DRV_LOG(WARNING, " %s command is executed\n", command);
+ DPAA2_BUS_DEBUG(" %s command is executed", command);
free(temp);
fclose(file);
@@ -143,7 +143,7 @@ static int dpaa2_dpio_intr_init(struct dpaa2_dpio_dev *dpio_dev)
dpio_epoll_fd = epoll_create(1);
ret = rte_dpaa2_intr_enable(&dpio_dev->intr_handle, 0);
if (ret) {
- PMD_DRV_LOG(ERR, "Interrupt registeration failed\n");
+ DPAA2_BUS_ERR("Interrupt registeration failed");
return -1;
}
@@ -166,7 +166,7 @@ static int dpaa2_dpio_intr_init(struct dpaa2_dpio_dev *dpio_dev)
ret = epoll_ctl(dpio_epoll_fd, EPOLL_CTL_ADD, eventfd, &epoll_ev);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "epoll_ctl failed\n");
+ DPAA2_BUS_ERR("epoll_ctl failed");
return -1;
}
dpio_dev->epoll_fd = dpio_epoll_fd;
@@ -185,28 +185,27 @@ configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev)
dpio_dev->dpio = malloc(sizeof(struct fsl_mc_io));
if (!dpio_dev->dpio) {
- PMD_INIT_LOG(ERR, "Memory allocation failure\n");
+ DPAA2_BUS_ERR("Memory allocation failure");
return -1;
}
- PMD_DRV_LOG(DEBUG, "Allocated DPIO Portal[%p]", dpio_dev->dpio);
dpio_dev->dpio->regs = dpio_dev->mc_portal;
if (dpio_open(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->hw_id,
&dpio_dev->token)) {
- PMD_INIT_LOG(ERR, "Failed to allocate IO space\n");
+ DPAA2_BUS_ERR("Failed to allocate IO space");
free(dpio_dev->dpio);
return -1;
}
if (dpio_reset(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
- PMD_INIT_LOG(ERR, "Failed to reset dpio\n");
+ DPAA2_BUS_ERR("Failed to reset dpio");
dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
free(dpio_dev->dpio);
return -1;
}
if (dpio_enable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
- PMD_INIT_LOG(ERR, "Failed to Enable dpio\n");
+ DPAA2_BUS_ERR("Failed to Enable dpio");
dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
free(dpio_dev->dpio);
return -1;
@@ -214,7 +213,7 @@ configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev)
if (dpio_get_attributes(dpio_dev->dpio, CMD_PRI_LOW,
dpio_dev->token, &attr)) {
- PMD_INIT_LOG(ERR, "DPIO Get attribute failed\n");
+ DPAA2_BUS_ERR("DPIO Get attribute failed");
dpio_disable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
free(dpio_dev->dpio);
@@ -231,7 +230,7 @@ configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev)
dpio_dev->sw_portal = qbman_swp_init(&p_des);
if (dpio_dev->sw_portal == NULL) {
- PMD_DRV_LOG(ERR, " QBMan SW Portal Init failed\n");
+ DPAA2_BUS_ERR("QBMan SW Portal Init failed");
dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
free(dpio_dev->dpio);
return -1;
@@ -249,7 +248,7 @@ dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev, int cpu_id)
if (cpu_id < 0) {
cpu_id = rte_get_master_lcore();
if (cpu_id < 0) {
- RTE_LOG(ERR, PMD, "\tGetting CPU Index failed\n");
+ DPAA2_BUS_ERR("Getting CPU Index failed");
return -1;
}
}
@@ -258,19 +257,19 @@ dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev, int cpu_id)
*/
sdest = dpaa2_core_cluster_sdest(cpu_id);
- PMD_DRV_LOG(DEBUG, "Portal= %d CPU= %u SDEST= %d",
- dpio_dev->index, cpu_id, sdest);
+ DPAA2_BUS_DEBUG("Portal= %d CPU= %u SDEST= %d",
+ dpio_dev->index, cpu_id, sdest);
ret = dpio_set_stashing_destination(dpio_dev->dpio, CMD_PRI_LOW,
dpio_dev->token, sdest);
if (ret) {
- PMD_DRV_LOG(ERR, "%d ERROR in SDEST\n", ret);
+ DPAA2_BUS_ERR("%d ERROR in SDEST", ret);
return -1;
}
#ifdef RTE_LIBRTE_PMD_DPAA2_EVENTDEV
if (dpaa2_dpio_intr_init(dpio_dev)) {
- PMD_DRV_LOG(ERR, "Interrupt registration failed for dpio\n");
+ DPAA2_BUS_ERR("Interrupt registration failed for dpio");
return -1;
}
#endif
@@ -291,12 +290,12 @@ struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(int cpu_id)
if (!dpio_dev)
return NULL;
- PMD_DRV_LOG(DEBUG, "New Portal=0x%x (%d) affined thread - %lu",
- dpio_dev, dpio_dev->index, syscall(SYS_gettid));
+ DPAA2_BUS_DEBUG("New Portal %p (%d) affined thread - %lu",
+ dpio_dev, dpio_dev->index, syscall(SYS_gettid));
ret = dpaa2_configure_stashing(dpio_dev, cpu_id);
if (ret)
- PMD_DRV_LOG(ERR, "dpaa2_configure_stashing failed");
+ DPAA2_BUS_ERR("dpaa2_configure_stashing failed");
return dpio_dev;
}
@@ -314,8 +313,9 @@ dpaa2_affine_qbman_swp(void)
return -1;
if (dpaa2_io_portal[lcore_id].dpio_dev) {
- PMD_DRV_LOG(INFO, "DPAA Portal=0x%x (%d) is being shared"
- " between thread %lu and current %lu",
+ DPAA2_BUS_DP_INFO("DPAA Portal=%p (%d) is being shared"
+ " between thread %" PRIu64 " and current "
+ "%" PRIu64 "\n",
dpaa2_io_portal[lcore_id].dpio_dev,
dpaa2_io_portal[lcore_id].dpio_dev->index,
dpaa2_io_portal[lcore_id].net_tid,
@@ -326,7 +326,8 @@ dpaa2_affine_qbman_swp(void)
[lcore_id].dpio_dev->ref_count);
dpaa2_io_portal[lcore_id].net_tid = tid;
- PMD_DRV_LOG(DEBUG, "Old Portal=0x%x (%d) affined thread - %lu",
+ DPAA2_BUS_DP_DEBUG("Old Portal=%p (%d) affined thread - "
+ "%" PRIu64 "\n",
dpaa2_io_portal[lcore_id].dpio_dev,
dpaa2_io_portal[lcore_id].dpio_dev->index,
tid);
@@ -348,7 +349,7 @@ dpaa2_affine_qbman_swp(void)
}
int
-dpaa2_affine_qbman_swp_sec(void)
+dpaa2_affine_qbman_ethrx_swp(void)
{
unsigned int lcore_id = rte_lcore_id();
uint64_t tid = syscall(SYS_gettid);
@@ -359,32 +360,36 @@ dpaa2_affine_qbman_swp_sec(void)
else if (lcore_id >= RTE_MAX_LCORE)
return -1;
- if (dpaa2_io_portal[lcore_id].sec_dpio_dev) {
- PMD_DRV_LOG(INFO, "DPAA Portal=0x%x (%d) is being shared"
- " between thread %lu and current %lu",
- dpaa2_io_portal[lcore_id].sec_dpio_dev,
- dpaa2_io_portal[lcore_id].sec_dpio_dev->index,
- dpaa2_io_portal[lcore_id].sec_tid,
- tid);
- RTE_PER_LCORE(_dpaa2_io).sec_dpio_dev
- = dpaa2_io_portal[lcore_id].sec_dpio_dev;
+ if (dpaa2_io_portal[lcore_id].ethrx_dpio_dev) {
+ DPAA2_BUS_DP_INFO(
+ "DPAA Portal=%p (%d) is being shared between thread"
+ " %" PRIu64 " and current %" PRIu64 "\n",
+ dpaa2_io_portal[lcore_id].ethrx_dpio_dev,
+ dpaa2_io_portal[lcore_id].ethrx_dpio_dev->index,
+ dpaa2_io_portal[lcore_id].sec_tid,
+ tid);
+ RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev
+ = dpaa2_io_portal[lcore_id].ethrx_dpio_dev;
rte_atomic16_inc(&dpaa2_io_portal
- [lcore_id].sec_dpio_dev->ref_count);
+ [lcore_id].ethrx_dpio_dev->ref_count);
dpaa2_io_portal[lcore_id].sec_tid = tid;
- PMD_DRV_LOG(DEBUG, "Old Portal=0x%x (%d) affined thread - %lu",
- dpaa2_io_portal[lcore_id].sec_dpio_dev,
- dpaa2_io_portal[lcore_id].sec_dpio_dev->index,
- tid);
+ DPAA2_BUS_DP_DEBUG(
+ "Old Portal=%p (%d) affined thread"
+ " - %" PRIu64 "\n",
+ dpaa2_io_portal[lcore_id].ethrx_dpio_dev,
+ dpaa2_io_portal[lcore_id].ethrx_dpio_dev->index,
+ tid);
return 0;
}
/* Populate the dpaa2_io_portal structure */
- dpaa2_io_portal[lcore_id].sec_dpio_dev = dpaa2_get_qbman_swp(lcore_id);
+ dpaa2_io_portal[lcore_id].ethrx_dpio_dev =
+ dpaa2_get_qbman_swp(lcore_id);
- if (dpaa2_io_portal[lcore_id].sec_dpio_dev) {
- RTE_PER_LCORE(_dpaa2_io).sec_dpio_dev
- = dpaa2_io_portal[lcore_id].sec_dpio_dev;
+ if (dpaa2_io_portal[lcore_id].ethrx_dpio_dev) {
+ RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev
+ = dpaa2_io_portal[lcore_id].ethrx_dpio_dev;
dpaa2_io_portal[lcore_id].sec_tid = tid;
return 0;
} else {
@@ -401,15 +406,14 @@ dpaa2_create_dpio_device(int vdev_fd,
struct vfio_region_info reg_info = { .argsz = sizeof(reg_info)};
if (obj_info->num_regions < NUM_DPIO_REGIONS) {
- PMD_INIT_LOG(ERR, "ERROR, Not sufficient number "
- "of DPIO regions.\n");
+ DPAA2_BUS_ERR("Not sufficient number of DPIO regions");
return -1;
}
dpio_dev = rte_malloc(NULL, sizeof(struct dpaa2_dpio_dev),
RTE_CACHE_LINE_SIZE);
if (!dpio_dev) {
- PMD_INIT_LOG(ERR, "Memory allocation failed for DPIO Device\n");
+ DPAA2_BUS_ERR("Memory allocation failed for DPIO Device");
return -1;
}
@@ -421,31 +425,31 @@ dpaa2_create_dpio_device(int vdev_fd,
reg_info.index = 0;
if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
- PMD_INIT_LOG(ERR, "vfio: error getting region info\n");
+ DPAA2_BUS_ERR("vfio: error getting region info");
rte_free(dpio_dev);
return -1;
}
dpio_dev->ce_size = reg_info.size;
- dpio_dev->qbman_portal_ce_paddr = (uint64_t)mmap(NULL, reg_info.size,
+ dpio_dev->qbman_portal_ce_paddr = (size_t)mmap(NULL, reg_info.size,
PROT_WRITE | PROT_READ, MAP_SHARED,
vdev_fd, reg_info.offset);
reg_info.index = 1;
if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
- PMD_INIT_LOG(ERR, "vfio: error getting region info\n");
+ DPAA2_BUS_ERR("vfio: error getting region info");
rte_free(dpio_dev);
return -1;
}
dpio_dev->ci_size = reg_info.size;
- dpio_dev->qbman_portal_ci_paddr = (uint64_t)mmap(NULL, reg_info.size,
+ dpio_dev->qbman_portal_ci_paddr = (size_t)mmap(NULL, reg_info.size,
PROT_WRITE | PROT_READ, MAP_SHARED,
vdev_fd, reg_info.offset);
if (configure_dpio_qbman_swp(dpio_dev)) {
- PMD_INIT_LOG(ERR,
- "Fail to configure the dpio qbman portal for %d\n",
+ DPAA2_BUS_ERR(
+ "Fail to configure the dpio qbman portal for %d",
dpio_dev->hw_id);
rte_free(dpio_dev);
return -1;
@@ -455,8 +459,8 @@ dpaa2_create_dpio_device(int vdev_fd,
dpio_dev->index = io_space_count;
if (rte_dpaa2_vfio_setup_intr(&dpio_dev->intr_handle, vdev_fd, 1)) {
- PMD_INIT_LOG(ERR, "Fail to setup interrupt for %d\n",
- dpio_dev->hw_id);
+ DPAA2_BUS_ERR("Fail to setup interrupt for %d",
+ dpio_dev->hw_id);
rte_free(dpio_dev);
}
@@ -466,21 +470,20 @@ dpaa2_create_dpio_device(int vdev_fd,
if (mc_get_soc_version(dpio_dev->dpio,
CMD_PRI_LOW, &mc_plat_info)) {
- PMD_INIT_LOG(ERR, "\tmc_get_soc_version failed\n");
+ DPAA2_BUS_ERR("Unable to get SoC version information");
} else if ((mc_plat_info.svr & 0xffff0000) == SVR_LS1080A) {
dpaa2_core_cluster_base = 0x02;
dpaa2_cluster_sz = 4;
- PMD_INIT_LOG(DEBUG, "\tLS108x (A53) Platform Detected");
+ DPAA2_BUS_DEBUG("LS108x (A53) Platform Detected");
} else if ((mc_plat_info.svr & 0xffff0000) == SVR_LX2160A) {
dpaa2_core_cluster_base = 0x00;
dpaa2_cluster_sz = 2;
- PMD_INIT_LOG(DEBUG, "\tLX2160 Platform Detected");
+ DPAA2_BUS_DEBUG("LX2160 Platform Detected");
}
dpaa2_svr_family = (mc_plat_info.svr & 0xffff0000);
}
TAILQ_INSERT_TAIL(&dpio_dev_list, dpio_dev, next);
- RTE_LOG(DEBUG, PMD, "DPAA2: Added [dpio.%d]\n", object_id);
return 0;
}
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
index c0bd8782..d593eea7 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
@@ -13,7 +13,7 @@
struct dpaa2_io_portal_t {
struct dpaa2_dpio_dev *dpio_dev;
- struct dpaa2_dpio_dev *sec_dpio_dev;
+ struct dpaa2_dpio_dev *ethrx_dpio_dev;
uint64_t net_tid;
uint64_t sec_tid;
void *eventdev;
@@ -25,8 +25,8 @@ RTE_DECLARE_PER_LCORE(struct dpaa2_io_portal_t, _dpaa2_io);
#define DPAA2_PER_LCORE_DPIO RTE_PER_LCORE(_dpaa2_io).dpio_dev
#define DPAA2_PER_LCORE_PORTAL DPAA2_PER_LCORE_DPIO->sw_portal
-#define DPAA2_PER_LCORE_SEC_DPIO RTE_PER_LCORE(_dpaa2_io).sec_dpio_dev
-#define DPAA2_PER_LCORE_SEC_PORTAL DPAA2_PER_LCORE_SEC_DPIO->sw_portal
+#define DPAA2_PER_LCORE_ETHRX_DPIO RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev
+#define DPAA2_PER_LCORE_ETHRX_PORTAL DPAA2_PER_LCORE_ETHRX_DPIO->sw_portal
/* Variable to store DPAA2 platform type */
extern uint32_t dpaa2_svr_family;
@@ -39,7 +39,7 @@ struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(int cpu_id);
int dpaa2_affine_qbman_swp(void);
/* Affine additional DPIO portal to current crypto processing thread */
-int dpaa2_affine_qbman_swp_sec(void);
+int dpaa2_affine_qbman_ethrx_swp(void);
/* allocate memory for FQ - dq storage */
int
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index d421dbf0..82075936 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -142,7 +142,8 @@ struct dpaa2_dpci_dev {
uint16_t token;
rte_atomic16_t in_use;
uint32_t dpci_id; /*HW ID for DPCI object */
- struct dpaa2_queue queue[DPAA2_DPCI_MAX_QUEUES];
+ struct dpaa2_queue rx_queue[DPAA2_DPCI_MAX_QUEUES];
+ struct dpaa2_queue tx_queue[DPAA2_DPCI_MAX_QUEUES];
};
/*! Global MCP list */
@@ -174,7 +175,7 @@ enum qbman_fd_format {
};
/*Macros to define operations on FD*/
#define DPAA2_SET_FD_ADDR(fd, addr) do { \
- (fd)->simple.addr_lo = lower_32_bits((uint64_t)(addr)); \
+ (fd)->simple.addr_lo = lower_32_bits((size_t)(addr)); \
(fd)->simple.addr_hi = upper_32_bits((uint64_t)(addr)); \
} while (0)
#define DPAA2_SET_FD_LEN(fd, length) ((fd)->simple.len = length)
@@ -188,50 +189,55 @@ enum qbman_fd_format {
((fd)->simple.frc = (0x80000000 | (len)))
#define DPAA2_GET_FD_FRC_PARSE_SUM(fd) \
((uint16_t)(((fd)->simple.frc & 0xffff0000) >> 16))
-#define DPAA2_SET_FD_FRC(fd, frc) ((fd)->simple.frc = frc)
+#define DPAA2_SET_FD_FRC(fd, _frc) ((fd)->simple.frc = _frc)
#define DPAA2_RESET_FD_CTRL(fd) ((fd)->simple.ctrl = 0)
#define DPAA2_SET_FD_ASAL(fd, asal) ((fd)->simple.ctrl |= (asal << 16))
#define DPAA2_SET_FD_FLC(fd, addr) do { \
- (fd)->simple.flc_lo = lower_32_bits((uint64_t)(addr)); \
+ (fd)->simple.flc_lo = lower_32_bits((size_t)(addr)); \
(fd)->simple.flc_hi = upper_32_bits((uint64_t)(addr)); \
} while (0)
#define DPAA2_SET_FLE_INTERNAL_JD(fle, len) ((fle)->frc = (0x80000000 | (len)))
#define DPAA2_GET_FLE_ADDR(fle) \
- (uint64_t)((((uint64_t)((fle)->addr_hi)) << 32) + (fle)->addr_lo)
+ (size_t)((((uint64_t)((fle)->addr_hi)) << 32) + (fle)->addr_lo)
#define DPAA2_SET_FLE_ADDR(fle, addr) do { \
- (fle)->addr_lo = lower_32_bits((uint64_t)addr); \
- (fle)->addr_hi = upper_32_bits((uint64_t)addr); \
+ (fle)->addr_lo = lower_32_bits((size_t)addr); \
+ (fle)->addr_hi = upper_32_bits((uint64_t)addr); \
} while (0)
#define DPAA2_GET_FLE_CTXT(fle) \
- (uint64_t)((((uint64_t)((fle)->reserved[1])) << 32) + \
- (fle)->reserved[0])
+ ((((uint64_t)((fle)->reserved[1])) << 32) + (fle)->reserved[0])
#define DPAA2_FLE_SAVE_CTXT(fle, addr) do { \
- (fle)->reserved[0] = lower_32_bits((uint64_t)addr); \
- (fle)->reserved[1] = upper_32_bits((uint64_t)addr); \
+ (fle)->reserved[0] = lower_32_bits((size_t)addr); \
+ (fle)->reserved[1] = upper_32_bits((uint64_t)addr); \
} while (0)
#define DPAA2_SET_FLE_OFFSET(fle, offset) \
((fle)->fin_bpid_offset |= (uint32_t)(offset) << 16)
-#define DPAA2_SET_FLE_BPID(fle, bpid) ((fle)->fin_bpid_offset |= (uint64_t)bpid)
+#define DPAA2_SET_FLE_LEN(fle, len) ((fle)->length = len)
+#define DPAA2_SET_FLE_BPID(fle, bpid) ((fle)->fin_bpid_offset |= (size_t)bpid)
#define DPAA2_GET_FLE_BPID(fle) ((fle)->fin_bpid_offset & 0x000000ff)
-#define DPAA2_SET_FLE_FIN(fle) ((fle)->fin_bpid_offset |= (uint64_t)1 << 31)
+#define DPAA2_SET_FLE_FIN(fle) ((fle)->fin_bpid_offset |= 1 << 31)
#define DPAA2_SET_FLE_IVP(fle) (((fle)->fin_bpid_offset |= 0x00004000))
+#define DPAA2_SET_FLE_BMT(fle) (((fle)->fin_bpid_offset |= 0x00008000))
#define DPAA2_SET_FD_COMPOUND_FMT(fd) \
((fd)->simple.bpid_offset |= (uint32_t)1 << 28)
#define DPAA2_GET_FD_ADDR(fd) \
-((uint64_t)((((uint64_t)((fd)->simple.addr_hi)) << 32) + (fd)->simple.addr_lo))
+(((((uint64_t)((fd)->simple.addr_hi)) << 32) + (fd)->simple.addr_lo))
#define DPAA2_GET_FD_LEN(fd) ((fd)->simple.len)
#define DPAA2_GET_FD_BPID(fd) (((fd)->simple.bpid_offset & 0x00003FFF))
#define DPAA2_GET_FD_IVP(fd) (((fd)->simple.bpid_offset & 0x00004000) >> 14)
#define DPAA2_GET_FD_OFFSET(fd) (((fd)->simple.bpid_offset & 0x0FFF0000) >> 16)
+#define DPAA2_GET_FD_FRC(fd) ((fd)->simple.frc)
+#define DPAA2_GET_FD_FLC(fd) \
+ (((uint64_t)((fd)->simple.flc_hi) << 32) + (fd)->simple.flc_lo)
+#define DPAA2_GET_FD_ERR(fd) ((fd)->simple.bpid_offset & 0x000000FF)
#define DPAA2_GET_FLE_OFFSET(fle) (((fle)->fin_bpid_offset & 0x0FFF0000) >> 16)
#define DPAA2_SET_FLE_SG_EXT(fle) ((fle)->fin_bpid_offset |= (uint64_t)1 << 29)
#define DPAA2_IS_SET_FLE_SG_EXT(fle) \
(((fle)->fin_bpid_offset & ((uint64_t)1 << 29)) ? 1 : 0)
#define DPAA2_INLINE_MBUF_FROM_BUF(buf, meta_data_size) \
- ((struct rte_mbuf *)((uint64_t)(buf) - (meta_data_size)))
+ ((struct rte_mbuf *)((size_t)(buf) - (meta_data_size)))
#define DPAA2_ASAL_VAL (DPAA2_MBUF_HW_ANNOTATION / 64)
@@ -255,47 +261,53 @@ enum qbman_fd_format {
*/
#define DPAA2_EQ_RESP_ALWAYS 1
+/* Various structures representing contiguous memory maps */
+struct dpaa2_memseg {
+ TAILQ_ENTRY(dpaa2_memseg) next;
+ char *vaddr;
+ rte_iova_t iova;
+ size_t len;
+};
+
+TAILQ_HEAD(dpaa2_memseg_list, dpaa2_memseg);
+extern struct dpaa2_memseg_list rte_dpaa2_memsegs;
+
#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
extern uint8_t dpaa2_virt_mode;
static void *dpaa2_mem_ptov(phys_addr_t paddr) __attribute__((unused));
/* todo - this is costly, need to write a fast coversion routine */
static void *dpaa2_mem_ptov(phys_addr_t paddr)
{
- const struct rte_memseg *memseg;
- int i;
+ struct dpaa2_memseg *ms;
if (dpaa2_virt_mode)
- return (void *)paddr;
-
- memseg = rte_eal_get_physmem_layout();
-
- for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
- if (paddr >= memseg[i].iova &&
- (char *)paddr < (char *)memseg[i].iova + memseg[i].len)
- return (void *)(memseg[i].addr_64
- + (paddr - memseg[i].iova));
+ return (void *)(size_t)paddr;
+
+ /* Check if the address is already part of the memseg list internally
+ * maintained by the dpaa2 driver.
+ */
+ TAILQ_FOREACH(ms, &rte_dpaa2_memsegs, next) {
+ if (paddr >= ms->iova && paddr <
+ ms->iova + ms->len)
+ return RTE_PTR_ADD(ms->vaddr, (uintptr_t)(paddr - ms->iova));
}
- return NULL;
+
+ /* If not, fall back to a full memseg list search */
+ return rte_mem_iova2virt(paddr);
}
static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr) __attribute__((unused));
static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
{
const struct rte_memseg *memseg;
- int i;
if (dpaa2_virt_mode)
return vaddr;
- memseg = rte_eal_get_physmem_layout();
-
- for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
- if (vaddr >= memseg[i].addr_64 &&
- vaddr < memseg[i].addr_64 + memseg[i].len)
- return memseg[i].iova
- + (vaddr - memseg[i].addr_64);
- }
- return (phys_addr_t)(NULL);
+ memseg = rte_mem_virt2memseg((void *)(uintptr_t)vaddr, NULL);
+ if (memseg)
+ return memseg->phys_addr + RTE_PTR_DIFF(vaddr, memseg->addr);
+ return (size_t)NULL;
}
/**
@@ -306,28 +318,26 @@ static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
*/
#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_iova)
-#define DPAA2_OP_VADDR_TO_IOVA(op) (op->phys_addr)
/**
* macro to convert Virtual address to IOVA
*/
-#define DPAA2_VADDR_TO_IOVA(_vaddr) dpaa2_mem_vtop((uint64_t)(_vaddr))
+#define DPAA2_VADDR_TO_IOVA(_vaddr) dpaa2_mem_vtop((size_t)(_vaddr))
/**
* macro to convert IOVA to Virtual address
*/
-#define DPAA2_IOVA_TO_VADDR(_iova) dpaa2_mem_ptov((phys_addr_t)(_iova))
+#define DPAA2_IOVA_TO_VADDR(_iova) dpaa2_mem_ptov((size_t)(_iova))
/**
* macro to convert modify the memory containing IOVA to Virtual address
*/
#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type) \
- {_mem = (_type)(dpaa2_mem_ptov((phys_addr_t)(_mem))); }
+ {_mem = (_type)(dpaa2_mem_ptov((size_t)(_mem))); }
#else /* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */
#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_addr)
-#define DPAA2_OP_VADDR_TO_IOVA(op) (op)
#define DPAA2_VADDR_TO_IOVA(_vaddr) (_vaddr)
#define DPAA2_IOVA_TO_VADDR(_iova) (_iova)
#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type)
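
The width changes above matter most for the frame-descriptor helpers. A small illustrative fragment (assuming `struct qbman_fd` from the QBMan headers and a buffer `buf` with length `len` in the caller) shows the intended round trip:

/* Illustration only: the FD address macros split an IOVA into the
 * 32-bit hi/lo words of the frame descriptor and rebuild it on read.
 */
struct qbman_fd fd = { 0 };
uint64_t iova = DPAA2_VADDR_TO_IOVA(buf);	/* vtop or identity, per build */

DPAA2_SET_FD_ADDR(&fd, iova);			/* addr_lo/addr_hi <- iova */
DPAA2_SET_FD_LEN(&fd, len);
/* ... frame is enqueued / dequeued through a QBMan portal ... */
uint64_t back = DPAA2_GET_FD_ADDR(&fd);		/* back == iova */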
diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
index 96269ed4..bb60a98f 100644
--- a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
@@ -6,8 +6,6 @@
#ifndef _FSL_QBMAN_BASE_H
#define _FSL_QBMAN_BASE_H
-typedef uint64_t dma_addr_t;
-
/**
* DOC: QBMan basic structures
*
diff --git a/drivers/bus/fslmc/qbman/qbman_portal.c b/drivers/bus/fslmc/qbman/qbman_portal.c
index e2217335..713ec965 100644
--- a/drivers/bus/fslmc/qbman/qbman_portal.c
+++ b/drivers/bus/fslmc/qbman/qbman_portal.c
@@ -553,10 +553,9 @@ int qbman_swp_enqueue_multiple(struct qbman_swp *s,
/* Flush all the cacheline without load/store in between */
eqcr_pi = s->eqcr.pi;
- addr_cena = (uint64_t)s->sys.addr_cena;
+ addr_cena = (size_t)s->sys.addr_cena;
for (i = 0; i < num_enqueued; i++) {
- dcbf((uint64_t *)(addr_cena +
- QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
+ dcbf((addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
eqcr_pi++;
eqcr_pi &= 0xF;
}
@@ -620,10 +619,9 @@ int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
/* Flush all the cacheline without load/store in between */
eqcr_pi = s->eqcr.pi;
- addr_cena = (uint64_t)s->sys.addr_cena;
+ addr_cena = (size_t)s->sys.addr_cena;
for (i = 0; i < num_enqueued; i++) {
- dcbf((uint64_t *)(addr_cena +
- QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
+ dcbf((addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
eqcr_pi++;
eqcr_pi &= 0xF;
}
@@ -690,7 +688,7 @@ void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
dma_addr_t storage_phys,
int stash)
{
- d->pull.rsp_addr_virt = (uint64_t)storage;
+ d->pull.rsp_addr_virt = (size_t)storage;
if (!storage) {
d->pull.verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
@@ -749,7 +747,7 @@ int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
}
d->pull.tok = s->sys.idx + 1;
- s->vdq.storage = (void *)d->pull.rsp_addr_virt;
+ s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
memcpy(&p[1], &cl[1], 12);
diff --git a/drivers/bus/fslmc/qbman/qbman_sys.h b/drivers/bus/fslmc/qbman/qbman_sys.h
index 846788ea..2bd33ea5 100644
--- a/drivers/bus/fslmc/qbman/qbman_sys.h
+++ b/drivers/bus/fslmc/qbman/qbman_sys.h
@@ -20,6 +20,9 @@
#include "qbman_sys_decl.h"
+#define CENA_WRITE_ENABLE 0
+#define CINH_WRITE_ENABLE 1
+
/* Debugging assists */
static inline void __hexdump(unsigned long start, unsigned long end,
unsigned long p, size_t sz, const unsigned char *c)
@@ -178,7 +181,11 @@ static inline void *qbman_cena_write_start_wo_shadow(struct qbman_swp_sys *s,
s->addr_cena, s->idx, offset);
#endif
QBMAN_BUG_ON(offset & 63);
+#ifdef RTE_ARCH_64
return (s->addr_cena + offset);
+#else
+ return (s->addr_cinh + offset);
+#endif
}
static inline void qbman_cena_write_complete(struct qbman_swp_sys *s,
@@ -191,11 +198,19 @@ static inline void qbman_cena_write_complete(struct qbman_swp_sys *s,
s->addr_cena, s->idx, offset, shadow);
hexdump(cmd, 64);
#endif
+#ifdef RTE_ARCH_64
for (loop = 15; loop >= 1; loop--)
__raw_writel(shadow[loop], s->addr_cena +
offset + loop * 4);
lwsync();
__raw_writel(shadow[0], s->addr_cena + offset);
+#else
+ for (loop = 15; loop >= 1; loop--)
+ __raw_writel(shadow[loop], s->addr_cinh +
+ offset + loop * 4);
+ lwsync();
+ __raw_writel(shadow[0], s->addr_cinh + offset);
+#endif
dcbf(s->addr_cena + offset);
}
@@ -224,9 +239,15 @@ static inline void *qbman_cena_read(struct qbman_swp_sys *s, uint32_t offset)
s->addr_cena, s->idx, offset, shadow);
#endif
+#ifdef RTE_ARCH_64
for (loop = 0; loop < 16; loop++)
shadow[loop] = __raw_readl(s->addr_cena + offset
+ loop * 4);
+#else
+ for (loop = 0; loop < 16; loop++)
+ shadow[loop] = __raw_readl(s->addr_cinh + offset
+ + loop * 4);
+#endif
#ifdef QBMAN_CENA_TRACE
hexdump(shadow, 64);
#endif
@@ -313,6 +334,11 @@ static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
uint8_t dqrr_size)
{
uint32_t reg;
+#ifdef RTE_ARCH_64
+ uint8_t wn = CENA_WRITE_ENABLE;
+#else
+ uint8_t wn = CINH_WRITE_ENABLE;
+#endif
s->addr_cena = d->cena_bar;
s->addr_cinh = d->cinh_bar;
@@ -333,10 +359,10 @@ static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
QBMAN_BUG_ON(reg);
#endif
if (s->eqcr_mode == qman_eqcr_vb_array)
- reg = qbman_set_swp_cfg(dqrr_size, 0, 0, 3, 2, 3, 1, 1, 1, 1,
+ reg = qbman_set_swp_cfg(dqrr_size, wn, 0, 3, 2, 3, 1, 1, 1, 1,
1, 1);
else
- reg = qbman_set_swp_cfg(dqrr_size, 0, 1, 3, 2, 2, 1, 1, 1, 1,
+ reg = qbman_set_swp_cfg(dqrr_size, wn, 1, 3, 2, 2, 1, 1, 1, 1,
1, 1);
qbman_cinh_write(s, QBMAN_CINH_SWP_CFG, reg);
reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
diff --git a/drivers/bus/fslmc/qbman/qbman_sys_decl.h b/drivers/bus/fslmc/qbman/qbman_sys_decl.h
index f82bb18c..fa6977fe 100644
--- a/drivers/bus/fslmc/qbman/qbman_sys_decl.h
+++ b/drivers/bus/fslmc/qbman/qbman_sys_decl.h
@@ -15,6 +15,7 @@
/****************/
/* arch assists */
/****************/
+#if defined(RTE_ARCH_ARM64)
#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); }
#define lwsync() { asm volatile("dmb st" : : : "memory"); }
#define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); }
@@ -28,3 +29,25 @@ static inline void prefetch_for_store(void *p)
{
asm volatile("prfm pstl1keep, [%0, #0]" : : "r" (p));
}
+#elif defined(RTE_ARCH_ARM)
+#define dcbz(p) memset(p, 0, 64)
+#define lwsync() { asm volatile("dmb st" : : : "memory"); }
+#define dcbf(p) RTE_SET_USED(p)
+#define dccivac(p) RTE_SET_USED(p)
+#define prefetch_for_load(p) { asm volatile ("pld [%0]" : : "r" (p)); }
+#define prefetch_for_store(p) { asm volatile ("pld [%0]" : : "r" (p)); }
+
+#else
+#define dcbz(p) RTE_SET_USED(p)
+#define lwsync()
+#define dcbf(p) RTE_SET_USED(p)
+#define dccivac(p) RTE_SET_USED(p)
+static inline void prefetch_for_load(void *p)
+{
+ RTE_SET_USED(p);
+}
+static inline void prefetch_for_store(void *p)
+{
+ RTE_SET_USED(p);
+}
+#endif
diff --git a/drivers/bus/fslmc/rte_bus_fslmc_version.map b/drivers/bus/fslmc/rte_bus_fslmc_version.map
index b7db0741..fe45a113 100644
--- a/drivers/bus/fslmc/rte_bus_fslmc_version.map
+++ b/drivers/bus/fslmc/rte_bus_fslmc_version.map
@@ -2,7 +2,6 @@ DPDK_17.05 {
global:
dpaa2_affine_qbman_swp;
- dpaa2_affine_qbman_swp_sec;
dpaa2_alloc_dpbp_dev;
dpaa2_alloc_dq_storage;
dpaa2_free_dpbp_dev;
@@ -101,3 +100,19 @@ DPDK_18.02 {
rte_fslmc_get_device_count;
} DPDK_17.11;
+
+DPDK_18.05 {
+ global:
+
+ dpaa2_affine_qbman_ethrx_swp;
+ dpdmai_close;
+ dpdmai_disable;
+ dpdmai_enable;
+ dpdmai_get_attributes;
+ dpdmai_get_rx_queue;
+ dpdmai_get_tx_queue;
+ dpdmai_open;
+ dpdmai_set_rx_queue;
+ rte_dpaa2_free_dpci_dev;
+
+} DPDK_18.02;
diff --git a/drivers/bus/fslmc/rte_fslmc.h b/drivers/bus/fslmc/rte_fslmc.h
index 69d0fec7..33552b48 100644
--- a/drivers/bus/fslmc/rte_fslmc.h
+++ b/drivers/bus/fslmc/rte_fslmc.h
@@ -31,6 +31,7 @@ extern "C" {
#include <rte_dev.h>
#include <rte_bus.h>
#include <rte_tailq.h>
+#include <rte_devargs.h>
#include <fslmc_vfio.h>
@@ -49,6 +50,9 @@ struct rte_dpaa2_driver;
TAILQ_HEAD(rte_fslmc_device_list, rte_dpaa2_device);
TAILQ_HEAD(rte_fslmc_driver_list, rte_dpaa2_driver);
+#define RTE_DEV_TO_FSLMC_CONST(ptr) \
+ container_of(ptr, const struct rte_dpaa2_device, device)
+
extern struct rte_fslmc_bus rte_fslmc_bus;
enum rte_dpaa2_dev_type {
@@ -61,6 +65,7 @@ enum rte_dpaa2_dev_type {
DPAA2_IO, /**< DPIO type device */
DPAA2_CI, /**< DPCI type device */
DPAA2_MPORTAL, /**< DPMCP type device */
+ DPAA2_QDMA, /**< DPDMAI type device */
/* Unknown device placeholder */
DPAA2_UNKNOWN,
DPAA2_DEVTYPE_MAX,
@@ -91,6 +96,7 @@ struct rte_dpaa2_device {
union {
struct rte_eth_dev *eth_dev; /**< ethernet device */
struct rte_cryptodev *cryptodev; /**< Crypto Device */
+ struct rte_rawdev *rawdev; /**< Raw Device */
};
enum rte_dpaa2_dev_type dev_type; /**< Device Type */
uint16_t object_id; /**< DPAA2 Object ID */
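
The new RTE_DEV_TO_FSLMC_CONST helper follows the usual container_of pattern. A brief hedged example (hypothetical callback name) of how a driver can recover the bus-specific device and check the new DPAA2_QDMA type:

/* Sketch: map a generic rte_device back to its rte_dpaa2_device wrapper. */
static int fslmc_dev_is_qdma_sketch(const struct rte_device *dev)
{
	const struct rte_dpaa2_device *dpaa2_dev = RTE_DEV_TO_FSLMC_CONST(dev);

	return dpaa2_dev->dev_type == DPAA2_QDMA;
}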
diff --git a/drivers/bus/ifpga/Makefile b/drivers/bus/ifpga/Makefile
new file mode 100644
index 00000000..3ff3bdb8
--- /dev/null
+++ b/drivers/bus/ifpga/Makefile
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_bus_ifpga.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal
+LDLIBS += -lrte_rawdev
+LDLIBS += -lrte_kvargs
+
+# versioning export map
+EXPORT_MAP := rte_bus_ifpga_version.map
+
+# library version
+LIBABIVER := 1
+
+SRCS-$(CONFIG_RTE_LIBRTE_IFPGA_BUS) += ifpga_bus.c
+SRCS-$(CONFIG_RTE_LIBRTE_IFPGA_BUS) += ifpga_common.c
+
+#
+# Export include files
+#
+SYMLINK-$(CONFIG_RTE_LIBRTE_IFPGA_BUS)-include += rte_bus_ifpga.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/bus/ifpga/ifpga_bus.c b/drivers/bus/ifpga/ifpga_bus.c
new file mode 100644
index 00000000..b324872e
--- /dev/null
+++ b/drivers/bus/ifpga/ifpga_bus.c
@@ -0,0 +1,467 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <string.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <sys/queue.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include <rte_errno.h>
+#include <rte_bus.h>
+#include <rte_per_lcore.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_common.h>
+#include <rte_devargs.h>
+#include <rte_kvargs.h>
+#include <rte_alarm.h>
+#include <rte_string_fns.h>
+
+#include "rte_rawdev.h"
+#include "rte_rawdev_pmd.h"
+#include "rte_bus_ifpga.h"
+#include "ifpga_logs.h"
+#include "ifpga_common.h"
+
+int ifpga_bus_logtype;
+
+/* Forward declaration to access Intel FPGA bus
+ * on which iFPGA devices are connected
+ */
+static struct rte_bus rte_ifpga_bus;
+
+static struct ifpga_afu_dev_list ifpga_afu_dev_list =
+ TAILQ_HEAD_INITIALIZER(ifpga_afu_dev_list);
+static struct ifpga_afu_drv_list ifpga_afu_drv_list =
+ TAILQ_HEAD_INITIALIZER(ifpga_afu_drv_list);
+
+
+/* Register an ifpga bus-based driver */
+void rte_ifpga_driver_register(struct rte_afu_driver *driver)
+{
+ RTE_VERIFY(driver);
+
+ TAILQ_INSERT_TAIL(&ifpga_afu_drv_list, driver, next);
+}
+
+/* Unregister an ifpga bus-based driver */
+void rte_ifpga_driver_unregister(struct rte_afu_driver *driver)
+{
+ TAILQ_REMOVE(&ifpga_afu_drv_list, driver, next);
+}
+
+static struct rte_afu_device *
+ifpga_find_afu_dev(const struct rte_rawdev *rdev,
+ const struct rte_afu_id *afu_id)
+{
+ struct rte_afu_device *afu_dev = NULL;
+
+ TAILQ_FOREACH(afu_dev, &ifpga_afu_dev_list, next) {
+ if (afu_dev &&
+ afu_dev->rawdev == rdev &&
+ !ifpga_afu_id_cmp(&afu_dev->id, afu_id))
+ return afu_dev;
+ }
+ return NULL;
+}
+
+static const char * const valid_args[] = {
+#define IFPGA_ARG_NAME "ifpga"
+ IFPGA_ARG_NAME,
+#define IFPGA_ARG_PORT "port"
+ IFPGA_ARG_PORT,
+#define IFPGA_AFU_BTS "afu_bts"
+ IFPGA_AFU_BTS,
+ NULL
+};
+
+/*
+ * Parse the devargs of one FPGA AFU device and, if it is not already known,
+ * create and return its rte_afu_device
+ */
+static struct rte_afu_device *
+ifpga_scan_one(struct rte_rawdev *rawdev,
+ struct rte_devargs *devargs)
+{
+ struct rte_kvargs *kvlist = NULL;
+ struct rte_afu_device *afu_dev = NULL;
+ struct rte_afu_pr_conf afu_pr_conf;
+ int ret = 0;
+ char *path = NULL;
+
+ memset(&afu_pr_conf, 0, sizeof(struct rte_afu_pr_conf));
+
+ kvlist = rte_kvargs_parse(devargs->args, valid_args);
+ if (!kvlist) {
+ IFPGA_BUS_ERR("error when parsing param");
+ goto end;
+ }
+
+ if (rte_kvargs_count(kvlist, IFPGA_ARG_PORT) == 1) {
+ if (rte_kvargs_process(kvlist, IFPGA_ARG_PORT,
+ &rte_ifpga_get_integer32_arg, &afu_pr_conf.afu_id.port) < 0) {
+ IFPGA_BUS_ERR("error to parse %s",
+ IFPGA_ARG_PORT);
+ goto end;
+ }
+ } else {
+ IFPGA_BUS_ERR("arg %s is mandatory for ifpga bus",
+ IFPGA_ARG_PORT);
+ goto end;
+ }
+
+ if (rte_kvargs_count(kvlist, IFPGA_AFU_BTS) == 1) {
+ if (rte_kvargs_process(kvlist, IFPGA_AFU_BTS,
+ &rte_ifpga_get_string_arg, &path) < 0) {
+ IFPGA_BUS_ERR("Failed to parse %s",
+ IFPGA_AFU_BTS);
+ goto end;
+ }
+ } else {
+ IFPGA_BUS_ERR("arg %s is mandatory for ifpga bus",
+ IFPGA_AFU_BTS);
+ goto end;
+ }
+
+ afu_pr_conf.afu_id.uuid.uuid_low = 0;
+ afu_pr_conf.afu_id.uuid.uuid_high = 0;
+ afu_pr_conf.pr_enable = path ? 1 : 0;
+
+ if (ifpga_find_afu_dev(rawdev, &afu_pr_conf.afu_id))
+ goto end;
+
+ afu_dev = calloc(1, sizeof(*afu_dev));
+ if (!afu_dev)
+ goto end;
+
+ afu_dev->device.devargs = devargs;
+ afu_dev->device.numa_node = SOCKET_ID_ANY;
+ afu_dev->device.name = devargs->name;
+ afu_dev->rawdev = rawdev;
+ afu_dev->id.uuid.uuid_low = 0;
+ afu_dev->id.uuid.uuid_high = 0;
+ afu_dev->id.port = afu_pr_conf.afu_id.port;
+
+ if (rawdev->dev_ops && rawdev->dev_ops->dev_info_get)
+ rawdev->dev_ops->dev_info_get(rawdev, afu_dev);
+
+ if (rawdev->dev_ops &&
+ rawdev->dev_ops->dev_start &&
+ rawdev->dev_ops->dev_start(rawdev))
+ goto end;
+
+ strlcpy(afu_pr_conf.bs_path, path, sizeof(afu_pr_conf.bs_path));
+ if (rawdev->dev_ops &&
+ rawdev->dev_ops->firmware_load) {
+ ret = rawdev->dev_ops->firmware_load(rawdev, &afu_pr_conf);
+ if (ret) {
+ IFPGA_BUS_ERR("firmware load error %d", ret);
+ goto end;
+ }
+ }
+ afu_dev->id.uuid.uuid_low = afu_pr_conf.afu_id.uuid.uuid_low;
+ afu_dev->id.uuid.uuid_high = afu_pr_conf.afu_id.uuid.uuid_high;
+
+ rte_kvargs_free(kvlist);
+ free(path);
+ return afu_dev;
+
+end:
+ if (kvlist)
+ rte_kvargs_free(kvlist);
+ if (path)
+ free(path);
+ if (afu_dev)
+ free(afu_dev);
+
+ return NULL;
+}
+
+/*
+ * Scan the devargs given on the command line for Intel FPGA AFU devices and
+ * add them to the device list
+ */
+static int
+ifpga_scan(void)
+{
+ struct rte_devargs *devargs;
+ struct rte_kvargs *kvlist = NULL;
+ struct rte_rawdev *rawdev = NULL;
+ char *name = NULL;
+ char name1[RTE_RAWDEV_NAME_MAX_LEN];
+ struct rte_afu_device *afu_dev = NULL;
+
+ /* for FPGA devices we scan the devargs_list populated via cmdline */
+ RTE_EAL_DEVARGS_FOREACH(IFPGA_ARG_NAME, devargs) {
+ if (devargs->bus != &rte_ifpga_bus)
+ continue;
+
+ kvlist = rte_kvargs_parse(devargs->args, valid_args);
+ if (!kvlist) {
+ IFPGA_BUS_ERR("error when parsing param");
+ goto end;
+ }
+
+ if (rte_kvargs_count(kvlist, IFPGA_ARG_NAME) == 1) {
+ if (rte_kvargs_process(kvlist, IFPGA_ARG_NAME,
+ &rte_ifpga_get_string_arg, &name) < 0) {
+ IFPGA_BUS_ERR("error to parse %s",
+ IFPGA_ARG_NAME);
+ goto end;
+ }
+ } else {
+ IFPGA_BUS_ERR("arg %s is mandatory for ifpga bus",
+ IFPGA_ARG_NAME);
+ goto end;
+ }
+
+ memset(name1, 0, sizeof(name1));
+ snprintf(name1, RTE_RAWDEV_NAME_MAX_LEN, "IFPGA:%s", name);
+
+ rawdev = rte_rawdev_pmd_get_named_dev(name1);
+ if (!rawdev)
+ goto end;
+
+ afu_dev = ifpga_scan_one(rawdev, devargs);
+ if (afu_dev != NULL)
+ TAILQ_INSERT_TAIL(&ifpga_afu_dev_list, afu_dev, next);
+ }
+
+end:
+ if (kvlist)
+ rte_kvargs_free(kvlist);
+ if (name)
+ free(name);
+
+ return 0;
+}
+
+/*
+ * Match the AFU Driver and AFU Device using the ID Table
+ */
+static int
+rte_afu_match(const struct rte_afu_driver *afu_drv,
+ const struct rte_afu_device *afu_dev)
+{
+ const struct rte_afu_uuid *id_table;
+
+ for (id_table = afu_drv->id_table;
+ ((id_table->uuid_low != 0) && (id_table->uuid_high != 0));
+ id_table++) {
+ /* check if device's identifiers match the driver's ones */
+ if ((id_table->uuid_low != afu_dev->id.uuid.uuid_low) ||
+ id_table->uuid_high !=
+ afu_dev->id.uuid.uuid_high)
+ continue;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+ifpga_probe_one_driver(struct rte_afu_driver *drv,
+ struct rte_afu_device *afu_dev)
+{
+ int ret;
+
+ if (!rte_afu_match(drv, afu_dev))
+ /* Match of device and driver failed */
+ return 1;
+
+ /* reference driver structure */
+ afu_dev->driver = drv;
+ afu_dev->device.driver = &drv->driver;
+
+ /* call the driver probe() function */
+ ret = drv->probe(afu_dev);
+ if (ret) {
+ afu_dev->driver = NULL;
+ afu_dev->device.driver = NULL;
+ }
+
+ return ret;
+}
+
+static int
+ifpga_probe_all_drivers(struct rte_afu_device *afu_dev)
+{
+ struct rte_afu_driver *drv = NULL;
+ int ret = 0;
+
+ if (afu_dev == NULL)
+ return -1;
+
+ /* Check if a driver is already loaded */
+ if (afu_dev->driver != NULL)
+ return 0;
+
+ TAILQ_FOREACH(drv, &ifpga_afu_drv_list, next) {
+ if (ifpga_probe_one_driver(drv, afu_dev)) {
+ ret = -1;
+ break;
+ }
+ }
+ return ret;
+}
+
+/*
+ * Scan the content of the Intel FPGA bus, and call the probe() function for
+ * all registered drivers that have a matching entry in its id_table
+ * for discovered devices.
+ */
+static int
+ifpga_probe(void)
+{
+ struct rte_afu_device *afu_dev = NULL;
+ int ret = 0;
+
+ TAILQ_FOREACH(afu_dev, &ifpga_afu_dev_list, next) {
+ if (afu_dev->device.driver)
+ continue;
+
+ ret = ifpga_probe_all_drivers(afu_dev);
+ if (ret < 0)
+ IFPGA_BUS_ERR("failed to initialize %s device\n",
+ rte_ifpga_device_name(afu_dev));
+ }
+
+ return ret;
+}
+
+static int
+ifpga_plug(struct rte_device *dev)
+{
+ return ifpga_probe_all_drivers(RTE_DEV_TO_AFU(dev));
+}
+
+static int
+ifpga_remove_driver(struct rte_afu_device *afu_dev)
+{
+ const char *name;
+ const struct rte_afu_driver *driver;
+
+ name = rte_ifpga_device_name(afu_dev);
+ if (!afu_dev->device.driver) {
+ IFPGA_BUS_DEBUG("no driver attach to device %s\n", name);
+ return 1;
+ }
+
+ driver = RTE_DRV_TO_AFU_CONST(afu_dev->device.driver);
+ return driver->remove(afu_dev);
+}
+
+static int
+ifpga_unplug(struct rte_device *dev)
+{
+ struct rte_afu_device *afu_dev = NULL;
+ struct rte_devargs *devargs = NULL;
+ int ret;
+
+ if (dev == NULL)
+ return -EINVAL;
+
+ afu_dev = RTE_DEV_TO_AFU(dev);
+ if (!afu_dev)
+ return -ENOENT;
+
+ devargs = dev->devargs;
+
+ ret = ifpga_remove_driver(afu_dev);
+ if (ret)
+ return ret;
+
+ TAILQ_REMOVE(&ifpga_afu_dev_list, afu_dev, next);
+
+ rte_devargs_remove(devargs->bus->name, devargs->name);
+ free(afu_dev);
+ return 0;
+
+}
+
+static struct rte_device *
+ifpga_find_device(const struct rte_device *start,
+ rte_dev_cmp_t cmp, const void *data)
+{
+ struct rte_afu_device *afu_dev;
+
+ TAILQ_FOREACH(afu_dev, &ifpga_afu_dev_list, next) {
+ if (start && &afu_dev->device == start) {
+ start = NULL;
+ continue;
+ }
+ if (cmp(&afu_dev->device, data) == 0)
+ return &afu_dev->device;
+ }
+
+ return NULL;
+}
+static int
+ifpga_parse(const char *name, void *addr)
+{
+ int *out = addr;
+ struct rte_rawdev *rawdev = NULL;
+ char rawdev_name[RTE_RAWDEV_NAME_MAX_LEN];
+ char *c1 = NULL;
+ char *c2 = NULL;
+ int port = IFPGA_BUS_DEV_PORT_MAX;
+ char str_port[8];
+ int str_port_len = 0;
+ int ret;
+
+ memset(str_port, 0, 8);
+ c1 = strchr(name, '|');
+ if (c1 != NULL) {
+ str_port_len = c1 - name;
+ c2 = c1 + 1;
+ }
+
+ if (str_port_len < 8 &&
+ str_port_len > 0) {
+ memcpy(str_port, name, str_port_len);
+ ret = sscanf(str_port, "%d", &port);
+ if (ret == -1)
+ return 0;
+ }
+
+ memset(rawdev_name, 0, sizeof(rawdev_name));
+ snprintf(rawdev_name, RTE_RAWDEV_NAME_MAX_LEN, "IFPGA:%s", c2);
+ rawdev = rte_rawdev_pmd_get_named_dev(rawdev_name);
+
+ if ((port < IFPGA_BUS_DEV_PORT_MAX) &&
+ rawdev &&
+ (addr != NULL))
+ *out = port;
+
+ if ((port < IFPGA_BUS_DEV_PORT_MAX) &&
+ rawdev)
+ return 0;
+ else
+ return 1;
+}
+
+static struct rte_bus rte_ifpga_bus = {
+ .scan = ifpga_scan,
+ .probe = ifpga_probe,
+ .find_device = ifpga_find_device,
+ .plug = ifpga_plug,
+ .unplug = ifpga_unplug,
+ .parse = ifpga_parse,
+};
+
+RTE_REGISTER_BUS(IFPGA_BUS_NAME, rte_ifpga_bus);
+
+RTE_INIT(ifpga_init_log)
+{
+ ifpga_bus_logtype = rte_log_register("bus.ifpga");
+ if (ifpga_bus_logtype >= 0)
+ rte_log_set_level(ifpga_bus_logtype, RTE_LOG_NOTICE);
+}
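
Usage sketch (illustration only) of the same rte_kvargs pattern ifpga_scan_one() relies on, with a hypothetical argument string and function name; the handlers come from ifpga_common.c below:

#include <errno.h>
#include <stdlib.h>
#include <rte_kvargs.h>
#include "ifpga_common.h"

static int
demo_parse_ifpga_args(void)
{
	static const char * const keys[] = { "port", "afu_bts", NULL };
	struct rte_kvargs *kv;
	int port = 0;
	char *bs_path = NULL;

	kv = rte_kvargs_parse("port=0,afu_bts=/tmp/demo_afu.gbs", keys);
	if (kv == NULL)
		return -EINVAL;

	if (rte_kvargs_process(kv, "port",
			&rte_ifpga_get_integer32_arg, &port) < 0 ||
	    rte_kvargs_process(kv, "afu_bts",
			&rte_ifpga_get_string_arg, &bs_path) < 0) {
		rte_kvargs_free(kv);
		return -EINVAL;
	}

	rte_kvargs_free(kv);
	free(bs_path);
	return port;
}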
diff --git a/drivers/bus/ifpga/ifpga_common.c b/drivers/bus/ifpga/ifpga_common.c
new file mode 100644
index 00000000..78e2eaee
--- /dev/null
+++ b/drivers/bus/ifpga/ifpga_common.c
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <string.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <sys/queue.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include <rte_errno.h>
+#include <rte_bus.h>
+#include <rte_per_lcore.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_common.h>
+
+#include <rte_devargs.h>
+#include <rte_kvargs.h>
+#include <rte_alarm.h>
+
+#include "rte_bus_ifpga.h"
+#include "ifpga_logs.h"
+#include "ifpga_common.h"
+
+int rte_ifpga_get_string_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ if (!value || !extra_args)
+ return -EINVAL;
+
+ *(char **)extra_args = strdup(value);
+
+ if (!*(char **)extra_args)
+ return -ENOMEM;
+
+ return 0;
+}
+int rte_ifpga_get_integer32_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ if (!value || !extra_args)
+ return -EINVAL;
+
+ *(int *)extra_args = strtoull(value, NULL, 0);
+
+ return 0;
+}
+int ifpga_get_integer64_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ if (!value || !extra_args)
+ return -EINVAL;
+
+ *(uint64_t *)extra_args = strtoull(value, NULL, 0);
+
+ return 0;
+}
+int ifpga_get_unsigned_long(const char *str, int base)
+{
+ unsigned long num;
+ char *end = NULL;
+
+ errno = 0;
+
+ num = strtoul(str, &end, base);
+ if ((str[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
+ return -1;
+
+ return num;
+}
+
+int ifpga_afu_id_cmp(const struct rte_afu_id *afu_id0,
+ const struct rte_afu_id *afu_id1)
+{
+ if ((afu_id0->uuid.uuid_low == afu_id1->uuid.uuid_low) &&
+ (afu_id0->uuid.uuid_high == afu_id1->uuid.uuid_high) &&
+ (afu_id0->port == afu_id1->port))
+ return 0;
+
+ return 1;
+}
diff --git a/drivers/bus/ifpga/ifpga_common.h b/drivers/bus/ifpga/ifpga_common.h
new file mode 100644
index 00000000..f9254b9d
--- /dev/null
+++ b/drivers/bus/ifpga/ifpga_common.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_COMMON_H_
+#define _IFPGA_COMMON_H_
+
+int rte_ifpga_get_string_arg(const char *key __rte_unused,
+ const char *value, void *extra_args);
+int rte_ifpga_get_integer32_arg(const char *key __rte_unused,
+ const char *value, void *extra_args);
+int ifpga_get_integer64_arg(const char *key __rte_unused,
+ const char *value, void *extra_args);
+int ifpga_get_unsigned_long(const char *str, int base);
+int ifpga_afu_id_cmp(const struct rte_afu_id *afu_id0,
+ const struct rte_afu_id *afu_id1);
+
+#endif /* _IFPGA_COMMON_H_ */
diff --git a/drivers/bus/ifpga/ifpga_logs.h b/drivers/bus/ifpga/ifpga_logs.h
new file mode 100644
index 00000000..873e0a4f
--- /dev/null
+++ b/drivers/bus/ifpga/ifpga_logs.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_LOGS_H_
+#define _IFPGA_LOGS_H_
+
+#include <rte_log.h>
+
+extern int ifpga_bus_logtype;
+
+#define IFPGA_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, ifpga_bus_logtype, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#define IFPGA_BUS_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, ifpga_bus_logtype, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#define IFPGA_BUS_FUNC_TRACE() IFPGA_BUS_LOG(DEBUG, ">>")
+
+#define IFPGA_BUS_DEBUG(fmt, args...) \
+ IFPGA_BUS_LOG(DEBUG, fmt, ## args)
+#define IFPGA_BUS_INFO(fmt, args...) \
+ IFPGA_BUS_LOG(INFO, fmt, ## args)
+#define IFPGA_BUS_ERR(fmt, args...) \
+ IFPGA_BUS_LOG(ERR, fmt, ## args)
+#define IFPGA_BUS_WARN(fmt, args...) \
+ IFPGA_BUS_LOG(WARNING, fmt, ## args)
+
+#endif /* _IFPGA_LOGS_H_ */
diff --git a/drivers/bus/ifpga/meson.build b/drivers/bus/ifpga/meson.build
new file mode 100644
index 00000000..c9b08c86
--- /dev/null
+++ b/drivers/bus/ifpga/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2018 Intel Corporation
+
+deps += ['pci', 'kvargs', 'rawdev']
+install_headers('rte_bus_ifpga.h')
+sources = files('ifpga_common.c', 'ifpga_bus.c')
+
+allow_experimental_apis = true
diff --git a/drivers/bus/ifpga/rte_bus_ifpga.h b/drivers/bus/ifpga/rte_bus_ifpga.h
new file mode 100644
index 00000000..981bc352
--- /dev/null
+++ b/drivers/bus/ifpga/rte_bus_ifpga.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _RTE_BUS_IFPGA_H_
+#define _RTE_BUS_IFPGA_H_
+
+/**
+ * @file
+ *
+ * RTE Intel FPGA Bus Interface
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_bus.h>
+#include <rte_pci.h>
+
+/** Name of Intel FPGA Bus */
+#define IFPGA_BUS_NAME ifpga
+
+/* Forward declarations */
+struct rte_afu_device;
+struct rte_afu_driver;
+
+/** Double linked list of Intel FPGA AFU device. */
+TAILQ_HEAD(ifpga_afu_dev_list, rte_afu_device);
+/** Double linked list of Intel FPGA AFU device drivers. */
+TAILQ_HEAD(ifpga_afu_drv_list, rte_afu_driver);
+
+#define IFPGA_BUS_BITSTREAM_PATH_MAX_LEN 256
+
+struct rte_afu_uuid {
+ uint64_t uuid_low;
+ uint64_t uuid_high;
+} __attribute__ ((packed));
+
+#define IFPGA_BUS_DEV_PORT_MAX 4
+
+/**
+ * A structure describing an ID for an AFU driver. Each driver provides a
+ * table of these IDs for each device that it supports.
+ */
+struct rte_afu_id {
+ struct rte_afu_uuid uuid;
+ int port; /**< port number */
+} __attribute__ ((packed));
+
+/**
+ * A structure describing the PR (Partial Reconfiguration) configuration
+ * for an AFU.
+ */
+struct rte_afu_pr_conf {
+ struct rte_afu_id afu_id;
+ int pr_enable;
+ char bs_path[IFPGA_BUS_BITSTREAM_PATH_MAX_LEN];
+};
+
+#define AFU_PRI_STR_SIZE (PCI_PRI_STR_SIZE + 8)
+
+/**
+ * A structure describing an AFU device.
+ */
+struct rte_afu_device {
+ TAILQ_ENTRY(rte_afu_device) next; /**< Next in device list. */
+ struct rte_device device; /**< Inherit core device */
+ struct rte_rawdev *rawdev; /**< Pointer to the backing raw device */
+ struct rte_afu_id id; /**< AFU id within FPGA. */
+ uint32_t num_region; /**< number of regions found */
+ struct rte_mem_resource mem_resource[PCI_MAX_RESOURCE];
+ /**< AFU Memory Resource */
+ struct rte_intr_handle intr_handle; /**< Interrupt handle */
+ struct rte_afu_driver *driver; /**< Associated driver */
+ char path[IFPGA_BUS_BITSTREAM_PATH_MAX_LEN];
+} __attribute__ ((packed));
+
+/**
+ * @internal
+ * Helper macro for drivers that need to convert to struct rte_afu_device.
+ */
+#define RTE_DEV_TO_AFU(ptr) \
+ container_of(ptr, struct rte_afu_device, device)
+
+#define RTE_DRV_TO_AFU_CONST(ptr) \
+ container_of(ptr, const struct rte_afu_driver, driver)
+
+/**
+ * Initialization function for the driver called during FPGA BUS probing.
+ */
+typedef int (afu_probe_t)(struct rte_afu_device *);
+
+/**
+ * Uninitialization function for the driver called during hotplugging.
+ */
+typedef int (afu_remove_t)(struct rte_afu_device *);
+
+/**
+ * A structure describing an AFU device driver.
+ */
+struct rte_afu_driver {
+ TAILQ_ENTRY(rte_afu_driver) next; /**< Next afu driver. */
+ struct rte_driver driver; /**< Inherit core driver. */
+ afu_probe_t *probe; /**< Device Probe function. */
+ afu_remove_t *remove; /**< Device Remove function. */
+ const struct rte_afu_uuid *id_table; /**< AFU uuid within FPGA. */
+};
+
+static inline const char *
+rte_ifpga_device_name(const struct rte_afu_device *afu)
+{
+ if (afu && afu->device.name)
+ return afu->device.name;
+ return NULL;
+}
+
+/**
+ * Register an ifpga AFU device driver.
+ *
+ * @param driver
+ * A pointer to a rte_afu_driver structure describing the driver
+ * to be registered.
+ */
+void rte_ifpga_driver_register(struct rte_afu_driver *driver);
+
+/**
+ * Unregister an ifpga AFU device driver.
+ *
+ * @param driver
+ * A pointer to a rte_afu_driver structure describing the driver
+ * to be unregistered.
+ */
+void rte_ifpga_driver_unregister(struct rte_afu_driver *driver);
+
+#define RTE_PMD_REGISTER_AFU(nm, afudrv)\
+RTE_INIT(afudrvinitfn_ ##afudrv);\
+static const char *afudrvinit_ ## nm ## _alias;\
+static void afudrvinitfn_ ##afudrv(void)\
+{\
+ (afudrv).driver.name = RTE_STR(nm);\
+ (afudrv).driver.alias = afudrvinit_ ## nm ## _alias;\
+ rte_ifpga_driver_register(&afudrv);\
+} \
+RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
+
+#define RTE_PMD_REGISTER_AFU_ALIAS(nm, alias)\
+static const char *afudrvinit_ ## nm ## _alias = RTE_STR(alias)
+
+#endif /* _RTE_BUS_IFPGA_H_ */
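
Illustration only: a skeleton AFU driver built on the interface above. The UUID values, names and empty probe/remove bodies are made up for the example:

#include <rte_bus_ifpga.h>

static const struct rte_afu_uuid demo_afu_uuid_map[] = {
	{ .uuid_low = 0x1122334455667788ULL, .uuid_high = 0x99aabbccddeeff00ULL },
	{ 0, 0 }	/* table terminator: both halves zero */
};

static int
demo_afu_probe(struct rte_afu_device *afu_dev)
{
	(void)afu_dev;	/* a real driver would map afu_dev->mem_resource[] here */
	return 0;
}

static int
demo_afu_remove(struct rte_afu_device *afu_dev)
{
	(void)afu_dev;
	return 0;
}

static struct rte_afu_driver demo_afu_driver = {
	.probe = demo_afu_probe,
	.remove = demo_afu_remove,
	.id_table = demo_afu_uuid_map,
};

RTE_PMD_REGISTER_AFU(demo_afu, demo_afu_driver);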
diff --git a/drivers/bus/ifpga/rte_bus_ifpga_version.map b/drivers/bus/ifpga/rte_bus_ifpga_version.map
new file mode 100644
index 00000000..a0279797
--- /dev/null
+++ b/drivers/bus/ifpga/rte_bus_ifpga_version.map
@@ -0,0 +1,10 @@
+DPDK_18.05 {
+ global:
+
+ rte_ifpga_get_integer32_arg;
+ rte_ifpga_get_string_arg;
+ rte_ifpga_driver_register;
+ rte_ifpga_driver_unregister;
+
+ local: *;
+};
diff --git a/drivers/bus/meson.build b/drivers/bus/meson.build
index c6af500e..52c755dc 100644
--- a/drivers/bus/meson.build
+++ b/drivers/bus/meson.build
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-drivers = ['pci', 'vdev']
+drivers = ['dpaa', 'fslmc', 'ifpga', 'pci', 'vdev']
std_deps = ['eal']
config_flag_fmt = 'RTE_LIBRTE_@0@_BUS'
driver_name_fmt = 'rte_bus_@0@'
diff --git a/drivers/bus/pci/Makefile b/drivers/bus/pci/Makefile
index f3df1c4c..cf373068 100644
--- a/drivers/bus/pci/Makefile
+++ b/drivers/bus/pci/Makefile
@@ -1,33 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2017 6WIND S.A.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of 6WIND nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 6WIND S.A.
include $(RTE_SDK)/mk/rte.vars.mk
@@ -37,6 +9,7 @@ EXPORT_MAP := rte_bus_pci_version.map
CFLAGS := -I$(SRCDIR) $(CFLAGS)
CFLAGS += -O3 $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
ifneq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),)
SYSTEM := linux
@@ -49,6 +22,9 @@ CFLAGS += -I$(RTE_SDK)/drivers/bus/pci/$(SYSTEM)
CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common
CFLAGS += -I$(RTE_SDK)/lib/librte_eal/$(SYSTEM)app/eal
+# memseg walk is not part of stable API yet
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_pci
diff --git a/drivers/bus/pci/bsd/Makefile b/drivers/bus/pci/bsd/Makefile
index 4450913e..c1b54c05 100644
--- a/drivers/bus/pci/bsd/Makefile
+++ b/drivers/bus/pci/bsd/Makefile
@@ -1,32 +1,4 @@
-# BSD LICENSE
-#
-# Copyright(c) 2017 6WIND S.A.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of 6WIND nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 6WIND S.A.
SRCS += pci.c
diff --git a/drivers/bus/pci/linux/Makefile b/drivers/bus/pci/linux/Makefile
index 77c5f970..96ea1d54 100644
--- a/drivers/bus/pci/linux/Makefile
+++ b/drivers/bus/pci/linux/Makefile
@@ -1,33 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2017 6WIND S.A.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of 6WIND nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 6WIND S.A.
SRCS += pci.c
SRCS += pci_uio.c
diff --git a/drivers/bus/pci/linux/pci.c b/drivers/bus/pci/linux/pci.c
index abde6411..004600f1 100644
--- a/drivers/bus/pci/linux/pci.c
+++ b/drivers/bus/pci/linux/pci.c
@@ -33,7 +33,8 @@
extern struct rte_pci_bus rte_pci_bus;
static int
-pci_get_kernel_driver_by_path(const char *filename, char *dri_name)
+pci_get_kernel_driver_by_path(const char *filename, char *dri_name,
+ size_t len)
{
int count;
char path[PATH_MAX];
@@ -54,7 +55,7 @@ pci_get_kernel_driver_by_path(const char *filename, char *dri_name)
name = strrchr(path, '/');
if (name) {
- strncpy(dri_name, name + 1, strlen(name + 1) + 1);
+ strlcpy(dri_name, name + 1, len);
return 0;
}
@@ -116,24 +117,28 @@ rte_pci_unmap_device(struct rte_pci_device *dev)
}
}
-void *
-pci_find_max_end_va(void)
+static int
+find_max_end_va(const struct rte_memseg_list *msl, void *arg)
{
- const struct rte_memseg *seg = rte_eal_get_physmem_layout();
- const struct rte_memseg *last = seg;
- unsigned i = 0;
+ size_t sz = msl->memseg_arr.len * msl->page_sz;
+ void *end_va = RTE_PTR_ADD(msl->base_va, sz);
+ void **max_va = arg;
- for (i = 0; i < RTE_MAX_MEMSEG; i++, seg++) {
- if (seg->addr == NULL)
- break;
+ if (*max_va < end_va)
+ *max_va = end_va;
+ return 0;
+}
- if (seg->addr > last->addr)
- last = seg;
+void *
+pci_find_max_end_va(void)
+{
+ void *va = NULL;
- }
- return RTE_PTR_ADD(last->addr, last->len);
+ rte_memseg_list_walk(find_max_end_va, &va);
+ return va;
}
+
/* parse one line of the "resource" sysfs file (note that the 'line'
* string is modified)
*/
@@ -310,7 +315,7 @@ pci_scan_one(const char *dirname, const struct rte_pci_addr *addr)
/* parse driver */
snprintf(filename, sizeof(filename), "%s/driver", dirname);
- ret = pci_get_kernel_driver_by_path(filename, driver);
+ ret = pci_get_kernel_driver_by_path(filename, driver, sizeof(driver));
if (ret < 0) {
RTE_LOG(ERR, EAL, "Fail to get kernel driver\n");
free(dev);
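
A sketch (illustration only) of the rte_memseg_list_walk() callback pattern the rework above switches to, here totalling the VA reserved by all memseg lists; the field accesses mirror find_max_end_va() and the function names are hypothetical:

#include <stddef.h>
#include <rte_memory.h>

static int
sum_msl_va(const struct rte_memseg_list *msl, void *arg)
{
	size_t *total = arg;

	*total += msl->memseg_arr.len * msl->page_sz;
	return 0;	/* a non-zero return would stop the walk early */
}

static size_t
total_reserved_va(void)
{
	size_t total = 0;

	rte_memseg_list_walk(sum_msl_va, &total);
	return total;
}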
diff --git a/drivers/bus/pci/meson.build b/drivers/bus/pci/meson.build
index 12756a4d..72939e59 100644
--- a/drivers/bus/pci/meson.build
+++ b/drivers/bus/pci/meson.build
@@ -14,3 +14,6 @@ else
sources += files('bsd/pci.c')
includes += include_directories('bsd')
endif
+
+# memseg walk is not part of stable API yet
+allow_experimental_apis = true
diff --git a/drivers/bus/pci/pci_common.c b/drivers/bus/pci/pci_common.c
index 2a00f365..7215aaec 100644
--- a/drivers/bus/pci/pci_common.c
+++ b/drivers/bus/pci/pci_common.c
@@ -45,12 +45,8 @@ static struct rte_devargs *pci_devargs_lookup(struct rte_pci_device *dev)
{
struct rte_devargs *devargs;
struct rte_pci_addr addr;
- struct rte_bus *pbus;
- pbus = rte_bus_find_by_name("pci");
- TAILQ_FOREACH(devargs, &devargs_list, next) {
- if (devargs->bus != pbus)
- continue;
+ RTE_EAL_DEVARGS_FOREACH("pci", devargs) {
devargs->bus->parse(devargs->name, &addr);
if (!rte_pci_addr_cmp(&dev->addr, &addr))
return devargs;
@@ -459,17 +455,20 @@ static struct rte_device *
pci_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
const void *data)
{
- struct rte_pci_device *dev;
+ const struct rte_pci_device *pstart;
+ struct rte_pci_device *pdev;
- FOREACH_DEVICE_ON_PCIBUS(dev) {
- if (start && &dev->device == start) {
- start = NULL; /* starting point found */
- continue;
- }
- if (cmp(&dev->device, data) == 0)
- return &dev->device;
+ if (start != NULL) {
+ pstart = RTE_DEV_TO_PCI_CONST(start);
+ pdev = TAILQ_NEXT(pstart, next);
+ } else {
+ pdev = TAILQ_FIRST(&rte_pci_bus.device_list);
+ }
+ while (pdev != NULL) {
+ if (cmp(&pdev->device, data) == 0)
+ return &pdev->device;
+ pdev = TAILQ_NEXT(pdev, next);
}
-
return NULL;
}
diff --git a/drivers/bus/pci/rte_bus_pci.h b/drivers/bus/pci/rte_bus_pci.h
index 357afb91..458e6d07 100644
--- a/drivers/bus/pci/rte_bus_pci.h
+++ b/drivers/bus/pci/rte_bus_pci.h
@@ -74,6 +74,9 @@ struct rte_pci_device {
*/
#define RTE_DEV_TO_PCI(ptr) container_of(ptr, struct rte_pci_device, device)
+#define RTE_DEV_TO_PCI_CONST(ptr) \
+ container_of(ptr, const struct rte_pci_device, device)
+
#define RTE_ETH_DEV_TO_PCI(eth_dev) RTE_DEV_TO_PCI((eth_dev)->device)
/** Any PCI device identifier (vendor, device, ...) */
diff --git a/drivers/bus/vdev/Makefile b/drivers/bus/vdev/Makefile
index 24d424a3..bd0bb895 100644
--- a/drivers/bus/vdev/Makefile
+++ b/drivers/bus/vdev/Makefile
@@ -10,6 +10,7 @@ LIB = librte_bus_vdev.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
# versioning export map
EXPORT_MAP := rte_bus_vdev_version.map
diff --git a/drivers/bus/vdev/meson.build b/drivers/bus/vdev/meson.build
index 03cb87eb..2ee648b4 100644
--- a/drivers/bus/vdev/meson.build
+++ b/drivers/bus/vdev/meson.build
@@ -3,3 +3,5 @@
sources = files('vdev.c')
install_headers('rte_bus_vdev.h')
+
+allow_experimental_apis = true
diff --git a/drivers/bus/vdev/rte_bus_vdev.h b/drivers/bus/vdev/rte_bus_vdev.h
index f9d8a238..f9b5eb59 100644
--- a/drivers/bus/vdev/rte_bus_vdev.h
+++ b/drivers/bus/vdev/rte_bus_vdev.h
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016 RehiveTech. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of RehiveTech nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 RehiveTech. All rights reserved.
*/
#ifndef RTE_VDEV_H
@@ -53,6 +25,9 @@ struct rte_vdev_device {
#define RTE_DEV_TO_VDEV(ptr) \
container_of(ptr, struct rte_vdev_device, device)
+#define RTE_DEV_TO_VDEV_CONST(ptr) \
+ container_of(ptr, const struct rte_vdev_device, device)
+
static inline const char *
rte_vdev_device_name(const struct rte_vdev_device *dev)
{
diff --git a/drivers/bus/vdev/vdev.c b/drivers/bus/vdev/vdev.c
index e4bc7246..6139dd55 100644
--- a/drivers/bus/vdev/vdev.c
+++ b/drivers/bus/vdev/vdev.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016 RehiveTech. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of RehiveTech nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 RehiveTech. All rights reserved.
*/
#include <string.h>
@@ -46,11 +18,14 @@
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_spinlock.h>
+#include <rte_string_fns.h>
#include <rte_errno.h>
#include "rte_bus_vdev.h"
#include "vdev_logs.h"
+#define VDEV_MP_KEY "bus_vdev_mp"
+
int vdev_logtype_bus;
/* Forward declare to access virtual bus name */
@@ -61,6 +36,10 @@ TAILQ_HEAD(vdev_device_list, rte_vdev_device);
static struct vdev_device_list vdev_device_list =
TAILQ_HEAD_INITIALIZER(vdev_device_list);
+/* The lock needs to be recursive because a vdev can manage another vdev. */
+static rte_spinlock_recursive_t vdev_device_list_lock =
+ RTE_SPINLOCK_RECURSIVE_INITIALIZER;
+
struct vdev_driver_list vdev_driver_list =
TAILQ_HEAD_INITIALIZER(vdev_driver_list);
@@ -165,7 +144,7 @@ vdev_probe_all_drivers(struct rte_vdev_device *dev)
name = rte_vdev_device_name(dev);
- VDEV_LOG(DEBUG, "Search driver %s to probe device %s\n", name,
+ VDEV_LOG(DEBUG, "Search driver %s to probe device %s", name,
rte_vdev_device_name(dev));
if (vdev_parse(name, &driver))
@@ -177,6 +156,7 @@ vdev_probe_all_drivers(struct rte_vdev_device *dev)
return ret;
}
+/* The caller is responsible for thread safety */
static struct rte_vdev_device *
find_vdev(const char *name)
{
@@ -188,7 +168,7 @@ find_vdev(const char *name)
TAILQ_FOREACH(dev, &vdev_device_list, next) {
const char *devname = rte_vdev_device_name(dev);
- if (!strncmp(devname, name, strlen(name)))
+ if (!strcmp(devname, name))
return dev;
}
@@ -221,8 +201,8 @@ alloc_devargs(const char *name, const char *args)
return devargs;
}
-int
-rte_vdev_init(const char *name, const char *args)
+static int
+insert_vdev(const char *name, const char *args, struct rte_vdev_device **p_dev)
{
struct rte_vdev_device *dev;
struct rte_devargs *devargs;
@@ -231,10 +211,6 @@ rte_vdev_init(const char *name, const char *args)
if (name == NULL)
return -EINVAL;
- dev = find_vdev(name);
- if (dev)
- return -EEXIST;
-
devargs = alloc_devargs(name, args);
if (!devargs)
return -ENOMEM;
@@ -249,18 +225,18 @@ rte_vdev_init(const char *name, const char *args)
dev->device.numa_node = SOCKET_ID_ANY;
dev->device.name = devargs->name;
- ret = vdev_probe_all_drivers(dev);
- if (ret) {
- if (ret > 0)
- VDEV_LOG(ERR, "no driver found for %s\n", name);
+ if (find_vdev(name)) {
+ ret = -EEXIST;
goto fail;
}
- TAILQ_INSERT_TAIL(&devargs_list, devargs, next);
-
TAILQ_INSERT_TAIL(&vdev_device_list, dev, next);
- return 0;
+ rte_devargs_insert(devargs);
+
+ if (p_dev)
+ *p_dev = dev;
+ return 0;
fail:
free(devargs->args);
free(devargs);
@@ -268,6 +244,31 @@ fail:
return ret;
}
+int
+rte_vdev_init(const char *name, const char *args)
+{
+ struct rte_vdev_device *dev;
+ struct rte_devargs *devargs;
+ int ret;
+
+ rte_spinlock_recursive_lock(&vdev_device_list_lock);
+ ret = insert_vdev(name, args, &dev);
+ if (ret == 0) {
+ ret = vdev_probe_all_drivers(dev);
+ if (ret) {
+ if (ret > 0)
+ VDEV_LOG(ERR, "no driver found for %s", name);
+ /* If fails, remove it from vdev list */
+ devargs = dev->device.devargs;
+ TAILQ_REMOVE(&vdev_device_list, dev, next);
+ rte_devargs_remove(devargs->bus->name, devargs->name);
+ free(dev);
+ }
+ }
+ rte_spinlock_recursive_unlock(&vdev_device_list_lock);
+ return ret;
+}
+
static int
vdev_remove_driver(struct rte_vdev_device *dev)
{
@@ -275,7 +276,7 @@ vdev_remove_driver(struct rte_vdev_device *dev)
const struct rte_vdev_driver *driver;
if (!dev->device.driver) {
- VDEV_LOG(DEBUG, "no driver attach to device %s\n", name);
+ VDEV_LOG(DEBUG, "no driver attach to device %s", name);
return 1;
}
@@ -294,23 +295,98 @@ rte_vdev_uninit(const char *name)
if (name == NULL)
return -EINVAL;
- dev = find_vdev(name);
- if (!dev)
- return -ENOENT;
+ rte_spinlock_recursive_lock(&vdev_device_list_lock);
- devargs = dev->device.devargs;
+ dev = find_vdev(name);
+ if (!dev) {
+ ret = -ENOENT;
+ goto unlock;
+ }
ret = vdev_remove_driver(dev);
if (ret)
- return ret;
+ goto unlock;
TAILQ_REMOVE(&vdev_device_list, dev, next);
+ devargs = dev->device.devargs;
+ rte_devargs_remove(devargs->bus->name, devargs->name);
+ free(dev);
- TAILQ_REMOVE(&devargs_list, devargs, next);
+unlock:
+ rte_spinlock_recursive_unlock(&vdev_device_list_lock);
+ return ret;
+}
+
+struct vdev_param {
+#define VDEV_SCAN_REQ 1
+#define VDEV_SCAN_ONE 2
+#define VDEV_SCAN_REP 3
+ int type;
+ int num;
+ char name[RTE_DEV_NAME_MAX_LEN];
+};
+
+static int vdev_plug(struct rte_device *dev);
+
+/**
+ * This function acts as the handler in both the primary and secondary
+ * processes for static vdev discovery when a secondary process is booting.
+ *
+ * step 1, the secondary process sends a sync request asking the primary
+ * for its vdevs;
+ * step 2, the primary process receives the request and sends its vdevs
+ * one by one;
+ * step 3, the primary process sends back a reply indicating how many
+ * vdevs were sent.
+ */
+static int
+vdev_action(const struct rte_mp_msg *mp_msg, const void *peer)
+{
+ struct rte_vdev_device *dev;
+ struct rte_mp_msg mp_resp;
+ struct vdev_param *ou = (struct vdev_param *)&mp_resp.param;
+ const struct vdev_param *in = (const struct vdev_param *)mp_msg->param;
+ const char *devname;
+ int num;
+
+ strlcpy(mp_resp.name, VDEV_MP_KEY, sizeof(mp_resp.name));
+ mp_resp.len_param = sizeof(*ou);
+ mp_resp.num_fds = 0;
+
+ switch (in->type) {
+ case VDEV_SCAN_REQ:
+ ou->type = VDEV_SCAN_ONE;
+ ou->num = 1;
+ num = 0;
+
+ rte_spinlock_recursive_lock(&vdev_device_list_lock);
+ TAILQ_FOREACH(dev, &vdev_device_list, next) {
+ devname = rte_vdev_device_name(dev);
+ if (strlen(devname) == 0) {
+ VDEV_LOG(INFO, "vdev with no name is not sent");
+ continue;
+ }
+ VDEV_LOG(INFO, "send vdev, %s", devname);
+ strlcpy(ou->name, devname, RTE_DEV_NAME_MAX_LEN);
+ if (rte_mp_sendmsg(&mp_resp) < 0)
+ VDEV_LOG(ERR, "send vdev, %s, failed, %s",
+ devname, strerror(rte_errno));
+ num++;
+ }
+ rte_spinlock_recursive_unlock(&vdev_device_list_lock);
+
+ ou->type = VDEV_SCAN_REP;
+ ou->num = num;
+ if (rte_mp_reply(&mp_resp, peer) < 0)
+ VDEV_LOG(ERR, "Failed to reply a scan request");
+ break;
+ case VDEV_SCAN_ONE:
+ VDEV_LOG(INFO, "receive vdev, %s", in->name);
+ if (insert_vdev(in->name, NULL, NULL) < 0)
+ VDEV_LOG(ERR, "failed to add vdev, %s", in->name);
+ break;
+ default:
+ VDEV_LOG(ERR, "vdev cannot recognize this message");
+ }
- free(devargs->args);
- free(devargs);
- free(dev);
return 0;
}
@@ -321,13 +397,41 @@ vdev_scan(void)
struct rte_devargs *devargs;
struct vdev_custom_scan *custom_scan;
+ if (rte_mp_action_register(VDEV_MP_KEY, vdev_action) < 0 &&
+ rte_errno != EEXIST) {
+ VDEV_LOG(ERR, "Failed to add vdev mp action");
+ return -1;
+ }
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ struct rte_mp_msg mp_req, *mp_rep;
+ struct rte_mp_reply mp_reply;
+ struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
+ struct vdev_param *req = (struct vdev_param *)mp_req.param;
+ struct vdev_param *resp;
+
+ strlcpy(mp_req.name, VDEV_MP_KEY, sizeof(mp_req.name));
+ mp_req.len_param = sizeof(*req);
+ mp_req.num_fds = 0;
+ req->type = VDEV_SCAN_REQ;
+ if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
+ mp_reply.nb_received == 1) {
+ mp_rep = &mp_reply.msgs[0];
+ resp = (struct vdev_param *)mp_rep->param;
+ VDEV_LOG(INFO, "Received %d vdevs", resp->num);
+ } else
+ VDEV_LOG(ERR, "Failed to request vdev from primary");
+
+ /* Fall through to allow private vdevs in secondary process */
+ }
+
/* call custom scan callbacks if any */
rte_spinlock_lock(&vdev_custom_scan_lock);
TAILQ_FOREACH(custom_scan, &vdev_custom_scans, next) {
if (custom_scan->callback != NULL)
/*
* the callback should update devargs list
- * by calling rte_eal_devargs_insert() with
+ * by calling rte_devargs_insert() with
* devargs.bus = rte_bus_find_by_name("vdev");
* devargs.type = RTE_DEVTYPE_VIRTUAL;
* devargs.policy = RTE_DEV_WHITELISTED;
@@ -337,24 +441,27 @@ vdev_scan(void)
rte_spinlock_unlock(&vdev_custom_scan_lock);
/* for virtual devices we scan the devargs_list populated via cmdline */
- TAILQ_FOREACH(devargs, &devargs_list, next) {
-
- if (devargs->bus != &rte_vdev_bus)
- continue;
-
- dev = find_vdev(devargs->name);
- if (dev)
- continue;
+ RTE_EAL_DEVARGS_FOREACH("vdev", devargs) {
dev = calloc(1, sizeof(*dev));
if (!dev)
return -1;
+ rte_spinlock_recursive_lock(&vdev_device_list_lock);
+
+ if (find_vdev(devargs->name)) {
+ rte_spinlock_recursive_unlock(&vdev_device_list_lock);
+ free(dev);
+ continue;
+ }
+
dev->device.devargs = devargs;
dev->device.numa_node = SOCKET_ID_ANY;
dev->device.name = devargs->name;
TAILQ_INSERT_TAIL(&vdev_device_list, dev, next);
+
+ rte_spinlock_recursive_unlock(&vdev_device_list_lock);
}
return 0;
@@ -368,12 +475,16 @@ vdev_probe(void)
/* call the init function for each virtual device */
TAILQ_FOREACH(dev, &vdev_device_list, next) {
+ /* we don't use the vdev lock here, as it's only used in DPDK
+ * initialization; and we don't want to hold such a lock when
+ * we call each driver probe.
+ */
if (dev->device.driver)
continue;
if (vdev_probe_all_drivers(dev)) {
- VDEV_LOG(ERR, "failed to initialize %s device\n",
+ VDEV_LOG(ERR, "failed to initialize %s device",
rte_vdev_device_name(dev));
ret = -1;
}
@@ -386,17 +497,24 @@ static struct rte_device *
vdev_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
const void *data)
{
+ const struct rte_vdev_device *vstart;
struct rte_vdev_device *dev;
- TAILQ_FOREACH(dev, &vdev_device_list, next) {
- if (start && &dev->device == start) {
- start = NULL;
- continue;
- }
+ rte_spinlock_recursive_lock(&vdev_device_list_lock);
+ if (start != NULL) {
+ vstart = RTE_DEV_TO_VDEV_CONST(start);
+ dev = TAILQ_NEXT(vstart, next);
+ } else {
+ dev = TAILQ_FIRST(&vdev_device_list);
+ }
+ while (dev != NULL) {
if (cmp(&dev->device, data) == 0)
- return &dev->device;
+ break;
+ dev = TAILQ_NEXT(dev, next);
}
- return NULL;
+ rte_spinlock_recursive_unlock(&vdev_device_list_lock);
+
+ return dev ? &dev->device : NULL;
}
static int
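
Illustration only: the rte_mp action/reply pattern vdev now uses for secondary-process scanning, reduced to a hypothetical "demo_mp" key that simply echoes the request payload back to the peer:

#include <errno.h>
#include <string.h>
#include <rte_eal.h>
#include <rte_errno.h>
#include <rte_string_fns.h>

static int
demo_mp_action(const struct rte_mp_msg *msg, const void *peer)
{
	struct rte_mp_msg resp;

	memset(&resp, 0, sizeof(resp));
	strlcpy(resp.name, "demo_mp", sizeof(resp.name));
	resp.len_param = msg->len_param;
	memcpy(resp.param, msg->param, msg->len_param);	/* echo the payload */
	return rte_mp_reply(&resp, peer);
}

static int
demo_mp_register(void)
{
	if (rte_mp_action_register("demo_mp", demo_mp_action) < 0 &&
			rte_errno != EEXIST)
		return -1;
	return 0;
}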
diff --git a/drivers/common/Makefile b/drivers/common/Makefile
new file mode 100644
index 00000000..0fd22376
--- /dev/null
+++ b/drivers/common/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF)$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL),yy)
+DIRS-y += octeontx
+endif
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/common/meson.build b/drivers/common/meson.build
new file mode 100644
index 00000000..5f6341b8
--- /dev/null
+++ b/drivers/common/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+
+std_deps = ['eal']
+drivers = ['octeontx']
+config_flag_fmt = 'RTE_LIBRTE_@0@_COMMON'
+driver_name_fmt = 'rte_common_@0@'
diff --git a/drivers/common/octeontx/Makefile b/drivers/common/octeontx/Makefile
new file mode 100644
index 00000000..dfdb9f19
--- /dev/null
+++ b/drivers/common/octeontx/Makefile
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_common_octeontx.a
+
+CFLAGS += $(WERROR_FLAGS)
+EXPORT_MAP := rte_common_octeontx_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-y += octeontx_mbox.c
+
+LDLIBS += -lrte_eal
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/common/octeontx/meson.build b/drivers/common/octeontx/meson.build
new file mode 100644
index 00000000..203d1ef4
--- /dev/null
+++ b/drivers/common/octeontx/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+#
+
+sources = files('octeontx_mbox.c')
diff --git a/drivers/mempool/octeontx/octeontx_mbox.c b/drivers/common/octeontx/octeontx_mbox.c
index f8cb6a45..93e6e857 100644
--- a/drivers/mempool/octeontx/octeontx_mbox.c
+++ b/drivers/common/octeontx/octeontx_mbox.c
@@ -11,7 +11,6 @@
#include <rte_spinlock.h>
#include "octeontx_mbox.h"
-#include "octeontx_pool_logs.h"
/* Mbox operation timeout in seconds */
#define MBOX_WAIT_TIME_SEC 3
@@ -60,6 +59,17 @@ struct mbox_ram_hdr {
};
};
+int octeontx_logtype_mbox;
+
+RTE_INIT(otx_init_log);
+static void
+otx_init_log(void)
+{
+ octeontx_logtype_mbox = rte_log_register("pmd.octeontx.mbox");
+ if (octeontx_logtype_mbox >= 0)
+ rte_log_set_level(octeontx_logtype_mbox, RTE_LOG_NOTICE);
+}
+
static inline void
mbox_msgcpy(volatile uint8_t *d, volatile const uint8_t *s, uint16_t size)
{
@@ -181,33 +191,60 @@ mbox_send(struct mbox *m, struct octeontx_mbox_hdr *hdr, const void *txmsg,
return res;
}
-static inline int
-mbox_setup(struct mbox *m)
+int
+octeontx_mbox_set_ram_mbox_base(uint8_t *ram_mbox_base)
+{
+ struct mbox *m = &octeontx_mbox;
+
+ if (m->init_once)
+ return -EALREADY;
+
+ if (ram_mbox_base == NULL) {
+ mbox_log_err("Invalid ram_mbox_base=%p", ram_mbox_base);
+ return -EINVAL;
+ }
+
+ m->ram_mbox_base = ram_mbox_base;
+
+ if (m->reg != NULL) {
+ rte_spinlock_init(&m->lock);
+ m->init_once = 1;
+ }
+
+ return 0;
+}
+
+int
+octeontx_mbox_set_reg(uint8_t *reg)
{
- if (unlikely(m->init_once == 0)) {
+ struct mbox *m = &octeontx_mbox;
+
+ if (m->init_once)
+ return -EALREADY;
+
+ if (reg == NULL) {
+ mbox_log_err("Invalid reg=%p", reg);
+ return -EINVAL;
+ }
+
+ m->reg = reg;
+
+ if (m->ram_mbox_base != NULL) {
rte_spinlock_init(&m->lock);
- m->ram_mbox_base = octeontx_ssovf_bar(OCTEONTX_SSO_HWS, 0, 4);
- m->reg = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, 0, 0);
- m->reg += SSO_VHGRP_PF_MBOX(1);
-
- if (m->ram_mbox_base == NULL || m->reg == NULL) {
- mbox_log_err("Invalid ram_mbox_base=%p or reg=%p",
- m->ram_mbox_base, m->reg);
- return -EINVAL;
- }
m->init_once = 1;
}
+
return 0;
}
int
-octeontx_ssovf_mbox_send(struct octeontx_mbox_hdr *hdr, void *txdata,
+octeontx_mbox_send(struct octeontx_mbox_hdr *hdr, void *txdata,
uint16_t txlen, void *rxdata, uint16_t rxlen)
{
struct mbox *m = &octeontx_mbox;
RTE_BUILD_BUG_ON(sizeof(struct mbox_ram_hdr) != 8);
- if (rte_eal_process_type() != RTE_PROC_PRIMARY || mbox_setup(m))
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return -EINVAL;
return mbox_send(m, hdr, txdata, txlen, rxdata, rxlen);
diff --git a/drivers/common/octeontx/octeontx_mbox.h b/drivers/common/octeontx/octeontx_mbox.h
new file mode 100644
index 00000000..43fbda28
--- /dev/null
+++ b/drivers/common/octeontx/octeontx_mbox.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef __OCTEONTX_MBOX_H__
+#define __OCTEONTX_MBOX_H__
+
+#include <rte_common.h>
+#include <rte_spinlock.h>
+
+#define SSOW_BAR4_LEN (64 * 1024)
+#define SSO_VHGRP_PF_MBOX(x) (0x200ULL | ((x) << 3))
+
+#define MBOX_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, octeontx_logtype_mbox,\
+ "%s() line %u: " fmt "\n", __func__, __LINE__, ## args)
+
+#define mbox_log_info(fmt, ...) MBOX_LOG(INFO, fmt, ##__VA_ARGS__)
+#define mbox_log_dbg(fmt, ...) MBOX_LOG(DEBUG, fmt, ##__VA_ARGS__)
+#define mbox_log_err(fmt, ...) MBOX_LOG(ERR, fmt, ##__VA_ARGS__)
+#define mbox_func_trace mbox_log_dbg
+
+extern int octeontx_logtype_mbox;
+
+struct octeontx_mbox_hdr {
+ uint16_t vfid; /* VF index or pf resource index local to the domain */
+ uint8_t coproc; /* Coprocessor id */
+ uint8_t msg; /* Message id */
+ uint8_t res_code; /* Functional layer response code */
+};
+
+int octeontx_mbox_set_ram_mbox_base(uint8_t *ram_mbox_base);
+int octeontx_mbox_set_reg(uint8_t *reg);
+int octeontx_mbox_send(struct octeontx_mbox_hdr *hdr,
+ void *txdata, uint16_t txlen, void *rxdata, uint16_t rxlen);
+
+#endif /* __OCTEONTX_MBOX_H__ */
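
Usage sketch (illustration only): how a consumer PMD wires up the now-split mailbox setup before sending; the BAR pointers, coprocessor and message IDs are hypothetical:

#include <stdint.h>
#include "octeontx_mbox.h"

static int
demo_mbox_setup_and_send(uint8_t *ssow_bar4, uint8_t *ssovf_bar0)
{
	struct octeontx_mbox_hdr hdr = {
		.vfid = 0,
		.coproc = 0x1,	/* hypothetical coprocessor id */
		.msg = 0x2,	/* hypothetical message id */
	};
	uint8_t resp[8];

	/* Both halves must be provided before octeontx_mbox_send() can work. */
	if (octeontx_mbox_set_ram_mbox_base(ssow_bar4) ||
	    octeontx_mbox_set_reg(ssovf_bar0 + SSO_VHGRP_PF_MBOX(1)))
		return -1;

	return octeontx_mbox_send(&hdr, NULL, 0, resp, sizeof(resp));
}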
diff --git a/drivers/common/octeontx/rte_common_octeontx_version.map b/drivers/common/octeontx/rte_common_octeontx_version.map
new file mode 100644
index 00000000..f04b3b7f
--- /dev/null
+++ b/drivers/common/octeontx/rte_common_octeontx_version.map
@@ -0,0 +1,7 @@
+DPDK_18.05 {
+ global:
+
+ octeontx_mbox_set_ram_mbox_base;
+ octeontx_mbox_set_reg;
+ octeontx_mbox_send;
+};
diff --git a/drivers/compress/Makefile b/drivers/compress/Makefile
new file mode 100644
index 00000000..592497f5
--- /dev/null
+++ b/drivers/compress/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_ISAL) += isal
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/compress/isal/Makefile b/drivers/compress/isal/Makefile
new file mode 100644
index 00000000..95904f64
--- /dev/null
+++ b/drivers/compress/isal/Makefile
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_isal_comp.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+# external library dependencies
+LDLIBS += -lisal
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_compressdev
+LDLIBS += -lrte_bus_vdev
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_isal_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ISAL) += isal_compress_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ISAL) += isal_compress_pmd_ops.c
+
+# export include files
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/compress/isal/isal_compress_pmd.c b/drivers/compress/isal/isal_compress_pmd.c
new file mode 100644
index 00000000..0f025a3b
--- /dev/null
+++ b/drivers/compress/isal/isal_compress_pmd.c
@@ -0,0 +1,471 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#include <isa-l.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_compressdev_pmd.h>
+
+#include "isal_compress_pmd_private.h"
+
+#define RTE_COMP_ISAL_WINDOW_SIZE 15
+#define RTE_COMP_ISAL_LEVEL_ZERO 0 /* ISA-L Level 0 used for fixed Huffman */
+#define RTE_COMP_ISAL_LEVEL_ONE 1
+#define RTE_COMP_ISAL_LEVEL_TWO 2
+#define RTE_COMP_ISAL_LEVEL_THREE 3 /* Optimised for AVX512 & AVX2 only */
+
+int isal_logtype_driver;
+
+/* Verify and set private xform parameters */
+int
+isal_comp_set_priv_xform_parameters(struct isal_priv_xform *priv_xform,
+ const struct rte_comp_xform *xform)
+{
+ if (xform == NULL)
+ return -EINVAL;
+
+ /* Set compression private xform variables */
+ if (xform->type == RTE_COMP_COMPRESS) {
+ /* Set private xform type - COMPRESS/DECOMPRESS */
+ priv_xform->type = RTE_COMP_COMPRESS;
+
+ /* Set private xform algorithm */
+ if (xform->compress.algo != RTE_COMP_ALGO_DEFLATE) {
+ if (xform->compress.algo == RTE_COMP_ALGO_NULL) {
+ ISAL_PMD_LOG(ERR, "By-pass not supported\n");
+ return -ENOTSUP;
+ }
+ ISAL_PMD_LOG(ERR, "Algorithm not supported\n");
+ return -ENOTSUP;
+ }
+ priv_xform->compress.algo = RTE_COMP_ALGO_DEFLATE;
+
+ /* Set private xform checksum - raw deflate by default */
+ if (xform->compress.chksum != RTE_COMP_CHECKSUM_NONE) {
+ ISAL_PMD_LOG(ERR, "Checksum not supported\n");
+ return -ENOTSUP;
+ }
+
+ /* Set private xform window size, 32K supported */
+ if (xform->compress.window_size == RTE_COMP_ISAL_WINDOW_SIZE)
+ priv_xform->compress.window_size =
+ RTE_COMP_ISAL_WINDOW_SIZE;
+ else {
+ ISAL_PMD_LOG(ERR, "Window size not supported\n");
+ return -ENOTSUP;
+ }
+
+ /* Set private xform huffman type */
+ switch (xform->compress.deflate.huffman) {
+ case(RTE_COMP_HUFFMAN_DEFAULT):
+ priv_xform->compress.deflate.huffman =
+ RTE_COMP_HUFFMAN_DEFAULT;
+ break;
+ case(RTE_COMP_HUFFMAN_FIXED):
+ priv_xform->compress.deflate.huffman =
+ RTE_COMP_HUFFMAN_FIXED;
+ break;
+ case(RTE_COMP_HUFFMAN_DYNAMIC):
+ priv_xform->compress.deflate.huffman =
+ RTE_COMP_HUFFMAN_DYNAMIC;
+ break;
+ default:
+ ISAL_PMD_LOG(ERR, "Huffman code not supported\n");
+ return -ENOTSUP;
+ }
+
+ /* Set private xform level.
+ * Checking compliance with compressdev API, -1 <= level <= 9
+ */
+ if (xform->compress.level < RTE_COMP_LEVEL_PMD_DEFAULT ||
+ xform->compress.level > RTE_COMP_LEVEL_MAX) {
+ ISAL_PMD_LOG(ERR, "Compression level out of range\n");
+ return -EINVAL;
+ }
+ /* Compressdev API level 0 (no compression) is not
+ * supported by ISA-L
+ */
+ else if (xform->compress.level == RTE_COMP_LEVEL_NONE) {
+ ISAL_PMD_LOG(ERR, "Level 0 (no compression) not supported\n");
+ return -ENOTSUP;
+ }
+ /* If using fixed huffman code, level must be 0 */
+ else if (priv_xform->compress.deflate.huffman ==
+ RTE_COMP_HUFFMAN_FIXED) {
+ ISAL_PMD_LOG(DEBUG, "ISA-L level 0 used due to a"
+ " fixed huffman code\n");
+ priv_xform->compress.level = RTE_COMP_ISAL_LEVEL_ZERO;
+ priv_xform->level_buffer_size =
+ ISAL_DEF_LVL0_DEFAULT;
+ } else {
+ /* Mapping API levels to ISA-L levels 1,2 & 3 */
+ switch (xform->compress.level) {
+ case RTE_COMP_LEVEL_PMD_DEFAULT:
+ /* Default is 1 if not using fixed huffman */
+ priv_xform->compress.level =
+ RTE_COMP_ISAL_LEVEL_ONE;
+ priv_xform->level_buffer_size =
+ ISAL_DEF_LVL1_DEFAULT;
+ break;
+ case RTE_COMP_LEVEL_MIN:
+ priv_xform->compress.level =
+ RTE_COMP_ISAL_LEVEL_ONE;
+ priv_xform->level_buffer_size =
+ ISAL_DEF_LVL1_DEFAULT;
+ break;
+ case RTE_COMP_ISAL_LEVEL_TWO:
+ priv_xform->compress.level =
+ RTE_COMP_ISAL_LEVEL_TWO;
+ priv_xform->level_buffer_size =
+ ISAL_DEF_LVL2_DEFAULT;
+ break;
+ /* Level 3 or higher requested */
+ default:
+ /* Check for AVX512, to use ISA-L level 3 */
+ if (rte_cpu_get_flag_enabled(
+ RTE_CPUFLAG_AVX512F)) {
+ priv_xform->compress.level =
+ RTE_COMP_ISAL_LEVEL_THREE;
+ priv_xform->level_buffer_size =
+ ISAL_DEF_LVL3_DEFAULT;
+ }
+ /* Check for AVX2, to use ISA-L level 3 */
+ else if (rte_cpu_get_flag_enabled(
+ RTE_CPUFLAG_AVX2)) {
+ priv_xform->compress.level =
+ RTE_COMP_ISAL_LEVEL_THREE;
+ priv_xform->level_buffer_size =
+ ISAL_DEF_LVL3_DEFAULT;
+ } else {
+ ISAL_PMD_LOG(DEBUG, "Requested ISA-L level"
+ " 3 or above; Level 3 optimized"
+ " for AVX512 & AVX2 only."
+ " level changed to 2.\n");
+ priv_xform->compress.level =
+ RTE_COMP_ISAL_LEVEL_TWO;
+ priv_xform->level_buffer_size =
+ ISAL_DEF_LVL2_DEFAULT;
+ }
+ }
+ }
+ }
+
+ /* Set decompression private xform variables */
+ else if (xform->type == RTE_COMP_DECOMPRESS) {
+
+ /* Set private xform type - COMPRESS/DECOMPRESS */
+ priv_xform->type = RTE_COMP_DECOMPRESS;
+
+ /* Set private xform algorithm */
+ if (xform->decompress.algo != RTE_COMP_ALGO_DEFLATE) {
+ if (xform->decompress.algo == RTE_COMP_ALGO_NULL) {
+ ISAL_PMD_LOG(ERR, "By pass not supported\n");
+ return -ENOTSUP;
+ }
+ ISAL_PMD_LOG(ERR, "Algorithm not supported\n");
+ return -ENOTSUP;
+ }
+ priv_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE;
+
+ /* Set private xform checksum - raw deflate by default */
+ if (xform->decompress.chksum != RTE_COMP_CHECKSUM_NONE) {
+ ISAL_PMD_LOG(ERR, "Checksum not supported\n");
+ return -ENOTSUP;
+ }
+
+ /* Set private xform window size, 32K supported */
+ if (xform->decompress.window_size == RTE_COMP_ISAL_WINDOW_SIZE)
+ priv_xform->decompress.window_size =
+ RTE_COMP_ISAL_WINDOW_SIZE;
+ else {
+ ISAL_PMD_LOG(ERR, "Window size not supported\n");
+ return -ENOTSUP;
+ }
+ }
+ return 0;
+}
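
The mapping above is: fixed Huffman forces ISA-L level 0; RTE_COMP_LEVEL_PMD_DEFAULT and RTE_COMP_LEVEL_MIN map to ISA-L level 1; level 2 maps directly; anything higher selects ISA-L level 3 only when AVX512 or AVX2 is present, otherwise it falls back to level 2. A minimal sketch of a compress xform that would pass this validation (the concrete values are illustrative assumptions, not the only valid ones):

    /* Hedged example: a deflate xform accepted by the checks above. */
    struct rte_comp_xform xform = {
        .type = RTE_COMP_COMPRESS,
        .compress = {
            .algo = RTE_COMP_ALGO_DEFLATE,
            .deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC,
            .level = RTE_COMP_LEVEL_PMD_DEFAULT, /* mapped to ISA-L level 1 */
            .window_size = 15,                   /* only the 32K window is accepted */
            .chksum = RTE_COMP_CHECKSUM_NONE,    /* raw deflate only */
        },
    };
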
+
+/* Stateless Compression Function */
+static int
+process_isal_deflate(struct rte_comp_op *op, struct isal_comp_qp *qp,
+ struct isal_priv_xform *priv_xform)
+{
+ int ret = 0;
+ op->status = RTE_COMP_OP_STATUS_SUCCESS;
+
+ /* Required due to init clearing level_buf */
+ uint8_t *temp_level_buf = qp->stream->level_buf;
+
+ /* Initialize compression stream */
+ isal_deflate_stateless_init(qp->stream);
+
+ qp->stream->level_buf = temp_level_buf;
+
+ /* Stateless operation, input will be consumed in one go */
+ qp->stream->flush = NO_FLUSH;
+
+ /* set op level & intermediate level buffer */
+ qp->stream->level = priv_xform->compress.level;
+ qp->stream->level_buf_size = priv_xform->level_buffer_size;
+
+ /* Point compression stream structure to input/output buffers */
+ qp->stream->avail_in = op->src.length;
+ qp->stream->next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
+ qp->stream->avail_out = op->m_dst->data_len;
+ qp->stream->next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
+ qp->stream->end_of_stream = 1; /* All input consumed in one go */
+
+ if (unlikely(!qp->stream->next_in || !qp->stream->next_out)) {
+ ISAL_PMD_LOG(ERR, "Invalid source or destination buffers\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
+
+ /* Set op huffman code */
+ if (priv_xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
+ isal_deflate_set_hufftables(qp->stream, NULL,
+ IGZIP_HUFFTABLE_STATIC);
+ else if (priv_xform->compress.deflate.huffman ==
+ RTE_COMP_HUFFMAN_DEFAULT)
+ isal_deflate_set_hufftables(qp->stream, NULL,
+ IGZIP_HUFFTABLE_DEFAULT);
+ /* Dynamically change the huffman code to suit the input data */
+ else if (priv_xform->compress.deflate.huffman ==
+ RTE_COMP_HUFFMAN_DYNAMIC)
+ isal_deflate_set_hufftables(qp->stream, NULL,
+ IGZIP_HUFFTABLE_DEFAULT);
+
+ /* Execute compression operation */
+ ret = isal_deflate_stateless(qp->stream);
+
+ /* Check that output buffer did not run out of space */
+ if (ret == STATELESS_OVERFLOW) {
+ ISAL_PMD_LOG(ERR, "Output buffer not big enough\n");
+ op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ return ret;
+ }
+
+ /* Check that input buffer has been fully consumed */
+ if (qp->stream->avail_in != (uint32_t)0) {
+ ISAL_PMD_LOG(ERR, "Input buffer could not be read entirely\n");
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return -1;
+ }
+
+ if (ret != COMP_OK) {
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return ret;
+ }
+
+ op->consumed = qp->stream->total_in;
+ op->produced = qp->stream->total_out;
+
+ return ret;
+}
+
+/* Stateless Decompression Function */
+static int
+process_isal_inflate(struct rte_comp_op *op, struct isal_comp_qp *qp)
+{
+ int ret = 0;
+
+ op->status = RTE_COMP_OP_STATUS_SUCCESS;
+
+ /* Initialize decompression state */
+ isal_inflate_init(qp->state);
+
+ /* Point decompression state structure to input/output buffers */
+ qp->state->avail_in = op->src.length;
+ qp->state->next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
+ qp->state->avail_out = op->m_dst->data_len;
+ qp->state->next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
+
+ if (unlikely(!qp->state->next_in || !qp->state->next_out)) {
+ ISAL_PMD_LOG(ERR, "Invalid source or destination buffers\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
+
+ /* Execute decompression operation */
+ ret = isal_inflate_stateless(qp->state);
+
+ if (ret == ISAL_OUT_OVERFLOW) {
+ ISAL_PMD_LOG(ERR, "Output buffer not big enough\n");
+ op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ return ret;
+ }
+
+ /* Check that input buffer has been fully consumed */
+ if (qp->state->avail_in != (uint32_t)0) {
+ ISAL_PMD_LOG(ERR, "Input buffer could not be read entirely\n");
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return -1;
+ }
+
+ if (ret != ISAL_DECOMP_OK) {
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return ret;
+ }
+
+ op->consumed = op->src.length - qp->state->avail_in;
+ op->produced = qp->state->total_out;
+
+ return ret;
+}
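
Both stateless paths above read the input at rte_pktmbuf_mtod(op->m_src) and write to op->m_dst, using m_dst->data_len as the output capacity, so source and destination must each be a single mbuf segment. A hedged sketch of how an application might fill in such an op before enqueue (op_pool, src_mbuf, dst_mbuf and priv_xform are assumed to already exist):

    struct rte_comp_op *op = rte_comp_op_alloc(op_pool);
    op->op_type = RTE_COMP_OP_STATELESS;
    op->flush_flag = RTE_COMP_FLUSH_FINAL;       /* whole input consumed in one go */
    op->m_src = src_mbuf;
    op->m_dst = dst_mbuf;
    op->src.offset = 0;
    op->src.length = rte_pktmbuf_data_len(src_mbuf);
    op->dst.offset = 0;
    op->private_xform = priv_xform;              /* from private_xform_create() */
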
+
+/* Process compression/decompression operation */
+static int
+process_op(struct isal_comp_qp *qp, struct rte_comp_op *op,
+ struct isal_priv_xform *priv_xform)
+{
+ switch (priv_xform->type) {
+ case RTE_COMP_COMPRESS:
+ process_isal_deflate(op, qp, priv_xform);
+ break;
+ case RTE_COMP_DECOMPRESS:
+ process_isal_inflate(op, qp);
+ break;
+ default:
+ ISAL_PMD_LOG(ERR, "Operation Not Supported\n");
+ return -ENOTSUP;
+ }
+ return 0;
+}
+
+/* Enqueue burst */
+static uint16_t
+isal_comp_pmd_enqueue_burst(void *queue_pair, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ struct isal_comp_qp *qp = queue_pair;
+ uint16_t i;
+ int retval;
+ int16_t num_enq = RTE_MIN(qp->num_free_elements, nb_ops);
+
+ for (i = 0; i < num_enq; i++) {
+ if (unlikely(ops[i]->op_type != RTE_COMP_OP_STATELESS)) {
+ ops[i]->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ ISAL_PMD_LOG(ERR, "Stateful operation not Supported\n");
+ qp->qp_stats.enqueue_err_count++;
+ continue;
+ }
+ retval = process_op(qp, ops[i], ops[i]->private_xform);
+ if (unlikely(retval < 0) ||
+ ops[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
+ qp->qp_stats.enqueue_err_count++;
+ }
+ }
+
+ retval = rte_ring_enqueue_burst(qp->processed_pkts, (void *)ops,
+ num_enq, NULL);
+ qp->num_free_elements -= retval;
+ qp->qp_stats.enqueued_count += retval;
+
+ return retval;
+}
+
+/* Dequeue burst */
+static uint16_t
+isal_comp_pmd_dequeue_burst(void *queue_pair, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ struct isal_comp_qp *qp = queue_pair;
+ uint16_t nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts, (void **)ops,
+ nb_ops, NULL);
+ qp->num_free_elements += nb_dequeued;
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
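
Note that the PMD performs the actual (de)compression synchronously inside enqueue and only parks finished ops on the processed_pkts ring; dequeue merely drains that ring. A hedged application-side sketch (dev_id, qp_id and the ops array are assumptions):

    uint16_t enq = rte_compressdev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
    uint16_t deq = 0;
    while (deq < enq)
        deq += rte_compressdev_dequeue_burst(dev_id, qp_id,
                &ops[deq], enq - deq);
    /* op->status and op->produced are already final for every dequeued op. */
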
+
+/* Create ISA-L compression device */
+static int
+compdev_isal_create(const char *name, struct rte_vdev_device *vdev,
+ struct rte_compressdev_pmd_init_params *init_params)
+{
+ struct rte_compressdev *dev;
+
+ dev = rte_compressdev_pmd_create(name, &vdev->device,
+ sizeof(struct isal_comp_private), init_params);
+ if (dev == NULL) {
+ ISAL_PMD_LOG(ERR, "failed to create compressdev vdev");
+ return -EFAULT;
+ }
+
+ dev->dev_ops = isal_compress_pmd_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = isal_comp_pmd_dequeue_burst;
+ dev->enqueue_burst = isal_comp_pmd_enqueue_burst;
+
+ return 0;
+}
+
+/** Remove compression device */
+static int
+compdev_isal_remove_dev(struct rte_vdev_device *vdev)
+{
+ struct rte_compressdev *compdev;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ compdev = rte_compressdev_pmd_get_named_dev(name);
+ if (compdev == NULL)
+ return -ENODEV;
+
+ return rte_compressdev_pmd_destroy(compdev);
+}
+
+/** Initialise ISA-L compression device */
+static int
+compdev_isal_probe(struct rte_vdev_device *dev)
+{
+ struct rte_compressdev_pmd_init_params init_params = {
+ "",
+ rte_socket_id(),
+ };
+ const char *name, *args;
+ int retval;
+
+ name = rte_vdev_device_name(dev);
+ if (name == NULL)
+ return -EINVAL;
+
+ args = rte_vdev_device_args(dev);
+
+ retval = rte_compressdev_pmd_parse_input_args(&init_params, args);
+ if (retval) {
+ ISAL_PMD_LOG(ERR,
+ "Failed to parse initialisation arguments[%s]\n", args);
+ return -EINVAL;
+ }
+
+ return compdev_isal_create(name, dev, &init_params);
+}
+
+static struct rte_vdev_driver compdev_isal_pmd_drv = {
+ .probe = compdev_isal_probe,
+ .remove = compdev_isal_remove_dev,
+};
+
+RTE_PMD_REGISTER_VDEV(COMPDEV_NAME_ISAL_PMD, compdev_isal_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(COMPDEV_NAME_ISAL_PMD,
+ "socket_id=<int>");
+
+RTE_INIT(isal_init_log);
+
+static void
+isal_init_log(void)
+{
+ isal_logtype_driver = rte_log_register("comp_isal");
+ if (isal_logtype_driver >= 0)
+ rte_log_set_level(isal_logtype_driver, RTE_LOG_INFO);
+}
diff --git a/drivers/compress/isal/isal_compress_pmd_ops.c b/drivers/compress/isal/isal_compress_pmd_ops.c
new file mode 100644
index 00000000..970a0413
--- /dev/null
+++ b/drivers/compress/isal/isal_compress_pmd_ops.c
@@ -0,0 +1,343 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#include <isa-l.h>
+
+#include <rte_common.h>
+#include <rte_compressdev_pmd.h>
+#include <rte_malloc.h>
+
+#include "isal_compress_pmd_private.h"
+
+static const struct rte_compressdev_capabilities isal_pmd_capabilities[] = {
+ {
+ .algo = RTE_COMP_ALGO_DEFLATE,
+ .comp_feature_flags = RTE_COMP_FF_SHAREABLE_PRIV_XFORM,
+ .window_size = {
+ .min = 15,
+ .max = 15,
+ .increment = 0
+ },
+ },
+ RTE_COMP_END_OF_CAPABILITIES_LIST()
+};
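
The window_size range here is the base-2 log of the sliding window, so min = max = 15 advertises a single supported window:

    1 << 15 = 32768 bytes = 32 KiB

which matches the RTE_COMP_ISAL_WINDOW_SIZE check in isal_compress_pmd.c.
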
+
+/** Configure device */
+static int
+isal_comp_pmd_config(struct rte_compressdev *dev,
+ struct rte_compressdev_config *config)
+{
+ int ret = 0;
+ unsigned int n;
+ char mp_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ unsigned int elt_size = sizeof(struct isal_priv_xform);
+ struct isal_comp_private *internals = dev->data->dev_private;
+
+ n = snprintf(mp_name, sizeof(mp_name), "compdev_%d_xform_mp",
+ dev->data->dev_id);
+ if (n >= sizeof(mp_name)) {
+ ISAL_PMD_LOG(ERR,
+ "Unable to create unique name for xform mempool");
+ return -ENOMEM;
+ }
+
+ internals->priv_xform_mp = rte_mempool_lookup(mp_name);
+
+ if (internals->priv_xform_mp != NULL) {
+ if (((internals->priv_xform_mp)->elt_size != elt_size) ||
+ ((internals->priv_xform_mp)->size <
+ config->max_nb_priv_xforms)) {
+
+ ISAL_PMD_LOG(ERR, "%s mempool already exists with different"
+ " initialization parameters", mp_name);
+ internals->priv_xform_mp = NULL;
+ return -ENOMEM;
+ }
+ } else { /* First time configuration */
+ internals->priv_xform_mp = rte_mempool_create(
+ mp_name, /* mempool name */
+ /* number of elements*/
+ config->max_nb_priv_xforms,
+ elt_size, /* element size*/
+ 0, /* Cache size*/
+ 0, /* private data size */
+ NULL, /* mempool constructor */
+ NULL, /* mempool constructor arg */
+ NULL, /* obj constructor */
+ NULL, /* obj constructor arg */
+ config->socket_id, /* socket id */
+ 0); /* flags */
+ }
+
+ if (internals->priv_xform_mp == NULL) {
+ ISAL_PMD_LOG(ERR, "%s mempool allocation failed", mp_name);
+ return -ENOMEM;
+ }
+
+ dev->data->dev_private = internals;
+
+ return ret;
+}
+
+/** Start device */
+static int
+isal_comp_pmd_start(__rte_unused struct rte_compressdev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+isal_comp_pmd_stop(__rte_unused struct rte_compressdev *dev)
+{
+}
+
+/** Close device */
+static int
+isal_comp_pmd_close(struct rte_compressdev *dev)
+{
+ /* Free private data */
+ struct isal_comp_private *internals = dev->data->dev_private;
+
+ rte_mempool_free(internals->priv_xform_mp);
+ return 0;
+}
+
+/** Get device statistics */
+static void
+isal_comp_pmd_stats_get(struct rte_compressdev *dev,
+ struct rte_compressdev_stats *stats)
+{
+ uint16_t qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct isal_comp_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Get device info */
+static void
+isal_comp_pmd_info_get(struct rte_compressdev *dev __rte_unused,
+ struct rte_compressdev_info *dev_info)
+{
+ if (dev_info != NULL) {
+ dev_info->capabilities = isal_pmd_capabilities;
+ dev_info->feature_flags = RTE_COMPDEV_FF_CPU_AVX512 |
+ RTE_COMPDEV_FF_CPU_AVX2 |
+ RTE_COMPDEV_FF_CPU_AVX |
+ RTE_COMPDEV_FF_CPU_SSE;
+ }
+}
+
+/** Reset device statistics */
+static void
+isal_comp_pmd_stats_reset(struct rte_compressdev *dev)
+{
+ uint16_t qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct isal_comp_qp *qp = dev->data->queue_pairs[qp_id];
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+/** Release queue pair */
+static int
+isal_comp_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
+{
+ struct isal_comp_qp *qp = dev->data->queue_pairs[qp_id];
+
+ if (qp == NULL)
+ return -EINVAL;
+
+ if (qp->stream != NULL) {
+ rte_free(qp->stream->level_buf);
+ rte_free(qp->stream);
+ }
+
+ if (qp->state != NULL)
+ rte_free(qp->state);
+
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ rte_free(dev->data->queue_pairs[qp_id]);
+
+ return 0;
+}
+
+/** Create a ring to place process packets on */
+static struct rte_ring *
+isal_comp_pmd_qp_create_processed_pkts_ring(struct isal_comp_qp *qp,
+ unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ ISAL_PMD_LOG(DEBUG,
+ "Reusing existing ring %s for processed packets",
+ qp->name);
+ return r;
+ }
+
+ ISAL_PMD_LOG(ERR,
+ "Unable to reuse existing ring %s"
+ " for processed packets",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** set a unique name for the queue pair based on its name, dev_id and qp_id */
+static int
+isal_comp_pmd_qp_set_unique_name(struct rte_compressdev *dev,
+ struct isal_comp_qp *qp)
+{
+ unsigned int n = snprintf(qp->name, sizeof(qp->name),
+ "isal_compression_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/* Setup a queue pair */
+static int
+isal_comp_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
+ uint32_t max_inflight_ops, int socket_id)
+{
+ struct isal_comp_qp *qp = NULL;
+ int retval;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ isal_comp_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("Isa-l compression PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL) {
+ ISAL_PMD_LOG(ERR, "Failed to allocate queue pair memory");
+ return (-ENOMEM);
+ }
+
+ /* Initialize memory for compression stream structure */
+ qp->stream = rte_zmalloc_socket("Isa-l compression stream ",
+ sizeof(struct isal_zstream), RTE_CACHE_LINE_SIZE,
+ socket_id);
+
+ /* Initialize memory for compression level buffer */
+ qp->stream->level_buf = rte_zmalloc_socket("Isa-l compression lev_buf",
+ ISAL_DEF_LVL3_DEFAULT, RTE_CACHE_LINE_SIZE,
+ socket_id);
+
+ /* Initialize memory for decompression state structure */
+ qp->state = rte_zmalloc_socket("Isa-l decompression state",
+ sizeof(struct inflate_state), RTE_CACHE_LINE_SIZE,
+ socket_id);
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ retval = isal_comp_pmd_qp_set_unique_name(dev, qp);
+ if (retval) {
+ ISAL_PMD_LOG(ERR, "Failed to create unique name for isal "
+ "compression device");
+ goto qp_setup_cleanup;
+ }
+
+ qp->processed_pkts = isal_comp_pmd_qp_create_processed_pkts_ring(qp,
+ max_inflight_ops, socket_id);
+ if (qp->processed_pkts == NULL) {
+ ISAL_PMD_LOG(ERR, "Failed to create unique name for isal "
+ "compression device");
+ goto qp_setup_cleanup;
+ }
+
+ qp->num_free_elements = rte_ring_free_count(qp->processed_pkts);
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Set private xform data */
+static int
+isal_comp_pmd_priv_xform_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform, void **priv_xform)
+{
+ int ret;
+ struct isal_comp_private *internals = dev->data->dev_private;
+
+ if (xform == NULL) {
+ ISAL_PMD_LOG(ERR, "Invalid Xform struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(internals->priv_xform_mp, priv_xform)) {
+ ISAL_PMD_LOG(ERR,
+ "Couldn't get object from private xform mempool");
+ return -ENOMEM;
+ }
+
+ ret = isal_comp_set_priv_xform_parameters(*priv_xform, xform);
+ if (ret != 0) {
+ ISAL_PMD_LOG(ERR, "Failed to configure private xform parameters");
+
+ /* Return private xform to mempool */
+ rte_mempool_put(internals->priv_xform_mp, *priv_xform);
+ return ret;
+ }
+ return 0;
+}
+
+/** Clear memory of the private xform so no stale data is left behind */
+static int
+isal_comp_pmd_priv_xform_free(struct rte_compressdev *dev, void *priv_xform)
+{
+ struct isal_comp_private *internals = dev->data->dev_private;
+
+ /* Zero out the whole structure */
+ if (priv_xform) {
+ memset(priv_xform, 0, sizeof(struct isal_priv_xform));
+ rte_mempool_put(internals->priv_xform_mp, priv_xform);
+ }
+ return 0;
+}
+
+struct rte_compressdev_ops isal_pmd_ops = {
+ .dev_configure = isal_comp_pmd_config,
+ .dev_start = isal_comp_pmd_start,
+ .dev_stop = isal_comp_pmd_stop,
+ .dev_close = isal_comp_pmd_close,
+
+ .stats_get = isal_comp_pmd_stats_get,
+ .stats_reset = isal_comp_pmd_stats_reset,
+
+ .dev_infos_get = isal_comp_pmd_info_get,
+
+ .queue_pair_setup = isal_comp_pmd_qp_setup,
+ .queue_pair_release = isal_comp_pmd_qp_release,
+
+ .private_xform_create = isal_comp_pmd_priv_xform_create,
+ .private_xform_free = isal_comp_pmd_priv_xform_free,
+};
+
+struct rte_compressdev_ops *isal_compress_pmd_ops = &isal_pmd_ops;
diff --git a/drivers/compress/isal/isal_compress_pmd_private.h b/drivers/compress/isal/isal_compress_pmd_private.h
new file mode 100644
index 00000000..46e9fcfa
--- /dev/null
+++ b/drivers/compress/isal/isal_compress_pmd_private.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _ISAL_COMP_PMD_PRIVATE_H_
+#define _ISAL_COMP_PMD_PRIVATE_H_
+
+#define COMPDEV_NAME_ISAL_PMD compress_isal
+/**< ISA-L comp PMD device name */
+
+extern int isal_logtype_driver;
+#define ISAL_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, isal_logtype_driver, "%s(): "fmt "\n", \
+ __func__, ##args)
+
+/* private data structure for each ISA-L compression device */
+struct isal_comp_private {
+ struct rte_mempool *priv_xform_mp;
+};
+
+/** ISA-L queue pair */
+struct isal_comp_qp {
+ /* Queue Pair Identifier */
+ uint16_t id;
+ /* Unique Queue Pair Name */
+ char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ /* Ring for placing process packets */
+ struct rte_ring *processed_pkts;
+ /* Queue pair statistics */
+ struct rte_compressdev_stats qp_stats;
+ /* Compression stream information */
+ struct isal_zstream *stream;
+ /* Decompression state information */
+ struct inflate_state *state;
+ /* Number of free elements on ring */
+ uint16_t num_free_elements;
+} __rte_cache_aligned;
+
+/** ISA-L private xform structure */
+struct isal_priv_xform {
+ enum rte_comp_xform_type type;
+ union {
+ struct rte_comp_compress_xform compress;
+ struct rte_comp_decompress_xform decompress;
+ };
+ uint32_t level_buffer_size;
+} __rte_cache_aligned;
+
+/** Set and validate ISA-L comp private xform parameters */
+extern int
+isal_comp_set_priv_xform_parameters(struct isal_priv_xform *priv_xform,
+ const struct rte_comp_xform *xform);
+
+/** device specific operations function pointer structure */
+extern struct rte_compressdev_ops *isal_compress_pmd_ops;
+
+#endif /* _ISAL_COMP_PMD_PRIVATE_H_ */
diff --git a/drivers/compress/isal/meson.build b/drivers/compress/isal/meson.build
new file mode 100644
index 00000000..94c10fd6
--- /dev/null
+++ b/drivers/compress/isal/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 Intel Corporation
+
+dep = dependency('libisal', required: false)
+if not dep.found()
+ build = false
+endif
+
+deps += 'bus_vdev'
+sources = files('isal_compress_pmd.c', 'isal_compress_pmd_ops.c')
+ext_deps += dep
+pkgconfig_extra_libs += '-lisal'
+
+allow_experimental_apis = true
diff --git a/drivers/compress/isal/rte_pmd_isal_version.map b/drivers/compress/isal/rte_pmd_isal_version.map
new file mode 100644
index 00000000..de8e412f
--- /dev/null
+++ b/drivers/compress/isal/rte_pmd_isal_version.map
@@ -0,0 +1,3 @@
+DPDK_18.05 {
+ local: *;
+};
diff --git a/drivers/compress/meson.build b/drivers/compress/meson.build
new file mode 100644
index 00000000..fb136e1b
--- /dev/null
+++ b/drivers/compress/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+drivers = ['isal']
+
+std_deps = ['compressdev'] # compressdev pulls in all other needed deps
+config_flag_fmt = 'RTE_LIBRTE_@0@_PMD'
+driver_name_fmt = 'rte_pmd_@0@'
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 628bd142..1d0c88ef 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -6,15 +6,21 @@ include $(RTE_SDK)/mk/rte.vars.mk
DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm
DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += aesni_mb
DIRS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += armv8
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp
DIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += openssl
DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat
DIRS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += snow3g
DIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += kasumi
DIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += zuc
-DIRS-$(CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO) += mrvl
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO) += mvsam
DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec
+endif
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += dpaa_sec
+endif
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/crypto/aesni_gcm/Makefile b/drivers/crypto/aesni_gcm/Makefile
index d06c5444..0a5c1a87 100644
--- a/drivers/crypto/aesni_gcm/Makefile
+++ b/drivers/crypto/aesni_gcm/Makefile
@@ -3,12 +3,6 @@
include $(RTE_SDK)/mk/rte.vars.mk
-ifneq ($(MAKECMDGOALS),clean)
-ifeq ($(AESNI_MULTI_BUFFER_LIB_PATH),)
-$(error "Please define AESNI_MULTI_BUFFER_LIB_PATH environment variable")
-endif
-endif
-
# library name
LIB = librte_pmd_aesni_gcm.a
@@ -23,9 +17,7 @@ LIBABIVER := 1
EXPORT_MAP := rte_pmd_aesni_gcm_version.map
# external library dependencies
-CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
-CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
-LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+LDLIBS += -lIPSec_MB
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_cryptodev
LDLIBS += -lrte_bus_vdev
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
index 59e504ee..45061669 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
@@ -9,8 +9,7 @@
#define LINUX
#endif
-#include <gcm_defines.h>
-#include <aux_funcs.h>
+#include <intel-ipsec-mb.h>
/** Supported vector modes */
enum aesni_gcm_vector_mode {
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index 83e54480..80360dd9 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -352,7 +352,7 @@ post_process_gcm_crypto_op(struct aesni_gcm_qp *qp,
session->op == AESNI_GMAC_OP_VERIFY) {
uint8_t *digest;
- uint8_t *tag = (uint8_t *)&qp->temp_digest;
+ uint8_t *tag = qp->temp_digest;
if (session->op == AESNI_GMAC_OP_VERIFY)
digest = op->sym->auth.digest.data;
@@ -392,7 +392,7 @@ handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
memset(sess, 0, sizeof(struct aesni_gcm_session));
memset(op->sym->session, 0,
- rte_cryptodev_get_header_session_size());
+ rte_cryptodev_sym_get_header_session_size());
rte_mempool_put(qp->sess_mp, sess);
rte_mempool_put(qp->sess_mp, op->sym->session);
op->sym->session = NULL;
@@ -570,5 +570,5 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv, aesni_gcm_pmd_drv,
+RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv, aesni_gcm_pmd_drv.driver,
cryptodev_driver_id);
diff --git a/drivers/crypto/aesni_mb/Makefile b/drivers/crypto/aesni_mb/Makefile
index d9f8fb98..806a95eb 100644
--- a/drivers/crypto/aesni_mb/Makefile
+++ b/drivers/crypto/aesni_mb/Makefile
@@ -3,12 +3,6 @@
include $(RTE_SDK)/mk/rte.vars.mk
-ifneq ($(MAKECMDGOALS),clean)
-ifeq ($(AESNI_MULTI_BUFFER_LIB_PATH),)
-$(error "Please define AESNI_MULTI_BUFFER_LIB_PATH environment variable")
-endif
-endif
-
# library name
LIB = librte_pmd_aesni_mb.a
@@ -23,9 +17,7 @@ LIBABIVER := 1
EXPORT_MAP := rte_pmd_aesni_mb_version.map
# external library dependencies
-CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
-CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
-LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+LDLIBS += -lIPSec_MB
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_cryptodev
LDLIBS += -lrte_bus_vdev
diff --git a/drivers/crypto/aesni_mb/aesni_mb_ops.h b/drivers/crypto/aesni_mb/aesni_mb_ops.h
index 4d596e85..5a1cba6c 100644
--- a/drivers/crypto/aesni_mb/aesni_mb_ops.h
+++ b/drivers/crypto/aesni_mb/aesni_mb_ops.h
@@ -9,8 +9,7 @@
#define LINUX
#endif
-#include <mb_mgr.h>
-#include <aux_funcs.h>
+#include <intel-ipsec-mb.h>
enum aesni_mb_vector_mode {
RTE_AESNI_MB_NOT_SUPPORTED = 0,
@@ -34,9 +33,12 @@ typedef void (*aes_keyexp_192_t)
(const void *key, void *enc_exp_keys, void *dec_exp_keys);
typedef void (*aes_keyexp_256_t)
(const void *key, void *enc_exp_keys, void *dec_exp_keys);
-
typedef void (*aes_xcbc_expand_key_t)
(const void *key, void *exp_k1, void *k2, void *k3);
+typedef void (*aes_cmac_sub_key_gen_t)
+ (const void *exp_key, void *k2, void *k3);
+typedef void (*aes_cmac_keyexp_t)
+ (const void *key, void *keyexp);
/** Multi-buffer library function pointer table */
struct aesni_mb_op_fns {
@@ -78,9 +80,12 @@ struct aesni_mb_op_fns {
/**< AES192 key expansions */
aes_keyexp_256_t aes256;
/**< AES256 key expansions */
-
aes_xcbc_expand_key_t aes_xcbc;
- /**< AES XCBC key expansions */
+ /**< AES XCBC key expansions */
+ aes_cmac_sub_key_gen_t aes_cmac_subkey;
+ /**< AES CMAC subkey expansions */
+ aes_cmac_keyexp_t aes_cmac_expkey;
+ /**< AES CMAC key expansions */
} keyexp;
/**< Key expansion functions */
} aux;
@@ -123,7 +128,9 @@ static const struct aesni_mb_op_fns job_ops[] = {
aes_keyexp_128_sse,
aes_keyexp_192_sse,
aes_keyexp_256_sse,
- aes_xcbc_expand_key_sse
+ aes_xcbc_expand_key_sse,
+ aes_cmac_subkey_gen_sse,
+ aes_keyexp_128_enc_sse
}
}
},
@@ -148,7 +155,9 @@ static const struct aesni_mb_op_fns job_ops[] = {
aes_keyexp_128_avx,
aes_keyexp_192_avx,
aes_keyexp_256_avx,
- aes_xcbc_expand_key_avx
+ aes_xcbc_expand_key_avx,
+ aes_cmac_subkey_gen_avx,
+ aes_keyexp_128_enc_avx
}
}
},
@@ -173,7 +182,9 @@ static const struct aesni_mb_op_fns job_ops[] = {
aes_keyexp_128_avx2,
aes_keyexp_192_avx2,
aes_keyexp_256_avx2,
- aes_xcbc_expand_key_avx2
+ aes_xcbc_expand_key_avx2,
+ aes_cmac_subkey_gen_avx2,
+ aes_keyexp_128_enc_avx2
}
}
},
@@ -198,7 +209,9 @@ static const struct aesni_mb_op_fns job_ops[] = {
aes_keyexp_128_avx512,
aes_keyexp_192_avx512,
aes_keyexp_256_avx512,
- aes_xcbc_expand_key_avx512
+ aes_xcbc_expand_key_avx512,
+ aes_cmac_subkey_gen_avx512,
+ aes_keyexp_128_enc_avx512
}
}
}
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 636c6c37..bb35c66a 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -2,7 +2,7 @@
* Copyright(c) 2015-2017 Intel Corporation
*/
-#include <des.h>
+#include <intel-ipsec-mb.h>
#include <rte_common.h>
#include <rte_hexdump.h>
@@ -124,6 +124,17 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
return 0;
}
+ if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) {
+ sess->auth.algo = AES_CMAC;
+ (*mb_ops->aux.keyexp.aes_cmac_expkey)(xform->auth.key.data,
+ sess->auth.cmac.expkey);
+
+ (*mb_ops->aux.keyexp.aes_cmac_subkey)(sess->auth.cmac.expkey,
+ sess->auth.cmac.skey1, sess->auth.cmac.skey2);
+ return 0;
+ }
+
+
switch (xform->auth.algo) {
case RTE_CRYPTO_AUTH_MD5_HMAC:
sess->auth.algo = MD5;
@@ -338,16 +349,19 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
sess->chain_order = HASH_CIPHER;
auth_xform = xform;
cipher_xform = xform->next;
+ sess->auth.digest_len = xform->auth.digest_length;
break;
case AESNI_MB_OP_CIPHER_HASH:
sess->chain_order = CIPHER_HASH;
auth_xform = xform->next;
cipher_xform = xform;
+ sess->auth.digest_len = xform->auth.digest_length;
break;
case AESNI_MB_OP_HASH_ONLY:
sess->chain_order = HASH_CIPHER;
auth_xform = xform;
cipher_xform = NULL;
+ sess->auth.digest_len = xform->auth.digest_length;
break;
case AESNI_MB_OP_CIPHER_ONLY:
/*
@@ -366,13 +380,13 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
case AESNI_MB_OP_AEAD_CIPHER_HASH:
sess->chain_order = CIPHER_HASH;
sess->aead.aad_len = xform->aead.aad_length;
- sess->aead.digest_len = xform->aead.digest_length;
+ sess->auth.digest_len = xform->aead.digest_length;
aead_xform = xform;
break;
case AESNI_MB_OP_AEAD_HASH_CIPHER:
sess->chain_order = HASH_CIPHER;
sess->aead.aad_len = xform->aead.aad_length;
- sess->aead.digest_len = xform->aead.digest_length;
+ sess->auth.digest_len = xform->aead.digest_length;
aead_xform = xform;
break;
case AESNI_MB_OP_NOT_SUPPORTED:
@@ -517,15 +531,20 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
/* Set authentication parameters */
job->hash_alg = session->auth.algo;
if (job->hash_alg == AES_XCBC) {
- job->_k1_expanded = session->auth.xcbc.k1_expanded;
- job->_k2 = session->auth.xcbc.k2;
- job->_k3 = session->auth.xcbc.k3;
+ job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
+ job->u.XCBC._k2 = session->auth.xcbc.k2;
+ job->u.XCBC._k3 = session->auth.xcbc.k3;
} else if (job->hash_alg == AES_CCM) {
job->u.CCM.aad = op->sym->aead.aad.data + 18;
job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
+ } else if (job->hash_alg == AES_CMAC) {
+ job->u.CMAC._key_expanded = session->auth.cmac.expkey;
+ job->u.CMAC._skey1 = session->auth.cmac.skey1;
+ job->u.CMAC._skey2 = session->auth.cmac.skey2;
+
} else {
- job->hashed_auth_key_xor_ipad = session->auth.pads.inner;
- job->hashed_auth_key_xor_opad = session->auth.pads.outer;
+ job->u.HMAC._hashed_auth_key_xor_ipad = session->auth.pads.inner;
+ job->u.HMAC._hashed_auth_key_xor_opad = session->auth.pads.outer;
}
/* Mutable crypto operation parameters */
@@ -568,11 +587,11 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
* Multi-buffer library current only support returning a truncated
* digest length as specified in the relevant IPsec RFCs
*/
- if (job->hash_alg != AES_CCM)
+ if (job->hash_alg != AES_CCM && job->hash_alg != AES_CMAC)
job->auth_tag_output_len_in_bytes =
get_truncated_digest_byte_length(job->hash_alg);
else
- job->auth_tag_output_len_in_bytes = session->aead.digest_len;
+ job->auth_tag_output_len_in_bytes = session->auth.digest_len;
/* Set IV parameters */
@@ -663,7 +682,7 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
memset(sess, 0, sizeof(struct aesni_mb_session));
memset(op->sym->session, 0,
- rte_cryptodev_get_header_session_size());
+ rte_cryptodev_sym_get_header_session_size());
rte_mempool_put(qp->sess_mp, sess);
rte_mempool_put(qp->sess_mp, op->sym->session);
op->sym->session = NULL;
@@ -931,5 +950,5 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
"max_nb_sessions=<int> "
"socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_mb_crypto_drv,
- cryptodev_aesni_mb_pmd_drv,
+ cryptodev_aesni_mb_pmd_drv.driver,
cryptodev_driver_id);
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index 9d685a09..01530523 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -289,8 +289,27 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
}, }
}, }
},
-
-
+ { /* AES CMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_CMAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 12,
+ .max = 16,
+ .increment = 4
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
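
The new capability entry advertises AES-CMAC with a fixed 16-byte key and a 12- or 16-byte digest. A hedged sketch of an auth-only xform that fits this range (cmac_key is an assumed 16-byte buffer; session and op setup are omitted):

    struct rte_crypto_sym_xform cmac_xform = {
        .type = RTE_CRYPTO_SYM_XFORM_AUTH,
        .next = NULL,
        .auth = {
            .op = RTE_CRYPTO_AUTH_OP_GENERATE,
            .algo = RTE_CRYPTO_AUTH_AES_CMAC,
            .key = { .data = cmac_key, .length = 16 },
            .digest_length = 16,
            .iv = { .offset = 0, .length = 0 },
        },
    };
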
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index 948e091c..a33b2f69 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -66,8 +66,9 @@ static const unsigned auth_truncated_digest_byte_lengths[] = {
[SHA_384] = 24,
[SHA_512] = 32,
[AES_XCBC] = 12,
+ [AES_CMAC] = 16,
[AES_CCM] = 8,
- [NULL_HASH] = 0
+ [NULL_HASH] = 0
};
/**
@@ -91,7 +92,8 @@ static const unsigned auth_digest_byte_lengths[] = {
[SHA_384] = 48,
[SHA_512] = 64,
[AES_XCBC] = 16,
- [NULL_HASH] = 0
+ [AES_CMAC] = 16,
+ [NULL_HASH] = 0
};
/**
@@ -211,14 +213,24 @@ struct aesni_mb_session {
uint8_t k3[16] __rte_aligned(16);
/**< k3. */
} xcbc;
+
+ struct {
+ uint32_t expkey[60] __rte_aligned(16);
+ /**< k1 (expanded key). */
+ uint32_t skey1[4] __rte_aligned(16);
+ /**< k2. */
+ uint32_t skey2[4] __rte_aligned(16);
+ /**< k3. */
+ } cmac;
/**< Expanded XCBC authentication keys */
};
+ /** digest size */
+ uint16_t digest_len;
+
} auth;
struct {
/** AAD data length */
uint16_t aad_len;
- /** digest size */
- uint16_t digest_len;
} aead;
} __rte_cache_aligned;
diff --git a/drivers/crypto/armv8/rte_armv8_pmd.c b/drivers/crypto/armv8/rte_armv8_pmd.c
index 59fffcf1..fbb08f72 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd.c
+++ b/drivers/crypto/armv8/rte_armv8_pmd.c
@@ -654,7 +654,7 @@ process_op(struct armv8_crypto_qp *qp, struct rte_crypto_op *op,
if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
memset(sess, 0, sizeof(struct armv8_crypto_session));
memset(op->sym->session, 0,
- rte_cryptodev_get_header_session_size());
+ rte_cryptodev_sym_get_header_session_size());
rte_mempool_put(qp->sess_mp, sess);
rte_mempool_put(qp->sess_mp, op->sym->session);
op->sym->session = NULL;
@@ -850,5 +850,5 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ARMV8_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(armv8_crypto_drv, armv8_crypto_pmd_drv,
+RTE_PMD_REGISTER_CRYPTO_DRIVER(armv8_crypto_drv, armv8_crypto_pmd_drv.driver,
cryptodev_driver_id);
diff --git a/drivers/crypto/armv8/rte_armv8_pmd_ops.c b/drivers/crypto/armv8/rte_armv8_pmd_ops.c
index 3817ad7b..c64aef09 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd_ops.c
+++ b/drivers/crypto/armv8/rte_armv8_pmd_ops.c
@@ -27,9 +27,9 @@ static const struct rte_cryptodev_capabilities
.increment = 1
},
.digest_size = {
- .min = 20,
+ .min = 1,
.max = 20,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -48,9 +48,9 @@ static const struct rte_cryptodev_capabilities
.increment = 1
},
.digest_size = {
- .min = 32,
+ .min = 1,
.max = 32,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
new file mode 100644
index 00000000..f51d170f
--- /dev/null
+++ b/drivers/crypto/ccp/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_ccp.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += -I$(SRCDIR)
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# external library include paths
+LDLIBS += -lcrypto
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_pci -lrte_bus_pci
+LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_kvargs
+
+# versioning export map
+EXPORT_MAP := rte_pmd_ccp_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += rte_ccp_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_crypto.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pci.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
new file mode 100644
index 00000000..3ce0f39f
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -0,0 +1,2951 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <openssl/sha.h>
+#include <openssl/cmac.h> /*sub key apis*/
+#include <openssl/evp.h> /*sub key apis*/
+
+#include <rte_hexdump.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "ccp_dev.h"
+#include "ccp_crypto.h"
+#include "ccp_pci.h"
+#include "ccp_pmd_private.h"
+
+#include <openssl/conf.h>
+#include <openssl/err.h>
+#include <openssl/hmac.h>
+
+/* SHA initial context values */
+static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
+ SHA1_H4, SHA1_H3,
+ SHA1_H2, SHA1_H1,
+ SHA1_H0, 0x0U,
+ 0x0U, 0x0U,
+};
+
+uint32_t ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
+ SHA224_H7, SHA224_H6,
+ SHA224_H5, SHA224_H4,
+ SHA224_H3, SHA224_H2,
+ SHA224_H1, SHA224_H0,
+};
+
+uint32_t ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
+ SHA256_H7, SHA256_H6,
+ SHA256_H5, SHA256_H4,
+ SHA256_H3, SHA256_H2,
+ SHA256_H1, SHA256_H0,
+};
+
+uint64_t ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
+ SHA384_H7, SHA384_H6,
+ SHA384_H5, SHA384_H4,
+ SHA384_H3, SHA384_H2,
+ SHA384_H1, SHA384_H0,
+};
+
+uint64_t ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
+ SHA512_H7, SHA512_H6,
+ SHA512_H5, SHA512_H4,
+ SHA512_H3, SHA512_H2,
+ SHA512_H1, SHA512_H0,
+};
+
+#if defined(_MSC_VER)
+#define SHA3_CONST(x) x
+#else
+#define SHA3_CONST(x) x##L
+#endif
+
+/** 'Words' here refers to uint64_t */
+#define SHA3_KECCAK_SPONGE_WORDS \
+ (((1600) / 8) / sizeof(uint64_t))
+typedef struct sha3_context_ {
+ uint64_t saved;
+ /**
+ * The portion of the input message that we
+ * didn't consume yet
+ */
+ union {
+ uint64_t s[SHA3_KECCAK_SPONGE_WORDS];
+ /* Keccak's state */
+ uint8_t sb[SHA3_KECCAK_SPONGE_WORDS * 8];
+ /**< total 200-byte context size */
+ };
+ unsigned int byteIndex;
+ /**
+ * 0..7--the next byte after the set one
+ * (starts from 0; 0--none are buffered)
+ */
+ unsigned int wordIndex;
+ /**
+ * 0..24--the next word to integrate input
+ * (starts from 0)
+ */
+ unsigned int capacityWords;
+ /**
+ * the double size of the hash output in
+ * words (e.g. 16 for Keccak 512)
+ */
+} sha3_context;
+
+#ifndef SHA3_ROTL64
+#define SHA3_ROTL64(x, y) \
+ (((x) << (y)) | ((x) >> ((sizeof(uint64_t)*8) - (y))))
+#endif
+
+static const uint64_t keccakf_rndc[24] = {
+ SHA3_CONST(0x0000000000000001UL), SHA3_CONST(0x0000000000008082UL),
+ SHA3_CONST(0x800000000000808aUL), SHA3_CONST(0x8000000080008000UL),
+ SHA3_CONST(0x000000000000808bUL), SHA3_CONST(0x0000000080000001UL),
+ SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008009UL),
+ SHA3_CONST(0x000000000000008aUL), SHA3_CONST(0x0000000000000088UL),
+ SHA3_CONST(0x0000000080008009UL), SHA3_CONST(0x000000008000000aUL),
+ SHA3_CONST(0x000000008000808bUL), SHA3_CONST(0x800000000000008bUL),
+ SHA3_CONST(0x8000000000008089UL), SHA3_CONST(0x8000000000008003UL),
+ SHA3_CONST(0x8000000000008002UL), SHA3_CONST(0x8000000000000080UL),
+ SHA3_CONST(0x000000000000800aUL), SHA3_CONST(0x800000008000000aUL),
+ SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008080UL),
+ SHA3_CONST(0x0000000080000001UL), SHA3_CONST(0x8000000080008008UL)
+};
+
+static const unsigned int keccakf_rotc[24] = {
+ 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, 27, 41, 56, 8, 25, 43, 62,
+ 18, 39, 61, 20, 44
+};
+
+static const unsigned int keccakf_piln[24] = {
+ 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, 15, 23, 19, 13, 12, 2, 20,
+ 14, 22, 9, 6, 1
+};
+
+static enum ccp_cmd_order
+ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
+{
+ enum ccp_cmd_order res = CCP_CMD_NOT_SUPPORTED;
+
+ if (xform == NULL)
+ return res;
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ return CCP_CMD_AUTH;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return CCP_CMD_HASH_CIPHER;
+ }
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ return CCP_CMD_CIPHER;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return CCP_CMD_CIPHER_HASH;
+ }
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
+ return CCP_CMD_COMBINED;
+ return res;
+}
+
+/* partial hash using openssl */
+static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA_CTX ctx;
+
+ if (!SHA1_Init(&ctx))
+ return -EFAULT;
+ SHA1_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA256_CTX ctx;
+
+ if (!SHA224_Init(&ctx))
+ return -EFAULT;
+ SHA256_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx,
+ SHA256_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA256_CTX ctx;
+
+ if (!SHA256_Init(&ctx))
+ return -EFAULT;
+ SHA256_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx,
+ SHA256_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA512_CTX ctx;
+
+ if (!SHA384_Init(&ctx))
+ return -EFAULT;
+ SHA512_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx,
+ SHA512_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA512_CTX ctx;
+
+ if (!SHA512_Init(&ctx))
+ return -EFAULT;
+ SHA512_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx,
+ SHA512_DIGEST_LENGTH);
+ return 0;
+}
+
+static void
+keccakf(uint64_t s[25])
+{
+ int i, j, round;
+ uint64_t t, bc[5];
+#define KECCAK_ROUNDS 24
+
+ for (round = 0; round < KECCAK_ROUNDS; round++) {
+
+ /* Theta */
+ for (i = 0; i < 5; i++)
+ bc[i] = s[i] ^ s[i + 5] ^ s[i + 10] ^ s[i + 15] ^
+ s[i + 20];
+
+ for (i = 0; i < 5; i++) {
+ t = bc[(i + 4) % 5] ^ SHA3_ROTL64(bc[(i + 1) % 5], 1);
+ for (j = 0; j < 25; j += 5)
+ s[j + i] ^= t;
+ }
+
+ /* Rho Pi */
+ t = s[1];
+ for (i = 0; i < 24; i++) {
+ j = keccakf_piln[i];
+ bc[0] = s[j];
+ s[j] = SHA3_ROTL64(t, keccakf_rotc[i]);
+ t = bc[0];
+ }
+
+ /* Chi */
+ for (j = 0; j < 25; j += 5) {
+ for (i = 0; i < 5; i++)
+ bc[i] = s[j + i];
+ for (i = 0; i < 5; i++)
+ s[j + i] ^= (~bc[(i + 1) % 5]) &
+ bc[(i + 2) % 5];
+ }
+
+ /* Iota */
+ s[0] ^= keccakf_rndc[round];
+ }
+}
+
+static void
+sha3_Init224(void *priv)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->capacityWords = 2 * 224 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init256(void *priv)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->capacityWords = 2 * 256 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init384(void *priv)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->capacityWords = 2 * 384 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init512(void *priv)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->capacityWords = 2 * 512 / (8 * sizeof(uint64_t));
+}
+
+
+/* This is simply the 'update' with the padding block.
+ * The padding block is 0x01 || 0x00* || 0x80. First 0x01 and last 0x80
+ * bytes are always present, but they can be the same byte.
+ */
+static void
+sha3_Update(void *priv, void const *bufIn, size_t len)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+ unsigned int old_tail = (8 - ctx->byteIndex) & 7;
+ size_t words;
+ unsigned int tail;
+ size_t i;
+ const uint8_t *buf = bufIn;
+
+ if (len < old_tail) {
+ while (len--)
+ ctx->saved |= (uint64_t) (*(buf++)) <<
+ ((ctx->byteIndex++) * 8);
+ return;
+ }
+
+ if (old_tail) {
+ len -= old_tail;
+ while (old_tail--)
+ ctx->saved |= (uint64_t) (*(buf++)) <<
+ ((ctx->byteIndex++) * 8);
+
+ ctx->s[ctx->wordIndex] ^= ctx->saved;
+ ctx->byteIndex = 0;
+ ctx->saved = 0;
+ if (++ctx->wordIndex ==
+ (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) {
+ keccakf(ctx->s);
+ ctx->wordIndex = 0;
+ }
+ }
+
+ words = len / sizeof(uint64_t);
+ tail = len - words * sizeof(uint64_t);
+
+ for (i = 0; i < words; i++, buf += sizeof(uint64_t)) {
+ const uint64_t t = (uint64_t) (buf[0]) |
+ ((uint64_t) (buf[1]) << 8 * 1) |
+ ((uint64_t) (buf[2]) << 8 * 2) |
+ ((uint64_t) (buf[3]) << 8 * 3) |
+ ((uint64_t) (buf[4]) << 8 * 4) |
+ ((uint64_t) (buf[5]) << 8 * 5) |
+ ((uint64_t) (buf[6]) << 8 * 6) |
+ ((uint64_t) (buf[7]) << 8 * 7);
+ ctx->s[ctx->wordIndex] ^= t;
+ if (++ctx->wordIndex ==
+ (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) {
+ keccakf(ctx->s);
+ ctx->wordIndex = 0;
+ }
+ }
+
+ while (tail--)
+ ctx->saved |= (uint64_t) (*(buf++)) << ((ctx->byteIndex++) * 8);
+}
+
+int partial_hash_sha3_224(uint8_t *data_in, uint8_t *data_out)
+{
+ sha3_context *ctx;
+ int i;
+
+ ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+ if (!ctx) {
+ CCP_LOG_ERR("sha3-ctx creation failed");
+ return -ENOMEM;
+ }
+ sha3_Init224(ctx);
+ sha3_Update(ctx, data_in, SHA3_224_BLOCK_SIZE);
+ for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+ *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+ rte_free(ctx);
+
+ return 0;
+}
+
+int partial_hash_sha3_256(uint8_t *data_in, uint8_t *data_out)
+{
+ sha3_context *ctx;
+ int i;
+
+ ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+ if (!ctx) {
+ CCP_LOG_ERR("sha3-ctx creation failed");
+ return -ENOMEM;
+ }
+ sha3_Init256(ctx);
+ sha3_Update(ctx, data_in, SHA3_256_BLOCK_SIZE);
+ for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+ *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+ rte_free(ctx);
+
+ return 0;
+}
+
+int partial_hash_sha3_384(uint8_t *data_in, uint8_t *data_out)
+{
+ sha3_context *ctx;
+ int i;
+
+ ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+ if (!ctx) {
+ CCP_LOG_ERR("sha3-ctx creation failed");
+ return -ENOMEM;
+ }
+ sha3_Init384(ctx);
+ sha3_Update(ctx, data_in, SHA3_384_BLOCK_SIZE);
+ for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+ *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+ rte_free(ctx);
+
+ return 0;
+}
+
+int partial_hash_sha3_512(uint8_t *data_in, uint8_t *data_out)
+{
+ sha3_context *ctx;
+ int i;
+
+ ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+ if (!ctx) {
+ CCP_LOG_ERR("sha3-ctx creation failed");
+ return -ENOMEM;
+ }
+ sha3_Init512(ctx);
+ sha3_Update(ctx, data_in, SHA3_512_BLOCK_SIZE);
+ for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+ *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+ rte_free(ctx);
+
+ return 0;
+}
+
+static int generate_partial_hash(struct ccp_session *sess)
+{
+
+ uint8_t ipad[sess->auth.block_size];
+ uint8_t opad[sess->auth.block_size];
+ uint8_t *ipad_t, *opad_t;
+ uint32_t *hash_value_be32, hash_temp32[8];
+ uint64_t *hash_value_be64, hash_temp64[8];
+ int i, count;
+ uint8_t *hash_value_sha3;
+
+ opad_t = ipad_t = (uint8_t *)sess->auth.key;
+
+ hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute);
+ hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute);
+
+ /* considering key size is always equal to block size of algorithm */
+ for (i = 0; i < sess->auth.block_size; i++) {
+ ipad[i] = (ipad_t[i] ^ HMAC_IPAD_VALUE);
+ opad[i] = (opad_t[i] ^ HMAC_OPAD_VALUE);
+ }
+
+ switch (sess->auth.algo) {
+ case CCP_AUTH_ALGO_SHA1_HMAC:
+ count = SHA1_DIGEST_SIZE >> 2;
+
+ if (partial_hash_sha1(ipad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+
+ hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha1(opad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA224_HMAC:
+ count = SHA256_DIGEST_SIZE >> 2;
+
+ if (partial_hash_sha224(ipad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+
+ hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha224(opad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA3_224_HMAC:
+ hash_value_sha3 = sess->auth.pre_compute;
+ if (partial_hash_sha3_224(ipad, hash_value_sha3))
+ return -1;
+
+ hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha3_224(opad, hash_value_sha3))
+ return -1;
+ return 0;
+ case CCP_AUTH_ALGO_SHA256_HMAC:
+ count = SHA256_DIGEST_SIZE >> 2;
+
+ if (partial_hash_sha256(ipad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+
+ hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha256(opad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA3_256_HMAC:
+ hash_value_sha3 = sess->auth.pre_compute;
+ if (partial_hash_sha3_256(ipad, hash_value_sha3))
+ return -1;
+
+ hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha3_256(opad, hash_value_sha3))
+ return -1;
+ return 0;
+ case CCP_AUTH_ALGO_SHA384_HMAC:
+ count = SHA512_DIGEST_SIZE >> 3;
+
+ if (partial_hash_sha384(ipad, (uint8_t *)hash_temp64))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be64++)
+ *hash_value_be64 = hash_temp64[count - 1 - i];
+
+ hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha384(opad, (uint8_t *)hash_temp64))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be64++)
+ *hash_value_be64 = hash_temp64[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA3_384_HMAC:
+ hash_value_sha3 = sess->auth.pre_compute;
+ if (partial_hash_sha3_384(ipad, hash_value_sha3))
+ return -1;
+
+ hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha3_384(opad, hash_value_sha3))
+ return -1;
+ return 0;
+ case CCP_AUTH_ALGO_SHA512_HMAC:
+ count = SHA512_DIGEST_SIZE >> 3;
+
+ if (partial_hash_sha512(ipad, (uint8_t *)hash_temp64))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be64++)
+ *hash_value_be64 = hash_temp64[count - 1 - i];
+
+ hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha512(opad, (uint8_t *)hash_temp64))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be64++)
+ *hash_value_be64 = hash_temp64[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA3_512_HMAC:
+ hash_value_sha3 = sess->auth.pre_compute;
+ if (partial_hash_sha3_512(ipad, hash_value_sha3))
+ return -1;
+
+ hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha3_512(opad, hash_value_sha3))
+ return -1;
+ return 0;
+ default:
+ CCP_LOG_ERR("Invalid auth algo");
+ return -1;
+ }
+}
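
generate_partial_hash() stores the compression-function state after hashing the single key^ipad and key^opad blocks, i.e. the midstates of HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)), so at run time the engine only continues from those states. Since this file already links against OpenSSL, a hedged host-side reference for cross-checking a finished digest could look like this (key/msg buffers and lengths are assumptions):

    unsigned int ref_len = 0;
    unsigned char ref[SHA256_DIGEST_LENGTH];
    HMAC(EVP_sha256(), key, key_len, msg, msg_len, ref, &ref_len);
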
+
+/* prepare temporary keys K1 and K2 */
+static void prepare_key(unsigned char *k, unsigned char *l, int bl)
+{
+ int i;
+ /* Shift block to left, including carry */
+ for (i = 0; i < bl; i++) {
+ k[i] = l[i] << 1;
+ if (i < bl - 1 && l[i + 1] & 0x80)
+ k[i] |= 1;
+ }
+ /* If MSB set fixup with R */
+ if (l[0] & 0x80)
+ k[bl - 1] ^= bl == 16 ? 0x87 : 0x1b;
+}
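
prepare_key() is the subkey doubling step from NIST SP 800-38B: shift the block left by one bit and, if the dropped MSB was set, XOR the last byte with 0x87 (16-byte block) or 0x1b (8-byte block). With L = AES_K(0^128), the CMAC subkeys are K1 = dbl(L) and K2 = dbl(K1), which is what generate_cmac_subkeys() below computes via OpenSSL. A hedged standalone sketch of the same doubling for an AES-sized block:

    /* Left-shift a 16-byte block by one bit, folding 0x87 into the last byte on carry. */
    static void cmac_dbl(uint8_t out[16], const uint8_t in[16])
    {
        int i;
        for (i = 0; i < 16; i++)
            out[i] = (uint8_t)(in[i] << 1) | (i < 15 ? in[i + 1] >> 7 : 0);
        if (in[0] & 0x80)
            out[15] ^= 0x87;
    }
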
+
+/* subkeys K1 and K2 generation for CMAC */
+static int
+generate_cmac_subkeys(struct ccp_session *sess)
+{
+ const EVP_CIPHER *algo;
+ EVP_CIPHER_CTX *ctx;
+ unsigned char *ccp_ctx;
+ size_t i;
+ int dstlen, totlen;
+ unsigned char zero_iv[AES_BLOCK_SIZE] = {0};
+ unsigned char dst[2 * AES_BLOCK_SIZE] = {0};
+ unsigned char k1[AES_BLOCK_SIZE] = {0};
+ unsigned char k2[AES_BLOCK_SIZE] = {0};
+
+ if (sess->auth.ut.aes_type == CCP_AES_TYPE_128)
+ algo = EVP_aes_128_cbc();
+ else if (sess->auth.ut.aes_type == CCP_AES_TYPE_192)
+ algo = EVP_aes_192_cbc();
+ else if (sess->auth.ut.aes_type == CCP_AES_TYPE_256)
+ algo = EVP_aes_256_cbc();
+ else {
+ CCP_LOG_ERR("Invalid CMAC type length");
+ return -1;
+ }
+
+ ctx = EVP_CIPHER_CTX_new();
+ if (!ctx) {
+ CCP_LOG_ERR("ctx creation failed");
+ return -1;
+ }
+ if (EVP_EncryptInit(ctx, algo, (unsigned char *)sess->auth.key,
+ (unsigned char *)zero_iv) <= 0)
+ goto key_generate_err;
+ if (EVP_CIPHER_CTX_set_padding(ctx, 0) <= 0)
+ goto key_generate_err;
+ if (EVP_EncryptUpdate(ctx, dst, &dstlen, zero_iv,
+ AES_BLOCK_SIZE) <= 0)
+ goto key_generate_err;
+ if (EVP_EncryptFinal_ex(ctx, dst + dstlen, &totlen) <= 0)
+ goto key_generate_err;
+
+ memset(sess->auth.pre_compute, 0, CCP_SB_BYTES * 2);
+
+ ccp_ctx = (unsigned char *)(sess->auth.pre_compute + CCP_SB_BYTES - 1);
+ prepare_key(k1, dst, AES_BLOCK_SIZE);
+ for (i = 0; i < AES_BLOCK_SIZE; i++, ccp_ctx--)
+ *ccp_ctx = k1[i];
+
+ ccp_ctx = (unsigned char *)(sess->auth.pre_compute +
+ (2 * CCP_SB_BYTES) - 1);
+ prepare_key(k2, k1, AES_BLOCK_SIZE);
+ for (i = 0; i < AES_BLOCK_SIZE; i++, ccp_ctx--)
+ *ccp_ctx = k2[i];
+
+ EVP_CIPHER_CTX_free(ctx);
+
+ return 0;
+
+key_generate_err:
+ CCP_LOG_ERR("CMAC Init failed");
+ return -1;
+}
+
+/* configure session */
+static int
+ccp_configure_session_cipher(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ size_t i, j, x;
+
+ cipher_xform = &xform->cipher;
+
+ /* set cipher direction */
+ if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
+ else
+ sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
+
+ /* set cipher key */
+ sess->cipher.key_length = cipher_xform->key.length;
+ rte_memcpy(sess->cipher.key, cipher_xform->key.data,
+ cipher_xform->key.length);
+
+ /* set iv parameters */
+ sess->iv.offset = cipher_xform->iv.offset;
+ sess->iv.length = cipher_xform->iv.length;
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ sess->cipher.algo = CCP_CIPHER_ALGO_AES_CTR;
+ sess->cipher.um.aes_mode = CCP_AES_MODE_CTR;
+ sess->cipher.engine = CCP_ENGINE_AES;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
+ sess->cipher.um.aes_mode = CCP_AES_MODE_ECB;
+ sess->cipher.engine = CCP_ENGINE_AES;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
+ sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
+ sess->cipher.engine = CCP_ENGINE_AES;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ sess->cipher.algo = CCP_CIPHER_ALGO_3DES_CBC;
+ sess->cipher.um.des_mode = CCP_DES_MODE_CBC;
+ sess->cipher.engine = CCP_ENGINE_3DES;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cipher algo");
+ return -1;
+ }
+
+
+ switch (sess->cipher.engine) {
+ case CCP_ENGINE_AES:
+ if (sess->cipher.key_length == 16)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
+ else if (sess->cipher.key_length == 24)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
+ else if (sess->cipher.key_length == 32)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
+ else {
+ CCP_LOG_ERR("Invalid cipher key length");
+ return -1;
+ }
+ for (i = 0; i < sess->cipher.key_length ; i++)
+ sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
+ sess->cipher.key[i];
+ break;
+ case CCP_ENGINE_3DES:
+ if (sess->cipher.key_length == 16)
+ sess->cipher.ut.des_type = CCP_DES_TYPE_128;
+ else if (sess->cipher.key_length == 24)
+ sess->cipher.ut.des_type = CCP_DES_TYPE_192;
+ else {
+ CCP_LOG_ERR("Invalid cipher key length");
+ return -1;
+ }
+ for (j = 0, x = 0; j < sess->cipher.key_length/8; j++, x += 8)
+ for (i = 0; i < 8; i++)
+ sess->cipher.key_ccp[(8 + x) - i - 1] =
+ sess->cipher.key[i + x];
+ break;
+ default:
+ CCP_LOG_ERR("Invalid CCP Engine");
+ return -ENOTSUP;
+ }
+ sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+ sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+ return 0;
+}
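+
+/*
+ * Illustrative sketch, not functional driver code: the xform consumed by
+ * ccp_configure_session_cipher() is the standard DPDK symmetric cipher
+ * transform. An application-side AES-128-CBC encrypt transform would be
+ * filled roughly as below; iv_offset is an application-chosen offset into
+ * the crypto op where the per-operation IV is placed.
+ */
+static inline void
+example_fill_aes_cbc_xform(struct rte_crypto_sym_xform *xform,
+ uint8_t *key, uint16_t iv_offset)
+{
+ xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ xform->next = NULL;
+ xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
+ xform->cipher.key.data = key;
+ xform->cipher.key.length = 16; /* selects CCP_AES_TYPE_128 above */
+ xform->cipher.iv.offset = iv_offset;
+ xform->cipher.iv.length = 16;
+}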
+
+static int
+ccp_configure_session_auth(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_auth_xform *auth_xform = NULL;
+ size_t i;
+
+ auth_xform = &xform->auth;
+
+ sess->auth.digest_length = auth_xform->digest_length;
+ if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ sess->auth.op = CCP_AUTH_OP_GENERATE;
+ else
+ sess->auth.op = CCP_AUTH_OP_VERIFY;
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ if (sess->auth_opt) {
+ sess->auth.algo = CCP_AUTH_ALGO_MD5_HMAC;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ MD5_DIGEST_SIZE);
+ sess->auth.key_length = auth_xform->key.length;
+ sess->auth.block_size = MD5_BLOCK_SIZE;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else
+ return -1; /* HMAC MD5 not supported on CCP */
+ break;
+ case RTE_CRYPTO_AUTH_SHA1:
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA1;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
+ sess->auth.ctx = (void *)ccp_sha1_init;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
+ sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+ sess->auth.block_size = SHA1_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+ return -1;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+ sess->auth.block_size = SHA1_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA224:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA224;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
+ sess->auth.ctx = (void *)ccp_sha224_init;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA224_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
+ sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+ sess->auth.block_size = SHA224_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA224_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+ sess->auth.block_size = SHA224_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_224:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_224;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_224;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_224_HMAC:
+ if (auth_xform->key.length > SHA3_224_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_224_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_224;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE;
+ sess->auth.block_size = SHA3_224_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA256;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
+ sess->auth.ctx = (void *)ccp_sha256_init;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA256_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
+ sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+ sess->auth.block_size = SHA256_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA256_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+ sess->auth.block_size = SHA256_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_256:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_256;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_256;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_256_HMAC:
+ if (auth_xform->key.length > SHA3_256_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_256_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_256;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE;
+ sess->auth.block_size = SHA3_256_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA384;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
+ sess->auth.ctx = (void *)ccp_sha384_init;
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA384_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ SHA384_DIGEST_SIZE);
+ sess->auth.block_size = SHA384_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA384_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ SHA384_DIGEST_SIZE);
+ sess->auth.block_size = SHA384_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_384:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_384;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_384;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_384_HMAC:
+ if (auth_xform->key.length > SHA3_384_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_384_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_384;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE;
+ sess->auth.block_size = SHA3_384_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA512;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
+ sess->auth.ctx = (void *)ccp_sha512_init;
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA512_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ SHA512_DIGEST_SIZE);
+ sess->auth.block_size = SHA512_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA512_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ SHA512_DIGEST_SIZE);
+ sess->auth.block_size = SHA512_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_512:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_512;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_512;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_512_HMAC:
+ if (auth_xform->key.length > SHA3_512_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_512_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_512;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE;
+ sess->auth.block_size = SHA3_512_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ break;
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
+ sess->auth.engine = CCP_ENGINE_AES;
+ sess->auth.um.aes_mode = CCP_AES_MODE_CMAC;
+ sess->auth.key_length = auth_xform->key.length;
+ /* padding and hash result */
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = AES_BLOCK_SIZE;
+ sess->auth.block_size = AES_BLOCK_SIZE;
+ if (sess->auth.key_length == 16)
+ sess->auth.ut.aes_type = CCP_AES_TYPE_128;
+ else if (sess->auth.key_length == 24)
+ sess->auth.ut.aes_type = CCP_AES_TYPE_192;
+ else if (sess->auth.key_length == 32)
+ sess->auth.ut.aes_type = CCP_AES_TYPE_256;
+ else {
+ CCP_LOG_ERR("Invalid CMAC key length");
+ return -1;
+ }
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ sess->auth.key_length);
+ for (i = 0; i < sess->auth.key_length; i++)
+ sess->auth.key_ccp[sess->auth.key_length - i - 1] =
+ sess->auth.key[i];
+ if (generate_cmac_subkeys(sess))
+ return -1;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported hash algo");
+ return -ENOTSUP;
+ }
+ return 0;
+}
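+
+/*
+ * Illustrative sketch, not functional driver code: a SHA-256-HMAC generate
+ * transform, as consumed by ccp_configure_session_auth() above, would be
+ * filled by the application roughly as follows.
+ */
+static inline void
+example_fill_sha256_hmac_xform(struct rte_crypto_sym_xform *xform,
+ uint8_t *key, uint16_t key_len)
+{
+ xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ xform->next = NULL;
+ xform->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+ xform->auth.algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ xform->auth.key.data = key;
+ xform->auth.key.length = key_len; /* must not exceed SHA256_BLOCK_SIZE */
+ xform->auth.digest_length = 32;
+}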
+
+static int
+ccp_configure_session_aead(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_aead_xform *aead_xform = NULL;
+ size_t i;
+
+ aead_xform = &xform->aead;
+
+ sess->cipher.key_length = aead_xform->key.length;
+ rte_memcpy(sess->cipher.key, aead_xform->key.data,
+ aead_xform->key.length);
+
+ if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+ sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
+ sess->auth.op = CCP_AUTH_OP_GENERATE;
+ } else {
+ sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
+ sess->auth.op = CCP_AUTH_OP_VERIFY;
+ }
+ sess->aead_algo = aead_xform->algo;
+ sess->auth.aad_length = aead_xform->aad_length;
+ sess->auth.digest_length = aead_xform->digest_length;
+
+ /* set iv parameters */
+ sess->iv.offset = aead_xform->iv.offset;
+ sess->iv.length = aead_xform->iv.length;
+
+ switch (aead_xform->algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ sess->cipher.algo = CCP_CIPHER_ALGO_AES_GCM;
+ sess->cipher.um.aes_mode = CCP_AES_MODE_GCTR;
+ sess->cipher.engine = CCP_ENGINE_AES;
+ if (sess->cipher.key_length == 16)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
+ else if (sess->cipher.key_length == 24)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
+ else if (sess->cipher.key_length == 32)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
+ else {
+ CCP_LOG_ERR("Invalid aead key length");
+ return -1;
+ }
+ for (i = 0; i < sess->cipher.key_length; i++)
+ sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
+ sess->cipher.key[i];
+ sess->auth.algo = CCP_AUTH_ALGO_AES_GCM;
+ sess->auth.engine = CCP_ENGINE_AES;
+ sess->auth.um.aes_mode = CCP_AES_MODE_GHASH;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = 0;
+ sess->auth.block_size = AES_BLOCK_SIZE;
+ sess->cmd_id = CCP_CMD_COMBINED;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported aead algo");
+ return -ENOTSUP;
+ }
+ sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+ sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+ return 0;
+}
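+
+/*
+ * Illustrative sketch, not functional driver code: an AES-128-GCM encrypt
+ * transform, as consumed by ccp_configure_session_aead() above, carries the
+ * AAD and digest lengths alongside the key and IV description.
+ */
+static inline void
+example_fill_aes_gcm_xform(struct rte_crypto_sym_xform *xform,
+ uint8_t *key, uint16_t iv_offset)
+{
+ xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ xform->next = NULL;
+ xform->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
+ xform->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
+ xform->aead.key.data = key;
+ xform->aead.key.length = 16;
+ xform->aead.iv.offset = iv_offset;
+ xform->aead.iv.length = 12; /* GCM nonce */
+ xform->aead.aad_length = 16;
+ xform->aead.digest_length = 16;
+}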
+
+int
+ccp_set_session_parameters(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform,
+ struct ccp_private *internals)
+{
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *aead_xform = NULL;
+ int ret = 0;
+
+ sess->auth_opt = internals->auth_opt;
+ sess->cmd_id = ccp_get_cmd_id(xform);
+
+ switch (sess->cmd_id) {
+ case CCP_CMD_CIPHER:
+ cipher_xform = xform;
+ break;
+ case CCP_CMD_AUTH:
+ auth_xform = xform;
+ break;
+ case CCP_CMD_CIPHER_HASH:
+ cipher_xform = xform;
+ auth_xform = xform->next;
+ break;
+ case CCP_CMD_HASH_CIPHER:
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ break;
+ case CCP_CMD_COMBINED:
+ aead_xform = xform;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cmd_id");
+ return -1;
+ }
+
+ /* Default IV length = 0 */
+ sess->iv.length = 0;
+ if (cipher_xform) {
+ ret = ccp_configure_session_cipher(sess, cipher_xform);
+ if (ret != 0) {
+ CCP_LOG_ERR("Invalid/unsupported cipher parameters");
+ return ret;
+ }
+ }
+ if (auth_xform) {
+ ret = ccp_configure_session_auth(sess, auth_xform);
+ if (ret != 0) {
+ CCP_LOG_ERR("Invalid/unsupported auth parameters");
+ return ret;
+ }
+ }
+ if (aead_xform) {
+ ret = ccp_configure_session_aead(sess, aead_xform);
+ if (ret != 0) {
+ CCP_LOG_ERR("Invalid/unsupported aead parameters");
+ return ret;
+ }
+ }
+ return ret;
+}
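+
+/*
+ * Illustrative sketch, not functional driver code: for the chained cases
+ * above, the order of the xform list selects the cmd_id; a cipher transform
+ * followed by an auth transform maps to CCP_CMD_CIPHER_HASH, the reverse
+ * order to CCP_CMD_HASH_CIPHER.
+ */
+static inline void
+example_chain_cipher_then_auth(struct rte_crypto_sym_xform *cipher,
+ struct rte_crypto_sym_xform *auth)
+{
+ cipher->next = auth; /* cipher first: CCP_CMD_CIPHER_HASH */
+ auth->next = NULL;
+}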
+
+/* calculate CCP descriptors requirement */
+static inline int
+ccp_cipher_slot(struct ccp_session *session)
+{
+ int count = 0;
+
+ switch (session->cipher.algo) {
+ case CCP_CIPHER_ALGO_AES_CBC:
+ count = 2;
+ /**< op + passthrough for iv */
+ break;
+ case CCP_CIPHER_ALGO_AES_ECB:
+ count = 1;
+ /**< only op */
+ break;
+ case CCP_CIPHER_ALGO_AES_CTR:
+ count = 2;
+ /**< op + passthrough for iv */
+ break;
+ case CCP_CIPHER_ALGO_3DES_CBC:
+ count = 2;
+ /**< op + passthrough for iv */
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cipher algo %d",
+ session->cipher.algo);
+ }
+ return count;
+}
+
+static inline int
+ccp_auth_slot(struct ccp_session *session)
+{
+ int count = 0;
+
+ switch (session->auth.algo) {
+ case CCP_AUTH_ALGO_SHA1:
+ case CCP_AUTH_ALGO_SHA224:
+ case CCP_AUTH_ALGO_SHA256:
+ case CCP_AUTH_ALGO_SHA384:
+ case CCP_AUTH_ALGO_SHA512:
+ count = 3;
+ /**< op + LSB passthrough copy to/from */
+ break;
+ case CCP_AUTH_ALGO_MD5_HMAC:
+ break;
+ case CCP_AUTH_ALGO_SHA1_HMAC:
+ case CCP_AUTH_ALGO_SHA224_HMAC:
+ case CCP_AUTH_ALGO_SHA256_HMAC:
+ if (session->auth_opt == 0)
+ count = 6;
+ break;
+ case CCP_AUTH_ALGO_SHA384_HMAC:
+ case CCP_AUTH_ALGO_SHA512_HMAC:
+ /**
+ * 1. Load PHash1 = H(k ^ ipad) to LSB
+ * 2. Generate IHash = H(message with PHash1 as init value)
+ * 3. Retrieve IHash (2 slots for 384/512)
+ * 4. Load PHash2 = H(k ^ opad) to LSB
+ * 5. Generate FHash = H(IHash with PHash2 as init value)
+ * 6. Retrieve HMAC output from LSB to host memory
+ */
+ if (session->auth_opt == 0)
+ count = 7;
+ break;
+ case CCP_AUTH_ALGO_SHA3_224:
+ case CCP_AUTH_ALGO_SHA3_256:
+ case CCP_AUTH_ALGO_SHA3_384:
+ case CCP_AUTH_ALGO_SHA3_512:
+ count = 1;
+ /**< only op; ctx and dst in host memory */
+ break;
+ case CCP_AUTH_ALGO_SHA3_224_HMAC:
+ case CCP_AUTH_ALGO_SHA3_256_HMAC:
+ count = 3;
+ break;
+ case CCP_AUTH_ALGO_SHA3_384_HMAC:
+ case CCP_AUTH_ALGO_SHA3_512_HMAC:
+ count = 4;
+ /**
+ * 1. Op to perform IHash
+ * 2./3. Retrieve intermediate result from LSB to host
+ * memory (two slots for 384/512)
+ * 4. Perform final hash
+ */
+ break;
+ case CCP_AUTH_ALGO_AES_CMAC:
+ count = 4;
+ /**
+ * 1. Load k1/k2(255:128) with iv(127:0) to LSB
+ * 2. AES-CMAC op
+ * 3. Extra descriptor when the data needs padding
+ * 4. Retrieve result
+ */
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported auth algo %d",
+ session->auth.algo);
+ }
+
+ return count;
+}
+
+static int
+ccp_aead_slot(struct ccp_session *session)
+{
+ int count = 0;
+
+ switch (session->aead_algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported aead algo %d",
+ session->aead_algo);
+ }
+ switch (session->auth.algo) {
+ case CCP_AUTH_ALGO_AES_GCM:
+ count = 5;
+ /**
+ * 1. Passthru iv
+ * 2. Hash AAD
+ * 3. GCTR
+ * 4. Reload passthru
+ * 5. Hash Final tag
+ */
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported combined auth ALGO %d",
+ session->auth.algo);
+ }
+ return count;
+}
+
+int
+ccp_compute_slot_count(struct ccp_session *session)
+{
+ int count = 0;
+
+ switch (session->cmd_id) {
+ case CCP_CMD_CIPHER:
+ count = ccp_cipher_slot(session);
+ break;
+ case CCP_CMD_AUTH:
+ count = ccp_auth_slot(session);
+ break;
+ case CCP_CMD_CIPHER_HASH:
+ case CCP_CMD_HASH_CIPHER:
+ count = ccp_cipher_slot(session);
+ count += ccp_auth_slot(session);
+ break;
+ case CCP_CMD_COMBINED:
+ count = ccp_aead_slot(session);
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cmd_id");
+
+ }
+
+ return count;
+}
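+
+/*
+ * Illustrative sketch, not functional driver code: an enqueue path would use
+ * ccp_compute_slot_count() to budget queue descriptors for a burst before
+ * building them, along these lines.
+ */
+static inline int
+example_burst_slot_budget(struct ccp_session **sessions, uint16_t nb_ops)
+{
+ int slots = 0;
+ uint16_t i;
+
+ for (i = 0; i < nb_ops; i++)
+ slots += ccp_compute_slot_count(sessions[i]);
+ return slots; /* compare against the queue's free_slots before enqueue */
+}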
+
+static uint8_t
+algo_select(int sessalgo,
+ const EVP_MD **algo)
+{
+ int res = 0;
+
+ switch (sessalgo) {
+ case CCP_AUTH_ALGO_MD5_HMAC:
+ *algo = EVP_md5();
+ break;
+ case CCP_AUTH_ALGO_SHA1_HMAC:
+ *algo = EVP_sha1();
+ break;
+ case CCP_AUTH_ALGO_SHA224_HMAC:
+ *algo = EVP_sha224();
+ break;
+ case CCP_AUTH_ALGO_SHA256_HMAC:
+ *algo = EVP_sha256();
+ break;
+ case CCP_AUTH_ALGO_SHA384_HMAC:
+ *algo = EVP_sha384();
+ break;
+ case CCP_AUTH_ALGO_SHA512_HMAC:
+ *algo = EVP_sha512();
+ break;
+ default:
+ res = -EINVAL;
+ break;
+ }
+ return res;
+}
+
+static int
+process_cpu_auth_hmac(uint8_t *src, uint8_t *dst,
+ __rte_unused uint8_t *iv,
+ EVP_PKEY *pkey,
+ int srclen,
+ EVP_MD_CTX *ctx,
+ const EVP_MD *algo,
+ uint16_t d_len)
+{
+ size_t dstlen;
+ unsigned char temp_dst[64];
+
+ if (EVP_DigestSignInit(ctx, NULL, algo, NULL, pkey) <= 0)
+ goto process_auth_err;
+
+ if (EVP_DigestSignUpdate(ctx, (char *)src, srclen) <= 0)
+ goto process_auth_err;
+
+ if (EVP_DigestSignFinal(ctx, temp_dst, &dstlen) <= 0)
+ goto process_auth_err;
+
+ memcpy(dst, temp_dst, d_len);
+ return 0;
+process_auth_err:
+ CCP_LOG_ERR("Process cpu auth failed");
+ return -EINVAL;
+}
+
+static int cpu_crypto_auth(struct ccp_qp *qp,
+ struct rte_crypto_op *op,
+ struct ccp_session *sess,
+ EVP_MD_CTX *ctx)
+{
+ uint8_t *src, *dst;
+ int srclen, status;
+ struct rte_mbuf *mbuf_src, *mbuf_dst;
+ const EVP_MD *algo = NULL;
+ EVP_PKEY *pkey;
+
+ algo_select(sess->auth.algo, &algo);
+ pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL, sess->auth.key,
+ sess->auth.key_length);
+ mbuf_src = op->sym->m_src;
+ mbuf_dst = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+ srclen = op->sym->auth.data.length;
+ src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->sym->auth.data.offset);
+
+ if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
+ dst = qp->temp_digest;
+ } else {
+ dst = op->sym->auth.digest.data;
+ if (dst == NULL) {
+ dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->auth.data.offset +
+ sess->auth.digest_length);
+ }
+ }
+ status = process_cpu_auth_hmac(src, dst, NULL,
+ pkey, srclen,
+ ctx,
+ algo,
+ sess->auth.digest_length);
+ if (status) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return status;
+ }
+
+ if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
+ if (memcmp(dst, op->sym->auth.digest.data,
+ sess->auth.digest_length) != 0) {
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+ } else {
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+ EVP_PKEY_free(pkey);
+ return 0;
+}
+
+static void
+ccp_perform_passthru(struct ccp_passthru *pst,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_desc *desc;
+ union ccp_function function;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_PASSTHRU;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 0;
+ CCP_CMD_EOM(desc) = 0;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_PT_BYTESWAP(&function) = pst->byte_swap;
+ CCP_PT_BITWISE(&function) = pst->bit_mod;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = pst->len;
+
+ if (pst->dir) {
+ CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(pst->src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
+ CCP_CMD_DST_HI(desc) = 0;
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
+
+ if (pst->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_key;
+ } else {
+
+ CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
+ CCP_CMD_SRC_HI(desc) = 0;
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SB;
+
+ CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(pst->dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+ }
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+}
+
+static int
+ccp_perform_hmac(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+
+ struct ccp_session *session;
+ union ccp_function function;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ phys_addr_t src_addr, dest_addr, dest_addr_t;
+ struct ccp_passthru pst;
+ uint64_t auth_msg_bits;
+ void *append_ptr;
+ uint8_t *addr;
+
+ session = (struct ccp_session *)get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ addr = session->auth.pre_compute;
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+ append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
+ session->auth.ctx_len);
+ dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+ dest_addr_t = dest_addr;
+
+ /** Load PHash1 to LSB*/
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.len = session->auth.ctx_len;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ /**sha engine command descriptor for IntermediateHash*/
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+ auth_msg_bits = (op->sym->auth.data.length +
+ session->auth.block_size) * 8;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+ CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+ CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* Intermediate Hash value retrieve */
+ if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+ (session->auth.ut.sha_type == CCP_SHA_TYPE_512)) {
+
+ pst.src_addr =
+ (phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t + CCP_SB_BYTES;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ } else {
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t;
+ pst.len = session->auth.ctx_len;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ }
+
+ /** Load PHash2 to LSB*/
+ addr += session->auth.ctx_len;
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.len = session->auth.ctx_len;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ /**sha engine command descriptor for FinalHash*/
+ dest_addr_t += session->auth.offset;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = (session->auth.ctx_len -
+ session->auth.offset);
+ auth_msg_bits = (session->auth.block_size +
+ session->auth.ctx_len -
+ session->auth.offset) * 8;
+
+ CCP_CMD_SRC_LO(desc) = (uint32_t)(dest_addr_t);
+ CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+ CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+ CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* Retrieve hmac output */
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr;
+ pst.len = session->auth.ctx_len;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+ (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ else
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+
+}
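+
+/*
+ * The two SHA passes above implement
+ *   HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
+ * with H(K ^ ipad) and H(K ^ opad) precomputed into auth.pre_compute as
+ * PHash1/PHash2. auth_msg_bits adds one block_size to the hashed length
+ * because the preloaded state corresponds to one key block having already
+ * been processed before the message.
+ */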
+
+static int
+ccp_perform_sha(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ phys_addr_t src_addr, dest_addr;
+ struct ccp_passthru pst;
+ void *append_ptr;
+ uint64_t auth_msg_bits;
+
+ session = (struct ccp_session *)get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+
+ append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
+ session->auth.ctx_len);
+ dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+
+ /** Passthru sha context*/
+
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
+ session->auth.ctx);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.len = session->auth.ctx_len;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ /**prepare sha command descriptor*/
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+ auth_msg_bits = op->sym->auth.data.length * 8;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+ CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+ CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* Hash value retrieve */
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr;
+ pst.len = session->auth.ctx_len;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+ (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ else
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+
+}
+
+static int
+ccp_perform_sha3_hmac(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ struct ccp_passthru pst;
+ union ccp_function function;
+ struct ccp_desc *desc;
+ uint8_t *append_ptr;
+ uint32_t tail;
+ phys_addr_t src_addr, dest_addr, ctx_paddr, dest_addr_t;
+
+ session = (struct ccp_session *)get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+ append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+ session->auth.ctx_len);
+ if (!append_ptr) {
+ CCP_LOG_ERR("CCP MBUF append failed\n");
+ return -1;
+ }
+ dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+ dest_addr_t = dest_addr + (session->auth.ctx_len / 2);
+ ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void
+ *)session->auth.pre_compute);
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ /*desc1 for SHA3-Ihash operation */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = (cmd_q->sb_sha * CCP_SB_BYTES);
+ CCP_CMD_DST_HI(desc) = 0;
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+ CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* Intermediate Hash value retrieve */
+ if ((session->auth.ut.sha_type == CCP_SHA3_TYPE_384) ||
+ (session->auth.ut.sha_type == CCP_SHA3_TYPE_512)) {
+
+ pst.src_addr =
+ (phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t + CCP_SB_BYTES;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ } else {
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+ }
+
+ /**sha engine command descriptor for FinalHash*/
+ ctx_paddr += CCP_SHA3_CTX_SIZE;
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ if (session->auth.ut.sha_type == CCP_SHA3_TYPE_224) {
+ dest_addr_t += (CCP_SB_BYTES - SHA224_DIGEST_SIZE);
+ CCP_CMD_LEN(desc) = SHA224_DIGEST_SIZE;
+ } else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_256) {
+ CCP_CMD_LEN(desc) = SHA256_DIGEST_SIZE;
+ } else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_384) {
+ dest_addr_t += (2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE);
+ CCP_CMD_LEN(desc) = SHA384_DIGEST_SIZE;
+ } else {
+ CCP_CMD_LEN(desc) = SHA512_DIGEST_SIZE;
+ }
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)dest_addr_t);
+ CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = (uint32_t)dest_addr;
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+ CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_sha3(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ struct ccp_desc *desc;
+ uint8_t *ctx_addr, *append_ptr;
+ uint32_t tail;
+ phys_addr_t src_addr, dest_addr, ctx_paddr;
+
+ session = (struct ccp_session *)get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+ append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+ session->auth.ctx_len);
+ if (!append_ptr) {
+ CCP_LOG_ERR("CCP MBUF append failed\n");
+ return -1;
+ }
+ dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+ ctx_addr = session->auth.sha3_ctx;
+ ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ /* prepare desc for SHA3 operation */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+ CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_aes_cmac(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ struct ccp_passthru pst;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ uint8_t *src_tb, *append_ptr, *ctx_addr;
+ phys_addr_t src_addr, dest_addr, key_addr;
+ int length, non_align_len;
+
+ session = (struct ccp_session *)get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ key_addr = rte_mem_virt2phy(session->auth.key_ccp);
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+ append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+ session->auth.ctx_len);
+ dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+
+ function.raw = 0;
+ CCP_AES_ENCRYPT(&function) = CCP_CIPHER_DIR_ENCRYPT;
+ CCP_AES_MODE(&function) = session->auth.um.aes_mode;
+ CCP_AES_TYPE(&function) = session->auth.ut.aes_type;
+
+ if (op->sym->auth.data.length % session->auth.block_size == 0) {
+
+ ctx_addr = session->auth.pre_compute;
+ memset(ctx_addr, 0, AES_BLOCK_SIZE);
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ /* prepare desc for aes-cmac command */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail =
+ (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+ } else {
+ ctx_addr = session->auth.pre_compute + CCP_SB_BYTES;
+ memset(ctx_addr, 0, AES_BLOCK_SIZE);
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ length = (op->sym->auth.data.length / AES_BLOCK_SIZE);
+ length *= AES_BLOCK_SIZE;
+ non_align_len = op->sym->auth.data.length - length;
+ /* prepare desc for aes-cmac command */
+ /*Command 1*/
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = length;
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ /*Command 2*/
+ append_ptr = append_ptr + CCP_SB_BYTES;
+ memset(append_ptr, 0, AES_BLOCK_SIZE);
+ src_tb = rte_pktmbuf_mtod_offset(op->sym->m_src,
+ uint8_t *,
+ op->sym->auth.data.offset +
+ length);
+ rte_memcpy(append_ptr, src_tb, non_align_len);
+ append_ptr[non_align_len] = CMAC_PAD_VALUE;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+ CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)(dest_addr + CCP_SB_BYTES));
+ CCP_CMD_SRC_HI(desc) = high32_value(dest_addr + CCP_SB_BYTES);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+ tail =
+ (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+ }
+ /* Retrieve result */
+ pst.dest_addr = dest_addr;
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_aes(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ uint8_t *lsb_buf;
+ struct ccp_passthru pst = {0};
+ struct ccp_desc *desc;
+ phys_addr_t src_addr, dest_addr, key_addr;
+ uint8_t *iv;
+
+ session = (struct ccp_session *)get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ function.raw = 0;
+
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+ if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB) {
+ if (session->cipher.um.aes_mode == CCP_AES_MODE_CTR) {
+ rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE,
+ iv, session->iv.length);
+ pst.src_addr = (phys_addr_t)session->cipher.nonce_phys;
+ CCP_AES_SIZE(&function) = 0x1F;
+ } else {
+ lsb_buf =
+ &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
+ rte_memcpy(lsb_buf +
+ (CCP_SB_BYTES - session->iv.length),
+ iv, session->iv.length);
+ pst.src_addr = b_info->lsb_buf_phys +
+ (b_info->lsb_buf_idx * CCP_SB_BYTES);
+ b_info->lsb_buf_idx++;
+ }
+
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+ }
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->cipher.data.offset);
+ if (likely(op->sym->m_dst != NULL))
+ dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+ op->sym->cipher.data.offset);
+ else
+ dest_addr = src_addr;
+ key_addr = session->cipher.key_phys;
+
+ /* prepare desc for aes command */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+
+ CCP_AES_ENCRYPT(&function) = session->cipher.dir;
+ CCP_AES_MODE(&function) = session->cipher.um.aes_mode;
+ CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB)
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_3des(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ unsigned char *lsb_buf;
+ struct ccp_passthru pst;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ uint8_t *iv;
+ phys_addr_t src_addr, dest_addr, key_addr;
+
+ session = (struct ccp_session *)get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+ switch (session->cipher.um.des_mode) {
+ case CCP_DES_MODE_CBC:
+ lsb_buf = &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
+ b_info->lsb_buf_idx++;
+
+ rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
+ iv, session->iv.length);
+
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+ break;
+ case CCP_DES_MODE_CFB:
+ case CCP_DES_MODE_ECB:
+ CCP_LOG_ERR("Unsupported DES cipher mode");
+ return -ENOTSUP;
+ }
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->cipher.data.offset);
+ if (unlikely(op->sym->m_dst != NULL))
+ dest_addr =
+ rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+ op->sym->cipher.data.offset);
+ else
+ dest_addr = src_addr;
+
+ key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+ memset(desc, 0, Q_DESC_SIZE);
+
+ /* prepare desc for des command */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_3DES;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_DES_ENCRYPT(&function) = session->cipher.dir;
+ CCP_DES_MODE(&function) = session->cipher.um.des_mode;
+ CCP_DES_TYPE(&function) = session->cipher.ut.des_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ if (session->cipher.um.des_mode)
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ /* Write the new tail address back to the queue register */
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ /* Turn the queue back on using our cached control register */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_aes_gcm(struct rte_crypto_op *op, struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ uint8_t *iv;
+ struct ccp_passthru pst;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ uint64_t *temp;
+ phys_addr_t src_addr, dest_addr, key_addr, aad_addr;
+ phys_addr_t digest_dest_addr;
+ int length, non_align_len;
+
+ session = (struct ccp_session *)get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+ key_addr = session->cipher.key_phys;
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->aead.data.offset);
+ if (unlikely(op->sym->m_dst != NULL))
+ dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+ op->sym->aead.data.offset);
+ else
+ dest_addr = src_addr;
+ rte_pktmbuf_append(op->sym->m_src, session->auth.ctx_len);
+ digest_dest_addr = op->sym->aead.digest.phys_addr;
+ temp = (uint64_t *)(op->sym->aead.digest.data + AES_BLOCK_SIZE);
+ *temp++ = rte_bswap64(session->auth.aad_length << 3);
+ *temp = rte_bswap64(op->sym->aead.data.length << 3);
+
+ non_align_len = op->sym->aead.data.length % AES_BLOCK_SIZE;
+ length = CCP_ALIGN(op->sym->aead.data.length, AES_BLOCK_SIZE);
+
+ aad_addr = op->sym->aead.aad.phys_addr;
+
+ /* CMD1 IV Passthru */
+ rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE, iv,
+ session->iv.length);
+ pst.src_addr = session->cipher.nonce_phys;
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ /* CMD2 GHASH-AAD */
+ function.raw = 0;
+ CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_AAD;
+ CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
+ CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = session->auth.aad_length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)aad_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(aad_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* CMD3 : GCTR Plain text */
+ function.raw = 0;
+ CCP_AES_ENCRYPT(&function) = session->cipher.dir;
+ CCP_AES_MODE(&function) = CCP_AES_MODE_GCTR;
+ CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+ if (non_align_len == 0)
+ CCP_AES_SIZE(&function) = (AES_BLOCK_SIZE << 3) - 1;
+ else
+ CCP_AES_SIZE(&function) = (non_align_len << 3) - 1;
+
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* CMD4 : PT to copy IV */
+ pst.src_addr = session->cipher.nonce_phys;
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = AES_BLOCK_SIZE;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ /* CMD5 : GHASH-Final */
+ function.raw = 0;
+ CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_FINAL;
+ CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
+ CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+ /* Last block (AAD_len || PT_len)*/
+ CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)digest_dest_addr + AES_BLOCK_SIZE);
+ CCP_CMD_SRC_HI(desc) = high32_value(digest_dest_addr + AES_BLOCK_SIZE);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)digest_dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(digest_dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
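+
+/*
+ * The five descriptors above follow the GCM composition
+ *   tag = GCTR(J0, GHASH(AAD || C || len(AAD) || len(C))).
+ * The 16-byte length block written at digest.data + AES_BLOCK_SIZE before
+ * submission is the final GHASH input, and the GHASH-Final pass combines
+ * the accumulated hash with the reloaded counter to leave the tag in place
+ * at digest.data.
+ */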
+
+static inline int
+ccp_crypto_cipher(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+ int result = 0;
+ struct ccp_session *session;
+
+ session = (struct ccp_session *)get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ switch (session->cipher.algo) {
+ case CCP_CIPHER_ALGO_AES_CBC:
+ result = ccp_perform_aes(op, cmd_q, b_info);
+ b_info->desccnt += 2;
+ break;
+ case CCP_CIPHER_ALGO_AES_CTR:
+ result = ccp_perform_aes(op, cmd_q, b_info);
+ b_info->desccnt += 2;
+ break;
+ case CCP_CIPHER_ALGO_AES_ECB:
+ result = ccp_perform_aes(op, cmd_q, b_info);
+ b_info->desccnt += 1;
+ break;
+ case CCP_CIPHER_ALGO_3DES_CBC:
+ result = ccp_perform_3des(op, cmd_q, b_info);
+ b_info->desccnt += 2;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cipher algo %d",
+ session->cipher.algo);
+ return -ENOTSUP;
+ }
+ return result;
+}
+
+static inline int
+ccp_crypto_auth(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+
+ int result = 0;
+ struct ccp_session *session;
+
+ session = (struct ccp_session *)get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ switch (session->auth.algo) {
+ case CCP_AUTH_ALGO_SHA1:
+ case CCP_AUTH_ALGO_SHA224:
+ case CCP_AUTH_ALGO_SHA256:
+ case CCP_AUTH_ALGO_SHA384:
+ case CCP_AUTH_ALGO_SHA512:
+ result = ccp_perform_sha(op, cmd_q);
+ b_info->desccnt += 3;
+ break;
+ case CCP_AUTH_ALGO_MD5_HMAC:
+ if (session->auth_opt == 0)
+ result = -1;
+ break;
+ case CCP_AUTH_ALGO_SHA1_HMAC:
+ case CCP_AUTH_ALGO_SHA224_HMAC:
+ case CCP_AUTH_ALGO_SHA256_HMAC:
+ if (session->auth_opt == 0) {
+ result = ccp_perform_hmac(op, cmd_q);
+ b_info->desccnt += 6;
+ }
+ break;
+ case CCP_AUTH_ALGO_SHA384_HMAC:
+ case CCP_AUTH_ALGO_SHA512_HMAC:
+ if (session->auth_opt == 0) {
+ result = ccp_perform_hmac(op, cmd_q);
+ b_info->desccnt += 7;
+ }
+ break;
+ case CCP_AUTH_ALGO_SHA3_224:
+ case CCP_AUTH_ALGO_SHA3_256:
+ case CCP_AUTH_ALGO_SHA3_384:
+ case CCP_AUTH_ALGO_SHA3_512:
+ result = ccp_perform_sha3(op, cmd_q);
+ b_info->desccnt += 1;
+ break;
+ case CCP_AUTH_ALGO_SHA3_224_HMAC:
+ case CCP_AUTH_ALGO_SHA3_256_HMAC:
+ result = ccp_perform_sha3_hmac(op, cmd_q);
+ b_info->desccnt += 3;
+ break;
+ case CCP_AUTH_ALGO_SHA3_384_HMAC:
+ case CCP_AUTH_ALGO_SHA3_512_HMAC:
+ result = ccp_perform_sha3_hmac(op, cmd_q);
+ b_info->desccnt += 4;
+ break;
+ case CCP_AUTH_ALGO_AES_CMAC:
+ result = ccp_perform_aes_cmac(op, cmd_q);
+ b_info->desccnt += 4;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported auth algo %d",
+ session->auth.algo);
+ return -ENOTSUP;
+ }
+
+ return result;
+}
+
+static inline int
+ccp_crypto_aead(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+ int result = 0;
+ struct ccp_session *session;
+
+ session = (struct ccp_session *)get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ switch (session->auth.algo) {
+ case CCP_AUTH_ALGO_AES_GCM:
+ if (session->cipher.algo != CCP_CIPHER_ALGO_AES_GCM) {
+ CCP_LOG_ERR("Incorrect chain order");
+ return -1;
+ }
+ result = ccp_perform_aes_gcm(op, cmd_q);
+ b_info->desccnt += 5;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported aead algo %d",
+ session->aead_algo);
+ return -ENOTSUP;
+ }
+ return result;
+}
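
The three dispatch helpers above record how many hardware descriptors each operation consumes in b_info->desccnt; that count is what the dequeue path later returns to cmd_q->free_slots. The driver's ccp_compute_slot_count() (declared in ccp_crypto.h, implemented elsewhere in this patch) makes the matching up-front estimate. A minimal sketch of the cipher portion, reconstructed only from the desccnt increments visible above and not the driver's actual implementation:

/* Sketch: descriptor demand per cipher algo, mirroring the desccnt
 * increments in ccp_crypto_cipher() above. Assumes ccp_crypto.h.
 */
static int
example_cipher_slot_count(enum ccp_cipher_algo algo)
{
        switch (algo) {
        case CCP_CIPHER_ALGO_AES_CBC:
        case CCP_CIPHER_ALGO_AES_CTR:
        case CCP_CIPHER_ALGO_3DES_CBC:
                return 2;       /* two descriptors per op (assumed IV passthru + cipher) */
        case CCP_CIPHER_ALGO_AES_ECB:
                return 1;       /* single cipher descriptor */
        default:
                return 0;       /* unsupported */
        }
}
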
+
+int
+process_ops_to_enqueue(struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ struct ccp_queue *cmd_q,
+ uint16_t nb_ops,
+ int slots_req)
+{
+ int i, result = 0;
+ struct ccp_batch_info *b_info;
+ struct ccp_session *session;
+ EVP_MD_CTX *auth_ctx = NULL;
+
+ if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
+ CCP_LOG_ERR("batch info allocation failed");
+ return 0;
+ }
+
+ auth_ctx = EVP_MD_CTX_create();
+ if (unlikely(!auth_ctx)) {
+ CCP_LOG_ERR("Unable to create auth ctx");
+ return 0;
+ }
+ b_info->auth_ctr = 0;
+
+ /* populate batch info necessary for dequeue */
+ b_info->op_idx = 0;
+ b_info->lsb_buf_idx = 0;
+ b_info->desccnt = 0;
+ b_info->cmd_q = cmd_q;
+ b_info->lsb_buf_phys =
+ (phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
+ rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);
+
+ b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
+ Q_DESC_SIZE);
+ for (i = 0; i < nb_ops; i++) {
+ session = (struct ccp_session *)get_session_private_data(
+ op[i]->sym->session,
+ ccp_cryptodev_driver_id);
+ switch (session->cmd_id) {
+ case CCP_CMD_CIPHER:
+ result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+ break;
+ case CCP_CMD_AUTH:
+ if (session->auth_opt) {
+ b_info->auth_ctr++;
+ result = cpu_crypto_auth(qp, op[i],
+ session, auth_ctx);
+ } else
+ result = ccp_crypto_auth(op[i], cmd_q, b_info);
+ break;
+ case CCP_CMD_CIPHER_HASH:
+ result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+ if (result)
+ break;
+ result = ccp_crypto_auth(op[i], cmd_q, b_info);
+ break;
+ case CCP_CMD_HASH_CIPHER:
+ if (session->auth_opt) {
+ result = cpu_crypto_auth(qp, op[i],
+ session, auth_ctx);
+ if (op[i]->status !=
+ RTE_CRYPTO_OP_STATUS_SUCCESS)
+ continue;
+ } else
+ result = ccp_crypto_auth(op[i], cmd_q, b_info);
+
+ if (result)
+ break;
+ result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+ break;
+ case CCP_CMD_COMBINED:
+ result = ccp_crypto_aead(op[i], cmd_q, b_info);
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cmd_id");
+ result = -1;
+ }
+ if (unlikely(result < 0)) {
+ rte_atomic64_add(&b_info->cmd_q->free_slots,
+ (slots_req - b_info->desccnt));
+ break;
+ }
+ b_info->op[i] = op[i];
+ }
+
+ b_info->opcnt = i;
+ b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
+ Q_DESC_SIZE);
+
+ rte_wmb();
+ /* Write the new tail address back to the queue register */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
+ b_info->tail_offset);
+ /* Turn the queue back on using our cached control register */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ rte_ring_enqueue(qp->processed_pkts, (void *)b_info);
+
+ EVP_MD_CTX_destroy(auth_ctx);
+ return i;
+}
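
process_ops_to_enqueue() reserves descriptor slots up front, builds every descriptor for the burst, rings the tail doorbell once, and parks the batch on qp->processed_pkts for the dequeue side. A hedged sketch of how the PMD's burst-enqueue callback (which lives in ccp_pmd.c, outside this excerpt) might drive it; the function shape below is an assumption for illustration only:

/* Hypothetical caller; the real rte_cryptodev enqueue callback may differ. */
static uint16_t
example_enqueue_burst(struct ccp_qp *qp, struct rte_cryptodev *cdev,
                      struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct ccp_queue *cmd_q;
        struct ccp_session *session;
        int slots_req = 0;
        uint16_t i;

        if (nb_ops == 0)
                return 0;

        /* Worst-case descriptor demand for the whole burst */
        for (i = 0; i < nb_ops; i++) {
                session = (struct ccp_session *)get_session_private_data(
                                ops[i]->sym->session, ccp_cryptodev_driver_id);
                slots_req += ccp_compute_slot_count(session);
        }

        /* Pick a hardware queue with enough free descriptor slots */
        cmd_q = ccp_allot_queue(cdev, slots_req);
        if (cmd_q == NULL)
                return 0;

        return process_ops_to_enqueue(qp, ops, cmd_q, nb_ops, slots_req);
}
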
+
+static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
+{
+ struct ccp_session *session;
+ uint8_t *digest_data, *addr;
+ struct rte_mbuf *m_last;
+ int offset, digest_offset;
+ uint8_t digest_le[64];
+
+ session = (struct ccp_session *)get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ if (session->cmd_id == CCP_CMD_COMBINED) {
+ digest_data = op->sym->aead.digest.data;
+ digest_offset = op->sym->aead.data.offset +
+ op->sym->aead.data.length;
+ } else {
+ digest_data = op->sym->auth.digest.data;
+ digest_offset = op->sym->auth.data.offset +
+ op->sym->auth.data.length;
+ }
+ m_last = rte_pktmbuf_lastseg(op->sym->m_src);
+ addr = (uint8_t *)((char *)m_last->buf_addr + m_last->data_off +
+ m_last->data_len - session->auth.ctx_len);
+
+ rte_mb();
+ offset = session->auth.offset;
+
+ if (session->auth.engine == CCP_ENGINE_SHA)
+ if ((session->auth.ut.sha_type != CCP_SHA_TYPE_1) &&
+ (session->auth.ut.sha_type != CCP_SHA_TYPE_224) &&
+ (session->auth.ut.sha_type != CCP_SHA_TYPE_256)) {
+ /* All other algorithms require byte
+ * swap done by host
+ */
+ unsigned int i;
+
+ offset = session->auth.ctx_len -
+ session->auth.offset - 1;
+ for (i = 0; i < session->auth.digest_length; i++)
+ digest_le[i] = addr[offset - i];
+ offset = 0;
+ addr = digest_le;
+ }
+
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ if (session->auth.op == CCP_AUTH_OP_VERIFY) {
+ if (memcmp(addr + offset, digest_data,
+ session->auth.digest_length) != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+ } else {
+ if (unlikely(digest_data == 0))
+ digest_data = rte_pktmbuf_mtod_offset(
+ op->sym->m_dst, uint8_t *,
+ digest_offset);
+ rte_memcpy(digest_data, addr + offset,
+ session->auth.digest_length);
+ }
+ /* Trim area used for digest from mbuf. */
+ rte_pktmbuf_trim(op->sym->m_src,
+ session->auth.ctx_len);
+}
+
+static int
+ccp_prepare_ops(struct ccp_qp *qp,
+ struct rte_crypto_op **op_d,
+ struct ccp_batch_info *b_info,
+ uint16_t nb_ops)
+{
+ int i, min_ops;
+ struct ccp_session *session;
+
+ EVP_MD_CTX *auth_ctx = NULL;
+
+ auth_ctx = EVP_MD_CTX_create();
+ if (unlikely(!auth_ctx)) {
+ CCP_LOG_ERR("Unable to create auth ctx");
+ return 0;
+ }
+ min_ops = RTE_MIN(nb_ops, b_info->opcnt);
+
+ for (i = 0; i < min_ops; i++) {
+ op_d[i] = b_info->op[b_info->op_idx++];
+ session = (struct ccp_session *)get_session_private_data(
+ op_d[i]->sym->session,
+ ccp_cryptodev_driver_id);
+ switch (session->cmd_id) {
+ case CCP_CMD_CIPHER:
+ op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ break;
+ case CCP_CMD_AUTH:
+ if (session->auth_opt == 0)
+ ccp_auth_dq_prepare(op_d[i]);
+ break;
+ case CCP_CMD_CIPHER_HASH:
+ if (session->auth_opt)
+ cpu_crypto_auth(qp, op_d[i],
+ session, auth_ctx);
+ else
+ ccp_auth_dq_prepare(op_d[i]);
+ break;
+ case CCP_CMD_HASH_CIPHER:
+ if (session->auth_opt)
+ op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ else
+ ccp_auth_dq_prepare(op_d[i]);
+ break;
+ case CCP_CMD_COMBINED:
+ ccp_auth_dq_prepare(op_d[i]);
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cmd_id");
+ }
+ }
+
+ EVP_MD_CTX_destroy(auth_ctx);
+ b_info->opcnt -= min_ops;
+ return min_ops;
+}
+
+int
+process_ops_to_dequeue(struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ uint16_t nb_ops)
+{
+ struct ccp_batch_info *b_info;
+ uint32_t cur_head_offset;
+
+ if (qp->b_info != NULL) {
+ b_info = qp->b_info;
+ if (unlikely(b_info->op_idx > 0))
+ goto success;
+ } else if (rte_ring_dequeue(qp->processed_pkts,
+ (void **)&b_info))
+ return 0;
+
+ if (b_info->auth_ctr == b_info->opcnt)
+ goto success;
+ cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
+ CMD_Q_HEAD_LO_BASE);
+
+ if (b_info->head_offset < b_info->tail_offset) {
+ if ((cur_head_offset >= b_info->head_offset) &&
+ (cur_head_offset < b_info->tail_offset)) {
+ qp->b_info = b_info;
+ return 0;
+ }
+ } else {
+ if ((cur_head_offset >= b_info->head_offset) ||
+ (cur_head_offset < b_info->tail_offset)) {
+ qp->b_info = b_info;
+ return 0;
+ }
+ }
+
+success:
+ nb_ops = ccp_prepare_ops(qp, op, b_info, nb_ops);
+ rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
+ b_info->desccnt = 0;
+ if (b_info->opcnt > 0) {
+ qp->b_info = b_info;
+ } else {
+ rte_mempool_put(qp->batch_mp, (void *)b_info);
+ qp->b_info = NULL;
+ }
+
+ return nb_ops;
+}
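
The dequeue path only releases a batch once the hardware head pointer has left the [head_offset, tail_offset) window recorded at enqueue time; the two-branch comparison above handles the tail wrapping past the end of the descriptor ring. The same predicate restated as a standalone helper, purely as a sketch for clarity, not driver code:

/* Returns 1 once the hardware head has consumed every descriptor the
 * batch occupies, mirroring the wrap-aware check in
 * process_ops_to_dequeue() above.
 */
static inline int
example_batch_done(uint32_t cur_head, uint32_t head, uint32_t tail)
{
        if (head < tail)        /* batch does not wrap around the ring */
                return !(cur_head >= head && cur_head < tail);
        /* batch wraps: busy region is [head, end) plus [base, tail) */
        return !(cur_head >= head || cur_head < tail);
}
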
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
new file mode 100644
index 00000000..882b398a
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -0,0 +1,388 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_CRYPTO_H_
+#define _CCP_CRYPTO_H_
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_spinlock.h>
+#include <rte_crypto_sym.h>
+#include <rte_cryptodev.h>
+
+#include "ccp_dev.h"
+
+#define AES_BLOCK_SIZE 16
+#define CMAC_PAD_VALUE 0x80
+#define CTR_NONCE_SIZE 4
+#define CTR_IV_SIZE 8
+#define CCP_SHA3_CTX_SIZE 200
+
+/** Macro helpers for CCP command creation */
+#define CCP_AES_SIZE(p) ((p)->aes.size)
+#define CCP_AES_ENCRYPT(p) ((p)->aes.encrypt)
+#define CCP_AES_MODE(p) ((p)->aes.mode)
+#define CCP_AES_TYPE(p) ((p)->aes.type)
+#define CCP_DES_ENCRYPT(p) ((p)->des.encrypt)
+#define CCP_DES_MODE(p) ((p)->des.mode)
+#define CCP_DES_TYPE(p) ((p)->des.type)
+#define CCP_SHA_TYPE(p) ((p)->sha.type)
+#define CCP_PT_BYTESWAP(p) ((p)->pt.byteswap)
+#define CCP_PT_BITWISE(p) ((p)->pt.bitwise)
+
+/* HMAC */
+#define HMAC_IPAD_VALUE 0x36
+#define HMAC_OPAD_VALUE 0x5c
+
+/* MD5 */
+#define MD5_DIGEST_SIZE 16
+#define MD5_BLOCK_SIZE 64
+
+/* SHA */
+#define SHA_COMMON_DIGEST_SIZE 32
+#define SHA1_DIGEST_SIZE 20
+#define SHA1_BLOCK_SIZE 64
+
+#define SHA224_DIGEST_SIZE 28
+#define SHA224_BLOCK_SIZE 64
+#define SHA3_224_BLOCK_SIZE 144
+
+#define SHA256_DIGEST_SIZE 32
+#define SHA256_BLOCK_SIZE 64
+#define SHA3_256_BLOCK_SIZE 136
+
+#define SHA384_DIGEST_SIZE 48
+#define SHA384_BLOCK_SIZE 128
+#define SHA3_384_BLOCK_SIZE 104
+
+#define SHA512_DIGEST_SIZE 64
+#define SHA512_BLOCK_SIZE 128
+#define SHA3_512_BLOCK_SIZE 72
+
+/* Maximum length for digest */
+#define DIGEST_LENGTH_MAX 64
+
+/* SHA LSB initialization values */
+
+#define SHA1_H0 0x67452301UL
+#define SHA1_H1 0xefcdab89UL
+#define SHA1_H2 0x98badcfeUL
+#define SHA1_H3 0x10325476UL
+#define SHA1_H4 0xc3d2e1f0UL
+
+#define SHA224_H0 0xc1059ed8UL
+#define SHA224_H1 0x367cd507UL
+#define SHA224_H2 0x3070dd17UL
+#define SHA224_H3 0xf70e5939UL
+#define SHA224_H4 0xffc00b31UL
+#define SHA224_H5 0x68581511UL
+#define SHA224_H6 0x64f98fa7UL
+#define SHA224_H7 0xbefa4fa4UL
+
+#define SHA256_H0 0x6a09e667UL
+#define SHA256_H1 0xbb67ae85UL
+#define SHA256_H2 0x3c6ef372UL
+#define SHA256_H3 0xa54ff53aUL
+#define SHA256_H4 0x510e527fUL
+#define SHA256_H5 0x9b05688cUL
+#define SHA256_H6 0x1f83d9abUL
+#define SHA256_H7 0x5be0cd19UL
+
+#define SHA384_H0 0xcbbb9d5dc1059ed8ULL
+#define SHA384_H1 0x629a292a367cd507ULL
+#define SHA384_H2 0x9159015a3070dd17ULL
+#define SHA384_H3 0x152fecd8f70e5939ULL
+#define SHA384_H4 0x67332667ffc00b31ULL
+#define SHA384_H5 0x8eb44a8768581511ULL
+#define SHA384_H6 0xdb0c2e0d64f98fa7ULL
+#define SHA384_H7 0x47b5481dbefa4fa4ULL
+
+#define SHA512_H0 0x6a09e667f3bcc908ULL
+#define SHA512_H1 0xbb67ae8584caa73bULL
+#define SHA512_H2 0x3c6ef372fe94f82bULL
+#define SHA512_H3 0xa54ff53a5f1d36f1ULL
+#define SHA512_H4 0x510e527fade682d1ULL
+#define SHA512_H5 0x9b05688c2b3e6c1fULL
+#define SHA512_H6 0x1f83d9abfb41bd6bULL
+#define SHA512_H7 0x5be0cd19137e2179ULL
+
+/**
+ * CCP supported AES modes
+ */
+enum ccp_aes_mode {
+ CCP_AES_MODE_ECB = 0,
+ CCP_AES_MODE_CBC,
+ CCP_AES_MODE_OFB,
+ CCP_AES_MODE_CFB,
+ CCP_AES_MODE_CTR,
+ CCP_AES_MODE_CMAC,
+ CCP_AES_MODE_GHASH,
+ CCP_AES_MODE_GCTR,
+ CCP_AES_MODE__LAST,
+};
+
+/**
+ * CCP AES GHASH mode
+ */
+enum ccp_aes_ghash_mode {
+ CCP_AES_MODE_GHASH_AAD = 0,
+ CCP_AES_MODE_GHASH_FINAL
+};
+
+/**
+ * CCP supported AES types
+ */
+enum ccp_aes_type {
+ CCP_AES_TYPE_128 = 0,
+ CCP_AES_TYPE_192,
+ CCP_AES_TYPE_256,
+ CCP_AES_TYPE__LAST,
+};
+
+/***** 3DES engine *****/
+
+/**
+ * CCP supported DES/3DES modes
+ */
+enum ccp_des_mode {
+ CCP_DES_MODE_ECB = 0, /* Not supported */
+ CCP_DES_MODE_CBC,
+ CCP_DES_MODE_CFB,
+};
+
+/**
+ * CCP supported DES types
+ */
+enum ccp_des_type {
+ CCP_DES_TYPE_128 = 0, /* 112 + 16 parity */
+ CCP_DES_TYPE_192, /* 168 + 24 parity */
+ CCP_DES_TYPE__LAST,
+};
+
+/***** SHA engine *****/
+
+/**
+ * ccp_sha_type - type of SHA operation
+ *
+ * @CCP_SHA_TYPE_1: SHA-1 operation
+ * @CCP_SHA_TYPE_224: SHA-224 operation
+ * @CCP_SHA_TYPE_256: SHA-256 operation
+ */
+enum ccp_sha_type {
+ CCP_SHA_TYPE_1 = 1,
+ CCP_SHA_TYPE_224,
+ CCP_SHA_TYPE_256,
+ CCP_SHA_TYPE_384,
+ CCP_SHA_TYPE_512,
+ CCP_SHA_TYPE_RSVD1,
+ CCP_SHA_TYPE_RSVD2,
+ CCP_SHA3_TYPE_224,
+ CCP_SHA3_TYPE_256,
+ CCP_SHA3_TYPE_384,
+ CCP_SHA3_TYPE_512,
+ CCP_SHA_TYPE__LAST,
+};
+
+/**
+ * CCP supported cipher algorithms
+ */
+enum ccp_cipher_algo {
+ CCP_CIPHER_ALGO_AES_CBC = 0,
+ CCP_CIPHER_ALGO_AES_ECB,
+ CCP_CIPHER_ALGO_AES_CTR,
+ CCP_CIPHER_ALGO_AES_GCM,
+ CCP_CIPHER_ALGO_3DES_CBC,
+};
+
+/**
+ * CCP cipher operation type
+ */
+enum ccp_cipher_dir {
+ CCP_CIPHER_DIR_DECRYPT = 0,
+ CCP_CIPHER_DIR_ENCRYPT = 1,
+};
+
+/**
+ * CCP supported hash algorithms
+ */
+enum ccp_hash_algo {
+ CCP_AUTH_ALGO_SHA1 = 0,
+ CCP_AUTH_ALGO_SHA1_HMAC,
+ CCP_AUTH_ALGO_SHA224,
+ CCP_AUTH_ALGO_SHA224_HMAC,
+ CCP_AUTH_ALGO_SHA3_224,
+ CCP_AUTH_ALGO_SHA3_224_HMAC,
+ CCP_AUTH_ALGO_SHA256,
+ CCP_AUTH_ALGO_SHA256_HMAC,
+ CCP_AUTH_ALGO_SHA3_256,
+ CCP_AUTH_ALGO_SHA3_256_HMAC,
+ CCP_AUTH_ALGO_SHA384,
+ CCP_AUTH_ALGO_SHA384_HMAC,
+ CCP_AUTH_ALGO_SHA3_384,
+ CCP_AUTH_ALGO_SHA3_384_HMAC,
+ CCP_AUTH_ALGO_SHA512,
+ CCP_AUTH_ALGO_SHA512_HMAC,
+ CCP_AUTH_ALGO_SHA3_512,
+ CCP_AUTH_ALGO_SHA3_512_HMAC,
+ CCP_AUTH_ALGO_AES_CMAC,
+ CCP_AUTH_ALGO_AES_GCM,
+ CCP_AUTH_ALGO_MD5_HMAC,
+};
+
+/**
+ * CCP hash operation type
+ */
+enum ccp_hash_op {
+ CCP_AUTH_OP_GENERATE = 0,
+ CCP_AUTH_OP_VERIFY = 1,
+};
+
+/* CCP crypto private session structure */
+struct ccp_session {
+ bool auth_opt;
+ enum ccp_cmd_order cmd_id;
+ /**< chain order mode */
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv;
+ /**< IV parameters */
+ struct {
+ enum ccp_cipher_algo algo;
+ enum ccp_engine engine;
+ union {
+ enum ccp_aes_mode aes_mode;
+ enum ccp_des_mode des_mode;
+ } um;
+ union {
+ enum ccp_aes_type aes_type;
+ enum ccp_des_type des_type;
+ } ut;
+ enum ccp_cipher_dir dir;
+ uint64_t key_length;
+ /**< max cipher key size 256 bits */
+ uint8_t key[32];
+ /**< key in CCP-required format */
+ uint8_t key_ccp[32];
+ phys_addr_t key_phys;
+ /**< AES-CTR: nonce(4) || IV(8) || counter */
+ uint8_t nonce[32];
+ phys_addr_t nonce_phys;
+ } cipher;
+ /**< Cipher Parameters */
+
+ struct {
+ enum ccp_hash_algo algo;
+ enum ccp_engine engine;
+ union {
+ enum ccp_aes_mode aes_mode;
+ } um;
+ union {
+ enum ccp_sha_type sha_type;
+ enum ccp_aes_type aes_type;
+ } ut;
+ enum ccp_hash_op op;
+ uint64_t key_length;
+ /**< max hash key size 144 bytes (per capabilities struct) */
+ uint8_t key[144];
+ /**< max be key size of AES is 32*/
+ uint8_t key_ccp[32];
+ phys_addr_t key_phys;
+ uint64_t digest_length;
+ void *ctx;
+ int ctx_len;
+ int offset;
+ int block_size;
+ /**< Buffer to store software-generated precomputed values */
+ /**< For HMAC: H(ipad ^ key) and H(opad ^ key) */
+ /**< For CMAC: K1 IV and K2 IV */
+ uint8_t pre_compute[2 * CCP_SHA3_CTX_SIZE];
+ /**< SHA3 initial ctx, all zeros */
+ uint8_t sha3_ctx[200];
+ int aad_length;
+ } auth;
+ /**< Authentication Parameters */
+ enum rte_crypto_aead_algorithm aead_algo;
+ /**< AEAD Algorithm */
+
+ uint32_t reserved;
+} __rte_cache_aligned;
+
+extern uint8_t ccp_cryptodev_driver_id;
+
+struct ccp_qp;
+struct ccp_private;
+
+/**
+ * Set and validate CCP crypto session parameters
+ *
+ * @param sess ccp private session
+ * @param xform crypto xform for this session
+ * @return 0 on success otherwise -1
+ */
+int ccp_set_session_parameters(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform,
+ struct ccp_private *internals);
+
+/**
+ * Compute the number of CCP descriptor slots required by a session
+ *
+ * @param session CCP private session
+ * @return number of descriptor slots needed per operation
+ */
+int ccp_compute_slot_count(struct ccp_session *session);
+
+/**
+ * Process crypto ops to be enqueued
+ *
+ * @param qp CCP crypto queue-pair
+ * @param op crypto ops table
+ * @param cmd_q CCP cmd queue
+ * @param nb_ops no. of ops to be submitted
+ * @param slots_req no. of descriptor slots reserved for this burst
+ * @return number of ops successfully submitted to the hardware queue
+ */
+int process_ops_to_enqueue(struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ struct ccp_queue *cmd_q,
+ uint16_t nb_ops,
+ int slots_req);
+
+/**
+ * Process crypto ops to be dequeued
+ *
+ * @param qp CCP crypto queue-pair
+ * @param op crypto ops table
+ * @param nb_ops requested no. of ops
+ * @return number of ops dequeued from the completed batch
+ */
+int process_ops_to_dequeue(struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ uint16_t nb_ops);
+
+
+/**
+ * APIs for SHA3 partial hash generation
+ * @param data_in buffer pointer on which the partial hash is applied
+ * @param data_out buffer where the partial hash result is written, in CCP big-endian format
+ */
+int partial_hash_sha3_224(uint8_t *data_in,
+ uint8_t *data_out);
+
+int partial_hash_sha3_256(uint8_t *data_in,
+ uint8_t *data_out);
+
+int partial_hash_sha3_384(uint8_t *data_in,
+ uint8_t *data_out);
+
+int partial_hash_sha3_512(uint8_t *data_in,
+ uint8_t *data_out);
+
+#endif /* _CCP_CRYPTO_H_ */
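
The auth.pre_compute buffer declared above holds the partial hash states H(ipad ^ key) and H(opad ^ key), built with the standard HMAC_IPAD_VALUE/HMAC_OPAD_VALUE pads (0x36/0x5c). A minimal sketch of preparing the padded blocks, assuming the key has already been reduced to at most one block; hashing the two blocks into CCP-format partial states is done elsewhere in the driver and is omitted here:

/* Sketch only: build the ipad/opad blocks that feed
 * ccp_session.auth.pre_compute. block_size would be e.g.
 * SHA256_BLOCK_SIZE.
 */
static void
example_hmac_pads(const uint8_t *key, size_t key_len, size_t block_size,
                  uint8_t *ipad, uint8_t *opad)
{
        size_t i;

        memset(ipad, HMAC_IPAD_VALUE, block_size);
        memset(opad, HMAC_OPAD_VALUE, block_size);
        for (i = 0; i < key_len && i < block_size; i++) {
                ipad[i] ^= key[i];      /* key XOR 0x36; zero-padded tail stays 0x36 */
                opad[i] ^= key[i];      /* key XOR 0x5c */
        }
}
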
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
new file mode 100644
index 00000000..80fe6a45
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -0,0 +1,810 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/types.h>
+#include <sys/file.h>
+#include <unistd.h>
+
+#include <rte_hexdump.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+
+#include "ccp_dev.h"
+#include "ccp_pci.h"
+#include "ccp_pmd_private.h"
+
+struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
+static int ccp_dev_id;
+
+int
+ccp_dev_start(struct rte_cryptodev *dev)
+{
+ struct ccp_private *priv = dev->data->dev_private;
+
+ priv->last_dev = TAILQ_FIRST(&ccp_list);
+ return 0;
+}
+
+struct ccp_queue *
+ccp_allot_queue(struct rte_cryptodev *cdev, int slot_req)
+{
+ int i, ret = 0;
+ struct ccp_device *dev;
+ struct ccp_private *priv = cdev->data->dev_private;
+
+ dev = TAILQ_NEXT(priv->last_dev, next);
+ if (unlikely(dev == NULL))
+ dev = TAILQ_FIRST(&ccp_list);
+ priv->last_dev = dev;
+ if (dev->qidx >= dev->cmd_q_count)
+ dev->qidx = 0;
+ ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
+ if (ret >= slot_req)
+ return &dev->cmd_q[dev->qidx];
+ for (i = 0; i < dev->cmd_q_count; i++) {
+ dev->qidx++;
+ if (dev->qidx >= dev->cmd_q_count)
+ dev->qidx = 0;
+ ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
+ if (ret >= slot_req)
+ return &dev->cmd_q[dev->qidx];
+ }
+ return NULL;
+}
+
+int
+ccp_read_hwrng(uint32_t *value)
+{
+ struct ccp_device *dev;
+
+ TAILQ_FOREACH(dev, &ccp_list, next) {
+ void *vaddr = (void *)(dev->pci.mem_resource[2].addr);
+
+ while (dev->hwrng_retries++ < CCP_MAX_TRNG_RETRIES) {
+ *value = CCP_READ_REG(vaddr, TRNG_OUT_REG);
+ if (*value) {
+ dev->hwrng_retries = 0;
+ return 0;
+ }
+ }
+ dev->hwrng_retries = 0;
+ }
+ return -1;
+}
+
+static const struct rte_memzone *
+ccp_queue_dma_zone_reserve(const char *queue_name,
+ uint32_t queue_size,
+ int socket_id)
+{
+ const struct rte_memzone *mz;
+
+ mz = rte_memzone_lookup(queue_name);
+ if (mz != 0) {
+ if (((size_t)queue_size <= mz->len) &&
+ ((socket_id == SOCKET_ID_ANY) ||
+ (socket_id == mz->socket_id))) {
+ CCP_LOG_INFO("re-use memzone already "
+ "allocated for %s", queue_name);
+ return mz;
+ }
+ CCP_LOG_ERR("Incompatible memzone already "
+ "allocated %s, size %u, socket %d. "
+ "Requested size %u, socket %u",
+ queue_name, (uint32_t)mz->len,
+ mz->socket_id, queue_size, socket_id);
+ return NULL;
+ }
+
+ CCP_LOG_INFO("Allocate memzone for %s, size %u on socket %u",
+ queue_name, queue_size, socket_id);
+
+ return rte_memzone_reserve_aligned(queue_name, queue_size,
+ socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
+}
+
+/* bitmap support apis */
+static inline void
+ccp_set_bit(unsigned long *bitmap, int n)
+{
+ __sync_fetch_and_or(&bitmap[WORD_OFFSET(n)], (1UL << BIT_OFFSET(n)));
+}
+
+static inline void
+ccp_clear_bit(unsigned long *bitmap, int n)
+{
+ __sync_fetch_and_and(&bitmap[WORD_OFFSET(n)], ~(1UL << BIT_OFFSET(n)));
+}
+
+static inline uint32_t
+ccp_get_bit(unsigned long *bitmap, int n)
+{
+ return ((bitmap[WORD_OFFSET(n)] & (1 << BIT_OFFSET(n))) != 0);
+}
+
+
+static inline uint32_t
+ccp_ffz(unsigned long word)
+{
+ unsigned long first_zero;
+
+ first_zero = __builtin_ffsl(~word);
+ return first_zero ? (first_zero - 1) :
+ BITS_PER_WORD;
+}
+
+static inline uint32_t
+ccp_find_first_zero_bit(unsigned long *addr, uint32_t limit)
+{
+ uint32_t i;
+ uint32_t nwords = 0;
+
+ nwords = (limit - 1) / BITS_PER_WORD + 1;
+ for (i = 0; i < nwords; i++) {
+ if (addr[i] == 0UL)
+ return i * BITS_PER_WORD;
+ if (addr[i] < ~(0UL))
+ break;
+ }
+ return (i == nwords) ? limit : i * BITS_PER_WORD + ccp_ffz(addr[i]);
+}
+
+static void
+ccp_bitmap_set(unsigned long *map, unsigned int start, int len)
+{
+ unsigned long *p = map + WORD_OFFSET(start);
+ const unsigned int size = start + len;
+ int bits_to_set = BITS_PER_WORD - (start % BITS_PER_WORD);
+ unsigned long mask_to_set = CCP_BITMAP_FIRST_WORD_MASK(start);
+
+ while (len - bits_to_set >= 0) {
+ *p |= mask_to_set;
+ len -= bits_to_set;
+ bits_to_set = BITS_PER_WORD;
+ mask_to_set = ~0UL;
+ p++;
+ }
+ if (len) {
+ mask_to_set &= CCP_BITMAP_LAST_WORD_MASK(size);
+ *p |= mask_to_set;
+ }
+}
+
+static void
+ccp_bitmap_clear(unsigned long *map, unsigned int start, int len)
+{
+ unsigned long *p = map + WORD_OFFSET(start);
+ const unsigned int size = start + len;
+ int bits_to_clear = BITS_PER_WORD - (start % BITS_PER_WORD);
+ unsigned long mask_to_clear = CCP_BITMAP_FIRST_WORD_MASK(start);
+
+ while (len - bits_to_clear >= 0) {
+ *p &= ~mask_to_clear;
+ len -= bits_to_clear;
+ bits_to_clear = BITS_PER_WORD;
+ mask_to_clear = ~0UL;
+ p++;
+ }
+ if (len) {
+ mask_to_clear &= CCP_BITMAP_LAST_WORD_MASK(size);
+ *p &= ~mask_to_clear;
+ }
+}
+
+
+static unsigned long
+_ccp_find_next_bit(const unsigned long *addr,
+ unsigned long nbits,
+ unsigned long start,
+ unsigned long invert)
+{
+ unsigned long tmp;
+
+ if (!nbits || start >= nbits)
+ return nbits;
+
+ tmp = addr[start / BITS_PER_WORD] ^ invert;
+
+ /* Handle 1st word. */
+ tmp &= CCP_BITMAP_FIRST_WORD_MASK(start);
+ start = ccp_round_down(start, BITS_PER_WORD);
+
+ while (!tmp) {
+ start += BITS_PER_WORD;
+ if (start >= nbits)
+ return nbits;
+
+ tmp = addr[start / BITS_PER_WORD] ^ invert;
+ }
+
+ return RTE_MIN(start + (ffs(tmp) - 1), nbits);
+}
+
+static unsigned long
+ccp_find_next_bit(const unsigned long *addr,
+ unsigned long size,
+ unsigned long offset)
+{
+ return _ccp_find_next_bit(addr, size, offset, 0UL);
+}
+
+static unsigned long
+ccp_find_next_zero_bit(const unsigned long *addr,
+ unsigned long size,
+ unsigned long offset)
+{
+ return _ccp_find_next_bit(addr, size, offset, ~0UL);
+}
+
+/**
+ * bitmap_find_next_zero_area - find a contiguous aligned zero area
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ */
+static unsigned long
+ccp_bitmap_find_next_zero_area(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr)
+{
+ unsigned long index, end, i;
+
+again:
+ index = ccp_find_next_zero_bit(map, size, start);
+
+ end = index + nr;
+ if (end > size)
+ return end;
+ i = ccp_find_next_bit(map, end, index);
+ if (i < end) {
+ start = i + 1;
+ goto again;
+ }
+ return index;
+}
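
ccp_bitmap_find_next_zero_area() looks for nr consecutive clear bits, restarting the scan past any set bit it runs into; ccp_lsb_alloc() below uses it to carve contiguous LSB slots. A tiny illustration of the expected semantics, written as if it lived in this file (the helpers are static); illustrative only:

static void
example_zero_area_semantics(void)
{
        unsigned long map[CCP_BITMAP_SIZE(LSB_SIZE)] = {0};
        unsigned long start;

        ccp_bitmap_set(map, 0, 3);      /* slots 0..2 busy */
        start = ccp_bitmap_find_next_zero_area(map, LSB_SIZE, 0, 2);
        /* start == 3: first run of two free slots begins right after */
        if (start < LSB_SIZE)
                ccp_bitmap_set(map, start, 2);  /* claim it, as ccp_lsb_alloc() does */
}
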
+
+static uint32_t
+ccp_lsb_alloc(struct ccp_queue *cmd_q, unsigned int count)
+{
+ struct ccp_device *ccp;
+ int start;
+
+ /* First look at the map for the queue */
+ if (cmd_q->lsb >= 0) {
+ start = (uint32_t)ccp_bitmap_find_next_zero_area(cmd_q->lsbmap,
+ LSB_SIZE, 0,
+ count);
+ if (start < LSB_SIZE) {
+ ccp_bitmap_set(cmd_q->lsbmap, start, count);
+ return start + cmd_q->lsb * LSB_SIZE;
+ }
+ }
+
+ /* try to get an entry from the shared blocks */
+ ccp = cmd_q->dev;
+
+ rte_spinlock_lock(&ccp->lsb_lock);
+
+ start = (uint32_t)ccp_bitmap_find_next_zero_area(ccp->lsbmap,
+ MAX_LSB_CNT * LSB_SIZE,
+ 0, count);
+ if (start <= MAX_LSB_CNT * LSB_SIZE) {
+ ccp_bitmap_set(ccp->lsbmap, start, count);
+ rte_spinlock_unlock(&ccp->lsb_lock);
+ return start * LSB_ITEM_SIZE;
+ }
+ CCP_LOG_ERR("NO LSBs available");
+
+ rte_spinlock_unlock(&ccp->lsb_lock);
+
+ return 0;
+}
+
+static void __rte_unused
+ccp_lsb_free(struct ccp_queue *cmd_q,
+ unsigned int start,
+ unsigned int count)
+{
+ int lsbno = start / LSB_SIZE;
+
+ if (!start)
+ return;
+
+ if (cmd_q->lsb == lsbno) {
+ /* An entry from the private LSB */
+ ccp_bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
+ } else {
+ /* From the shared LSBs */
+ struct ccp_device *ccp = cmd_q->dev;
+
+ rte_spinlock_lock(&ccp->lsb_lock);
+ ccp_bitmap_clear(ccp->lsbmap, start, count);
+ rte_spinlock_unlock(&ccp->lsb_lock);
+ }
+}
+
+static int
+ccp_find_lsb_regions(struct ccp_queue *cmd_q, uint64_t status)
+{
+ int q_mask = 1 << cmd_q->id;
+ int weight = 0;
+ int j;
+
+ /* Build a bit mask to know which LSBs
+ * this queue has access to.
+ * Don't bother with segment 0
+ * as it has special
+ * privileges.
+ */
+ cmd_q->lsbmask = 0;
+ status >>= LSB_REGION_WIDTH;
+ for (j = 1; j < MAX_LSB_CNT; j++) {
+ if (status & q_mask)
+ ccp_set_bit(&cmd_q->lsbmask, j);
+
+ status >>= LSB_REGION_WIDTH;
+ }
+
+ for (j = 0; j < MAX_LSB_CNT; j++)
+ if (ccp_get_bit(&cmd_q->lsbmask, j))
+ weight++;
+
+ printf("Queue %d can access %d LSB regions of mask %lu\n",
+ (int)cmd_q->id, weight, cmd_q->lsbmask);
+
+ return weight ? 0 : -EINVAL;
+}
+
+static int
+ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
+ int lsb_cnt, int n_lsbs,
+ unsigned long *lsb_pub)
+{
+ unsigned long qlsb = 0;
+ int bitno = 0;
+ int qlsb_wgt = 0;
+ int i, j;
+
+ /* For each queue:
+ * If the count of potential LSBs available to a queue matches the
+ * ordinal given to us in lsb_cnt:
+ * Copy the mask of possible LSBs for this queue into "qlsb";
+ * For each bit in qlsb, see if the corresponding bit in the
+ * aggregation mask is set; if so, we have a match.
+ * If we have a match, clear the bit in the aggregation to
+ * mark it as no longer available.
+ * If there is no match, clear the bit in qlsb and keep looking.
+ */
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ struct ccp_queue *cmd_q = &ccp->cmd_q[i];
+
+ qlsb_wgt = 0;
+ for (j = 0; j < MAX_LSB_CNT; j++)
+ if (ccp_get_bit(&cmd_q->lsbmask, j))
+ qlsb_wgt++;
+
+ if (qlsb_wgt == lsb_cnt) {
+ qlsb = cmd_q->lsbmask;
+
+ bitno = ffs(qlsb) - 1;
+ while (bitno < MAX_LSB_CNT) {
+ if (ccp_get_bit(lsb_pub, bitno)) {
+ /* We found an available LSB
+ * that this queue can access
+ */
+ cmd_q->lsb = bitno;
+ ccp_clear_bit(lsb_pub, bitno);
+ break;
+ }
+ ccp_clear_bit(&qlsb, bitno);
+ bitno = ffs(qlsb) - 1;
+ }
+ if (bitno >= MAX_LSB_CNT)
+ return -EINVAL;
+ n_lsbs--;
+ }
+ }
+ return n_lsbs;
+}
+
+/* For each queue, from the most- to least-constrained:
+ * find an LSB that can be assigned to the queue. If there are N queues that
+ * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
+ * dedicated LSB. Remaining LSB regions become a shared resource.
+ * If we have fewer LSBs than queues, all LSB regions become shared
+ * resources.
+ */
+static int
+ccp_assign_lsbs(struct ccp_device *ccp)
+{
+ unsigned long lsb_pub = 0, qlsb = 0;
+ int n_lsbs = 0;
+ int bitno;
+ int i, lsb_cnt;
+ int rc = 0;
+
+ rte_spinlock_init(&ccp->lsb_lock);
+
+ /* Create an aggregate bitmap to get a total count of available LSBs */
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ lsb_pub |= ccp->cmd_q[i].lsbmask;
+
+ for (i = 0; i < MAX_LSB_CNT; i++)
+ if (ccp_get_bit(&lsb_pub, i))
+ n_lsbs++;
+
+ if (n_lsbs >= ccp->cmd_q_count) {
+ /* We have enough LSBs to give every queue a private LSB.
+ * Brute force search to start with the queues that are more
+ * constrained in LSB choice. When an LSB is privately
+ * assigned, it is removed from the public mask.
+ * This is an ugly N squared algorithm with some optimization.
+ */
+ for (lsb_cnt = 1; n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
+ lsb_cnt++) {
+ rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
+ &lsb_pub);
+ if (rc < 0)
+ return -EINVAL;
+ n_lsbs = rc;
+ }
+ }
+
+ rc = 0;
+ /* What's left of the LSBs, according to the public mask, now become
+ * shared. Any zero bits in the lsb_pub mask represent an LSB region
+ * that can't be used as a shared resource, so mark the LSB slots for
+ * them as "in use".
+ */
+ qlsb = lsb_pub;
+ bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
+ while (bitno < MAX_LSB_CNT) {
+ ccp_bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
+ ccp_set_bit(&qlsb, bitno);
+ bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
+ }
+
+ return rc;
+}
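
A short worked example of the assignment order above, with illustrative masks: suppose three queues report lsbmask values 0b0010, 0b0110 and 0b1110, i.e. they can reach 1, 2 and 3 of the public regions respectively. The lsb_cnt = 1 pass serves the most constrained queue first and gives it region 1; the lsb_cnt = 2 pass gives the second queue region 2, since region 1 has already been cleared from lsb_pub; the lsb_cnt = 3 pass gives the third queue region 3. Regions still set in lsb_pub afterwards stay shared, while everything else (privately assigned or unreachable) is marked in the device-wide lsbmap so the shared allocator never hands it out.
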
+
+static int
+ccp_add_device(struct ccp_device *dev, int type)
+{
+ int i;
+ uint32_t qmr, status_lo, status_hi, dma_addr_lo, dma_addr_hi;
+ uint64_t status;
+ struct ccp_queue *cmd_q;
+ const struct rte_memzone *q_mz;
+ void *vaddr;
+
+ if (dev == NULL)
+ return -1;
+
+ dev->id = ccp_dev_id++;
+ dev->qidx = 0;
+ vaddr = (void *)(dev->pci.mem_resource[2].addr);
+
+ if (type == CCP_VERSION_5B) {
+ CCP_WRITE_REG(vaddr, CMD_TRNG_CTL_OFFSET, 0x00012D57);
+ CCP_WRITE_REG(vaddr, CMD_CONFIG_0_OFFSET, 0x00000003);
+ for (i = 0; i < 12; i++) {
+ CCP_WRITE_REG(vaddr, CMD_AES_MASK_OFFSET,
+ CCP_READ_REG(vaddr, TRNG_OUT_REG));
+ }
+ CCP_WRITE_REG(vaddr, CMD_QUEUE_MASK_OFFSET, 0x0000001F);
+ CCP_WRITE_REG(vaddr, CMD_QUEUE_PRIO_OFFSET, 0x00005B6D);
+ CCP_WRITE_REG(vaddr, CMD_CMD_TIMEOUT_OFFSET, 0x00000000);
+
+ CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET, 0x3FFFFFFF);
+ CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET, 0x000003FF);
+
+ CCP_WRITE_REG(vaddr, CMD_CLK_GATE_CTL_OFFSET, 0x00108823);
+ }
+ CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x00001249);
+
+ /* Copy the private LSB mask to the public registers */
+ status_lo = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET);
+ status_hi = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET);
+ CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_LO_OFFSET, status_lo);
+ CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_HI_OFFSET, status_hi);
+ status = ((uint64_t)status_hi<<30) | ((uint64_t)status_lo);
+
+ dev->cmd_q_count = 0;
+ /* Find available queues */
+ qmr = CCP_READ_REG(vaddr, Q_MASK_REG);
+ for (i = 0; i < MAX_HW_QUEUES; i++) {
+ if (!(qmr & (1 << i)))
+ continue;
+ cmd_q = &dev->cmd_q[dev->cmd_q_count++];
+ cmd_q->dev = dev;
+ cmd_q->id = i;
+ cmd_q->qidx = 0;
+ cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
+
+ cmd_q->reg_base = (uint8_t *)vaddr +
+ CMD_Q_STATUS_INCR * (i + 1);
+
+ /* CCP queue memory */
+ snprintf(cmd_q->memz_name, sizeof(cmd_q->memz_name),
+ "%s_%d_%s_%d_%s",
+ "ccp_dev",
+ (int)dev->id, "queue",
+ (int)cmd_q->id, "mem");
+ q_mz = ccp_queue_dma_zone_reserve(cmd_q->memz_name,
+ cmd_q->qsize, SOCKET_ID_ANY);
+ cmd_q->qbase_addr = (void *)q_mz->addr;
+ cmd_q->qbase_desc = (void *)q_mz->addr;
+ cmd_q->qbase_phys_addr = q_mz->phys_addr;
+
+ cmd_q->qcontrol = 0;
+ /* init control reg to zero */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol);
+
+ /* Disable the interrupts */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INT_ENABLE_BASE, 0x00);
+ CCP_READ_REG(cmd_q->reg_base, CMD_Q_INT_STATUS_BASE);
+ CCP_READ_REG(cmd_q->reg_base, CMD_Q_STATUS_BASE);
+
+ /* Clear the interrupts */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INTERRUPT_STATUS_BASE,
+ ALL_INTERRUPTS);
+
+ /* Configure size of each virtual queue accessible to host */
+ cmd_q->qcontrol &= ~(CMD_Q_SIZE << CMD_Q_SHIFT);
+ cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD_Q_SHIFT;
+
+ dma_addr_lo = low32_value(cmd_q->qbase_phys_addr);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
+ (uint32_t)dma_addr_lo);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_HEAD_LO_BASE,
+ (uint32_t)dma_addr_lo);
+
+ dma_addr_hi = high32_value(cmd_q->qbase_phys_addr);
+ cmd_q->qcontrol |= (dma_addr_hi << 16);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol);
+
+ /* create LSB Mask map */
+ if (ccp_find_lsb_regions(cmd_q, status))
+ CCP_LOG_ERR("queue doesn't have lsb regions");
+ cmd_q->lsb = -1;
+
+ rte_atomic64_init(&cmd_q->free_slots);
+ rte_atomic64_set(&cmd_q->free_slots, (COMMANDS_PER_QUEUE - 1));
+ /* one slot kept unused as a barrier between head and tail */
+ }
+
+ if (ccp_assign_lsbs(dev))
+ CCP_LOG_ERR("Unable to assign lsb region");
+
+ /* pre-allocate LSB slots */
+ for (i = 0; i < dev->cmd_q_count; i++) {
+ dev->cmd_q[i].sb_key =
+ ccp_lsb_alloc(&dev->cmd_q[i], 1);
+ dev->cmd_q[i].sb_iv =
+ ccp_lsb_alloc(&dev->cmd_q[i], 1);
+ dev->cmd_q[i].sb_sha =
+ ccp_lsb_alloc(&dev->cmd_q[i], 2);
+ dev->cmd_q[i].sb_hmac =
+ ccp_lsb_alloc(&dev->cmd_q[i], 2);
+ }
+
+ TAILQ_INSERT_TAIL(&ccp_list, dev, next);
+ return 0;
+}
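
For reference, the queue sizing above works out as follows (assuming sizeof(struct ccp_desc) is 32 bytes, i.e. the eight 32-bit words described in ccp_dev.h): with COMMANDS_PER_QUEUE = 2048, ffs(2048) = 12, so QUEUE_SIZE_VAL = (12 - 2) & 0x1F = 10 is the field shifted in at CMD_Q_SHIFT; Q_SIZE(Q_DESC_SIZE) = 2048 * 32 = 65536 bytes, so each queue's descriptor memzone is 64 KiB; and free_slots starts at 2047 because one descriptor is always kept unused as the head/tail barrier.
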
+
+static void
+ccp_remove_device(struct ccp_device *dev)
+{
+ if (dev == NULL)
+ return;
+
+ TAILQ_REMOVE(&ccp_list, dev, next);
+}
+
+static int
+is_ccp_device(const char *dirname,
+ const struct rte_pci_id *ccp_id,
+ int *type)
+{
+ char filename[PATH_MAX];
+ const struct rte_pci_id *id;
+ uint16_t vendor, device_id;
+ int i;
+ unsigned long tmp;
+
+ /* get vendor id */
+ snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ return 0;
+ vendor = (uint16_t)tmp;
+
+ /* get device id */
+ snprintf(filename, sizeof(filename), "%s/device", dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ return 0;
+ device_id = (uint16_t)tmp;
+
+ for (id = ccp_id, i = 0; id->vendor_id != 0; id++, i++) {
+ if (vendor == id->vendor_id &&
+ device_id == id->device_id) {
+ *type = i;
+ return 1; /* Matched device */
+ }
+ }
+ return 0;
+}
+
+static int
+ccp_probe_device(const char *dirname, uint16_t domain,
+ uint8_t bus, uint8_t devid,
+ uint8_t function, int ccp_type)
+{
+ struct ccp_device *ccp_dev = NULL;
+ struct rte_pci_device *pci;
+ char filename[PATH_MAX];
+ unsigned long tmp;
+ int uio_fd = -1, i, uio_num;
+ char uio_devname[PATH_MAX];
+ void *map_addr;
+
+ ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev),
+ RTE_CACHE_LINE_SIZE);
+ if (ccp_dev == NULL)
+ goto fail;
+ pci = &(ccp_dev->pci);
+
+ pci->addr.domain = domain;
+ pci->addr.bus = bus;
+ pci->addr.devid = devid;
+ pci->addr.function = function;
+
+ /* get vendor id */
+ snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+ pci->id.vendor_id = (uint16_t)tmp;
+
+ /* get device id */
+ snprintf(filename, sizeof(filename), "%s/device", dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+ pci->id.device_id = (uint16_t)tmp;
+
+ /* get subsystem_vendor id */
+ snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
+ dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+ pci->id.subsystem_vendor_id = (uint16_t)tmp;
+
+ /* get subsystem_device id */
+ snprintf(filename, sizeof(filename), "%s/subsystem_device",
+ dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+ pci->id.subsystem_device_id = (uint16_t)tmp;
+
+ /* get class_id */
+ snprintf(filename, sizeof(filename), "%s/class",
+ dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+ /* the least significant 24 bits are valid: class, subclass, program interface */
+ pci->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID;
+
+ /* parse resources */
+ snprintf(filename, sizeof(filename), "%s/resource", dirname);
+ if (ccp_pci_parse_sysfs_resource(filename, pci) < 0)
+ goto fail;
+
+ uio_num = ccp_find_uio_devname(dirname);
+ if (uio_num < 0) {
+ /*
+ * It may take time for uio device to appear,
+ * wait here and try again
+ */
+ usleep(100000);
+ uio_num = ccp_find_uio_devname(dirname);
+ if (uio_num < 0)
+ goto fail;
+ }
+ snprintf(uio_devname, sizeof(uio_devname), "/dev/uio%u", uio_num);
+
+ uio_fd = open(uio_devname, O_RDWR | O_NONBLOCK);
+ if (uio_fd < 0)
+ goto fail;
+ if (flock(uio_fd, LOCK_EX | LOCK_NB))
+ goto fail;
+
+ /* Map the PCI memory resource of device */
+ for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+
+ char devname[PATH_MAX];
+ int res_fd;
+
+ if (pci->mem_resource[i].phys_addr == 0)
+ continue;
+ snprintf(devname, sizeof(devname), "%s/resource%d", dirname, i);
+ res_fd = open(devname, O_RDWR);
+ if (res_fd < 0)
+ goto fail;
+ map_addr = mmap(NULL, pci->mem_resource[i].len,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED, res_fd, 0);
+ if (map_addr == MAP_FAILED)
+ goto fail;
+
+ pci->mem_resource[i].addr = map_addr;
+ }
+
+ /* device is valid, add in list */
+ if (ccp_add_device(ccp_dev, ccp_type)) {
+ ccp_remove_device(ccp_dev);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ CCP_LOG_ERR("CCP Device probe failed");
+ if (uio_fd > 0)
+ close(uio_fd);
+ if (ccp_dev)
+ rte_free(ccp_dev);
+ return -1;
+}
+
+int
+ccp_probe_devices(const struct rte_pci_id *ccp_id)
+{
+ int dev_cnt = 0;
+ int ccp_type = 0;
+ struct dirent *d;
+ DIR *dir;
+ int ret = 0;
+ int module_idx = 0;
+ uint16_t domain;
+ uint8_t bus, devid, function;
+ char dirname[PATH_MAX];
+
+ module_idx = ccp_check_pci_uio_module();
+ if (module_idx < 0)
+ return -1;
+
+ TAILQ_INIT(&ccp_list);
+ dir = opendir(SYSFS_PCI_DEVICES);
+ if (dir == NULL)
+ return -1;
+ while ((d = readdir(dir)) != NULL) {
+ if (d->d_name[0] == '.')
+ continue;
+ if (ccp_parse_pci_addr_format(d->d_name, sizeof(d->d_name),
+ &domain, &bus, &devid, &function) != 0)
+ continue;
+ snprintf(dirname, sizeof(dirname), "%s/%s",
+ SYSFS_PCI_DEVICES, d->d_name);
+ if (is_ccp_device(dirname, ccp_id, &ccp_type)) {
+ printf("CCP : Detected CCP device with ID = 0x%x\n",
+ ccp_id[ccp_type].device_id);
+ ret = ccp_probe_device(dirname, domain, bus, devid,
+ function, ccp_type);
+ if (ret == 0)
+ dev_cnt++;
+ }
+ }
+ closedir(dir);
+ return dev_cnt;
+}
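
ccp_probe_devices() walks SYSFS_PCI_DEVICES and matches each entry against a zero-terminated rte_pci_id table. A hedged sketch of a caller; the real table lives in ccp_pmd.c, and the device id below is a placeholder rather than an authoritative CCP id (0x1022 is AMD's PCI vendor id):

static const struct rte_pci_id example_ccp_pci_ids[] = {
        { RTE_PCI_DEVICE(0x1022, 0x1456) },     /* device id assumed for illustration */
        { .vendor_id = 0 },     /* terminator, as is_ccp_device() expects */
};

static int
example_probe(void)
{
        int ndevs = ccp_probe_devices(example_ccp_pci_ids);

        return (ndevs > 0) ? 0 : -1;    /* ndevs = successfully initialized CCPs */
}
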
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
new file mode 100644
index 00000000..de3e4bcc
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -0,0 +1,495 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_DEV_H_
+#define _CCP_DEV_H_
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_bus_pci.h>
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_spinlock.h>
+#include <rte_crypto_sym.h>
+#include <rte_cryptodev.h>
+
+/**< CCP specific */
+#define MAX_HW_QUEUES 5
+#define CCP_MAX_TRNG_RETRIES 10
+#define CCP_ALIGN(x, y) ((((x) + (y - 1)) / y) * y)
+
+/**< CCP Register Mappings */
+#define Q_MASK_REG 0x000
+#define TRNG_OUT_REG 0x00c
+
+/* CCP Version 5 Specifics */
+#define CMD_QUEUE_MASK_OFFSET 0x00
+#define CMD_QUEUE_PRIO_OFFSET 0x04
+#define CMD_REQID_CONFIG_OFFSET 0x08
+#define CMD_CMD_TIMEOUT_OFFSET 0x10
+#define LSB_PUBLIC_MASK_LO_OFFSET 0x18
+#define LSB_PUBLIC_MASK_HI_OFFSET 0x1C
+#define LSB_PRIVATE_MASK_LO_OFFSET 0x20
+#define LSB_PRIVATE_MASK_HI_OFFSET 0x24
+
+#define CMD_Q_CONTROL_BASE 0x0000
+#define CMD_Q_TAIL_LO_BASE 0x0004
+#define CMD_Q_HEAD_LO_BASE 0x0008
+#define CMD_Q_INT_ENABLE_BASE 0x000C
+#define CMD_Q_INTERRUPT_STATUS_BASE 0x0010
+
+#define CMD_Q_STATUS_BASE 0x0100
+#define CMD_Q_INT_STATUS_BASE 0x0104
+
+#define CMD_CONFIG_0_OFFSET 0x6000
+#define CMD_TRNG_CTL_OFFSET 0x6008
+#define CMD_AES_MASK_OFFSET 0x6010
+#define CMD_CLK_GATE_CTL_OFFSET 0x603C
+
+/* Address offset between two virtual queue registers */
+#define CMD_Q_STATUS_INCR 0x1000
+
+/* Bit masks */
+#define CMD_Q_RUN 0x1
+#define CMD_Q_SIZE 0x1F
+#define CMD_Q_SHIFT 3
+#define COMMANDS_PER_QUEUE 2048
+
+#define QUEUE_SIZE_VAL ((ffs(COMMANDS_PER_QUEUE) - 2) & \
+ CMD_Q_SIZE)
+#define Q_DESC_SIZE sizeof(struct ccp_desc)
+#define Q_SIZE(n) (COMMANDS_PER_QUEUE*(n))
+
+#define INT_COMPLETION 0x1
+#define INT_ERROR 0x2
+#define INT_QUEUE_STOPPED 0x4
+#define ALL_INTERRUPTS (INT_COMPLETION| \
+ INT_ERROR| \
+ INT_QUEUE_STOPPED)
+
+#define LSB_REGION_WIDTH 5
+#define MAX_LSB_CNT 8
+
+#define LSB_SIZE 16
+#define LSB_ITEM_SIZE 32
+#define SLSB_MAP_SIZE (MAX_LSB_CNT * LSB_SIZE)
+#define LSB_ENTRY_NUMBER(LSB_ADDR) (LSB_ADDR / LSB_ITEM_SIZE)
+
+/* General CCP Defines */
+
+#define CCP_SB_BYTES 32
+/* Word 0 */
+#define CCP_CMD_DW0(p) ((p)->dw0)
+#define CCP_CMD_SOC(p) (CCP_CMD_DW0(p).soc)
+#define CCP_CMD_IOC(p) (CCP_CMD_DW0(p).ioc)
+#define CCP_CMD_INIT(p) (CCP_CMD_DW0(p).init)
+#define CCP_CMD_EOM(p) (CCP_CMD_DW0(p).eom)
+#define CCP_CMD_FUNCTION(p) (CCP_CMD_DW0(p).function)
+#define CCP_CMD_ENGINE(p) (CCP_CMD_DW0(p).engine)
+#define CCP_CMD_PROT(p) (CCP_CMD_DW0(p).prot)
+
+/* Word 1 */
+#define CCP_CMD_DW1(p) ((p)->length)
+#define CCP_CMD_LEN(p) (CCP_CMD_DW1(p))
+
+/* Word 2 */
+#define CCP_CMD_DW2(p) ((p)->src_lo)
+#define CCP_CMD_SRC_LO(p) (CCP_CMD_DW2(p))
+
+/* Word 3 */
+#define CCP_CMD_DW3(p) ((p)->dw3)
+#define CCP_CMD_SRC_MEM(p) ((p)->dw3.src_mem)
+#define CCP_CMD_SRC_HI(p) ((p)->dw3.src_hi)
+#define CCP_CMD_LSB_ID(p) ((p)->dw3.lsb_cxt_id)
+#define CCP_CMD_FIX_SRC(p) ((p)->dw3.fixed)
+
+/* Words 4/5 */
+#define CCP_CMD_DW4(p) ((p)->dw4)
+#define CCP_CMD_DST_LO(p) (CCP_CMD_DW4(p).dst_lo)
+#define CCP_CMD_DW5(p) ((p)->dw5.fields.dst_hi)
+#define CCP_CMD_DST_HI(p) (CCP_CMD_DW5(p))
+#define CCP_CMD_DST_MEM(p) ((p)->dw5.fields.dst_mem)
+#define CCP_CMD_FIX_DST(p) ((p)->dw5.fields.fixed)
+#define CCP_CMD_SHA_LO(p) ((p)->dw4.sha_len_lo)
+#define CCP_CMD_SHA_HI(p) ((p)->dw5.sha_len_hi)
+
+/* Word 6/7 */
+#define CCP_CMD_DW6(p) ((p)->key_lo)
+#define CCP_CMD_KEY_LO(p) (CCP_CMD_DW6(p))
+#define CCP_CMD_DW7(p) ((p)->dw7)
+#define CCP_CMD_KEY_HI(p) ((p)->dw7.key_hi)
+#define CCP_CMD_KEY_MEM(p) ((p)->dw7.key_mem)
+
+/* bitmap */
+enum {
+ BITS_PER_WORD = sizeof(unsigned long) * CHAR_BIT
+};
+
+#define WORD_OFFSET(b) ((b) / BITS_PER_WORD)
+#define BIT_OFFSET(b) ((b) % BITS_PER_WORD)
+
+#define CCP_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define CCP_BITMAP_SIZE(nr) \
+ CCP_DIV_ROUND_UP(nr, CHAR_BIT * sizeof(unsigned long))
+
+#define CCP_BITMAP_FIRST_WORD_MASK(start) \
+ (~0UL << ((start) & (BITS_PER_WORD - 1)))
+#define CCP_BITMAP_LAST_WORD_MASK(nbits) \
+ (~0UL >> (-(nbits) & (BITS_PER_WORD - 1)))
+
+#define __ccp_round_mask(x, y) ((typeof(x))((y)-1))
+#define ccp_round_down(x, y) ((x) & ~__ccp_round_mask(x, y))
+
+/** CCP registers Write/Read */
+
+static inline void ccp_pci_reg_write(void *base, int offset,
+ uint32_t value)
+{
+ volatile void *reg_addr = ((uint8_t *)base + offset);
+
+ rte_write32((rte_cpu_to_le_32(value)), reg_addr);
+}
+
+static inline uint32_t ccp_pci_reg_read(void *base, int offset)
+{
+ volatile void *reg_addr = ((uint8_t *)base + offset);
+
+ return rte_le_to_cpu_32(rte_read32(reg_addr));
+}
+
+#define CCP_READ_REG(hw_addr, reg_offset) \
+ ccp_pci_reg_read(hw_addr, reg_offset)
+
+#define CCP_WRITE_REG(hw_addr, reg_offset, value) \
+ ccp_pci_reg_write(hw_addr, reg_offset, value)
+
+TAILQ_HEAD(ccp_list, ccp_device);
+
+extern struct ccp_list ccp_list;
+
+/**
+ * CCP device version
+ */
+enum ccp_device_version {
+ CCP_VERSION_5A = 0,
+ CCP_VERSION_5B,
+};
+
+/**
+ * A structure describing a CCP command queue.
+ */
+struct ccp_queue {
+ struct ccp_device *dev;
+ char memz_name[RTE_MEMZONE_NAMESIZE];
+
+ rte_atomic64_t free_slots;
+ /**< available free slots updated from enq/deq calls */
+
+ /* Queue identifier */
+ uint64_t id; /**< queue id */
+ uint64_t qidx; /**< queue index */
+ uint64_t qsize; /**< queue size */
+
+ /* Queue address */
+ struct ccp_desc *qbase_desc;
+ void *qbase_addr;
+ phys_addr_t qbase_phys_addr;
+ /**< queue-page registers addr */
+ void *reg_base;
+
+ uint32_t qcontrol;
+ /**< queue ctrl reg */
+
+ int lsb;
+ /**< lsb region assigned to queue */
+ unsigned long lsbmask;
+ /**< lsb regions queue can access */
+ unsigned long lsbmap[CCP_BITMAP_SIZE(LSB_SIZE)];
+ /**< all lsb resources which queue is using */
+ uint32_t sb_key;
+ /**< lsb assigned for queue */
+ uint32_t sb_iv;
+ /**< lsb assigned for iv */
+ uint32_t sb_sha;
+ /**< lsb assigned for sha ctx */
+ uint32_t sb_hmac;
+ /**< lsb assigned for hmac ctx */
+} __rte_cache_aligned;
+
+/**
+ * A structure describing a CCP device.
+ */
+struct ccp_device {
+ TAILQ_ENTRY(ccp_device) next;
+ int id;
+ /**< ccp dev id on platform */
+ struct ccp_queue cmd_q[MAX_HW_QUEUES];
+ /**< ccp queue */
+ int cmd_q_count;
+ /**< no. of ccp Queues */
+ struct rte_pci_device pci;
+ /**< ccp pci identifier */
+ unsigned long lsbmap[CCP_BITMAP_SIZE(SLSB_MAP_SIZE)];
+ /**< shared lsb mask of ccp */
+ rte_spinlock_t lsb_lock;
+ /**< protection for shared lsb region allocation */
+ int qidx;
+ /**< current queue index */
+ int hwrng_retries;
+ /**< retry counter for CCP TRNG */
+} __rte_cache_aligned;
+
+/**< CCP H/W engine related */
+/**
+ * ccp_engine - CCP operation identifiers
+ *
+ * @CCP_ENGINE_AES: AES operation
+ * @CCP_ENGINE_XTS_AES: 128-bit XTS AES operation
+ * @CCP_ENGINE_3DES: DES/3DES operation
+ * @CCP_ENGINE_SHA: SHA operation
+ * @CCP_ENGINE_RSA: RSA operation
+ * @CCP_ENGINE_PASSTHRU: pass-through operation
+ * @CCP_ENGINE_ZLIB_DECOMPRESS: unused
+ * @CCP_ENGINE_ECC: ECC operation
+ */
+enum ccp_engine {
+ CCP_ENGINE_AES = 0,
+ CCP_ENGINE_XTS_AES_128,
+ CCP_ENGINE_3DES,
+ CCP_ENGINE_SHA,
+ CCP_ENGINE_RSA,
+ CCP_ENGINE_PASSTHRU,
+ CCP_ENGINE_ZLIB_DECOMPRESS,
+ CCP_ENGINE_ECC,
+ CCP_ENGINE__LAST,
+};
+
+/* Passthru engine */
+/**
+ * ccp_passthru_bitwise - type of bitwise passthru operation
+ *
+ * @CCP_PASSTHRU_BITWISE_NOOP: no bitwise operation performed
+ * @CCP_PASSTHRU_BITWISE_AND: perform bitwise AND of src with mask
+ * @CCP_PASSTHRU_BITWISE_OR: perform bitwise OR of src with mask
+ * @CCP_PASSTHRU_BITWISE_XOR: perform bitwise XOR of src with mask
+ * @CCP_PASSTHRU_BITWISE_MASK: overwrite with mask
+ */
+enum ccp_passthru_bitwise {
+ CCP_PASSTHRU_BITWISE_NOOP = 0,
+ CCP_PASSTHRU_BITWISE_AND,
+ CCP_PASSTHRU_BITWISE_OR,
+ CCP_PASSTHRU_BITWISE_XOR,
+ CCP_PASSTHRU_BITWISE_MASK,
+ CCP_PASSTHRU_BITWISE__LAST,
+};
+
+/**
+ * ccp_passthru_byteswap - type of byteswap passthru operation
+ *
+ * @CCP_PASSTHRU_BYTESWAP_NOOP: no byte swapping performed
+ * @CCP_PASSTHRU_BYTESWAP_32BIT: swap bytes within 32-bit words
+ * @CCP_PASSTHRU_BYTESWAP_256BIT: swap bytes within 256-bit words
+ */
+enum ccp_passthru_byteswap {
+ CCP_PASSTHRU_BYTESWAP_NOOP = 0,
+ CCP_PASSTHRU_BYTESWAP_32BIT,
+ CCP_PASSTHRU_BYTESWAP_256BIT,
+ CCP_PASSTHRU_BYTESWAP__LAST,
+};
+
+/**
+ * CCP passthru
+ */
+struct ccp_passthru {
+ phys_addr_t src_addr;
+ phys_addr_t dest_addr;
+ enum ccp_passthru_bitwise bit_mod;
+ enum ccp_passthru_byteswap byte_swap;
+ int len;
+ int dir;
+};
+
+/* CCP version 5: Union to define the function field (cmd_reg1/dword0) */
+union ccp_function {
+ struct {
+ uint16_t size:7;
+ uint16_t encrypt:1;
+ uint16_t mode:5;
+ uint16_t type:2;
+ } aes;
+ struct {
+ uint16_t size:7;
+ uint16_t encrypt:1;
+ uint16_t mode:5;
+ uint16_t type:2;
+ } des;
+ struct {
+ uint16_t size:7;
+ uint16_t encrypt:1;
+ uint16_t rsvd:5;
+ uint16_t type:2;
+ } aes_xts;
+ struct {
+ uint16_t rsvd1:10;
+ uint16_t type:4;
+ uint16_t rsvd2:1;
+ } sha;
+ struct {
+ uint16_t mode:3;
+ uint16_t size:12;
+ } rsa;
+ struct {
+ uint16_t byteswap:2;
+ uint16_t bitwise:3;
+ uint16_t reflect:2;
+ uint16_t rsvd:8;
+ } pt;
+ struct {
+ uint16_t rsvd:13;
+ } zlib;
+ struct {
+ uint16_t size:10;
+ uint16_t type:2;
+ uint16_t mode:3;
+ } ecc;
+ uint16_t raw;
+};
+
+
+/**
+ * descriptor for version 5 CCP commands
+ * 8 32-bit words:
+ * word 0: function; engine; control bits
+ * word 1: length of source data
+ * word 2: low 32 bits of source pointer
+ * word 3: upper 16 bits of source pointer; source memory type
+ * word 4: low 32 bits of destination pointer
+ * word 5: upper 16 bits of destination pointer; destination memory
+ * type
+ * word 6: low 32 bits of key pointer
+ * word 7: upper 16 bits of key pointer; key memory type
+ */
+struct dword0 {
+ uint32_t soc:1;
+ uint32_t ioc:1;
+ uint32_t rsvd1:1;
+ uint32_t init:1;
+ uint32_t eom:1;
+ uint32_t function:15;
+ uint32_t engine:4;
+ uint32_t prot:1;
+ uint32_t rsvd2:7;
+};
+
+struct dword3 {
+ uint32_t src_hi:16;
+ uint32_t src_mem:2;
+ uint32_t lsb_cxt_id:8;
+ uint32_t rsvd1:5;
+ uint32_t fixed:1;
+};
+
+union dword4 {
+ uint32_t dst_lo; /* NON-SHA */
+ uint32_t sha_len_lo; /* SHA */
+};
+
+union dword5 {
+ struct {
+ uint32_t dst_hi:16;
+ uint32_t dst_mem:2;
+ uint32_t rsvd1:13;
+ uint32_t fixed:1;
+ }
+ fields;
+ uint32_t sha_len_hi;
+};
+
+struct dword7 {
+ uint32_t key_hi:16;
+ uint32_t key_mem:2;
+ uint32_t rsvd1:14;
+};
+
+struct ccp_desc {
+ struct dword0 dw0;
+ uint32_t length;
+ uint32_t src_lo;
+ struct dword3 dw3;
+ union dword4 dw4;
+ union dword5 dw5;
+ uint32_t key_lo;
+ struct dword7 dw7;
+};
+
+/**
+ * ccp memory type
+ */
+enum ccp_memtype {
+ CCP_MEMTYPE_SYSTEM = 0,
+ CCP_MEMTYPE_SB,
+ CCP_MEMTYPE_LOCAL,
+ CCP_MEMTYPE_LAST,
+};
+
+/**
+ * cmd id to follow order
+ */
+enum ccp_cmd_order {
+ CCP_CMD_CIPHER = 0,
+ CCP_CMD_AUTH,
+ CCP_CMD_CIPHER_HASH,
+ CCP_CMD_HASH_CIPHER,
+ CCP_CMD_COMBINED,
+ CCP_CMD_NOT_SUPPORTED,
+};
+
+static inline uint32_t
+low32_value(unsigned long addr)
+{
+ return ((uint64_t)addr) & 0x0ffffffff;
+}
+
+static inline uint32_t
+high32_value(unsigned long addr)
+{
+ return ((uint64_t)addr >> 32) & 0x00000ffff;
+}
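
Descriptor address fields are split across a 32-bit low word and a 16-bit high bit-field (dw3.src_hi, dw5.fields.dst_hi, dw7.key_hi), which is exactly what low32_value()/high32_value() produce. A hedged sketch of filling a pass-through copy descriptor with the accessors defined earlier in this header, mirroring what ccp_perform_passthru() does in ccp_crypto.c; names and addresses are illustrative:

static inline void
example_fill_passthru_desc(struct ccp_desc *desc, phys_addr_t src,
                           phys_addr_t dst, uint32_t len)
{
        union ccp_function function;

        memset(desc, 0, Q_DESC_SIZE);
        function.raw = 0;
        function.pt.byteswap = CCP_PASSTHRU_BYTESWAP_NOOP;
        function.pt.bitwise = CCP_PASSTHRU_BITWISE_NOOP;

        CCP_CMD_ENGINE(desc) = CCP_ENGINE_PASSTHRU;
        CCP_CMD_FUNCTION(desc) = function.raw;
        CCP_CMD_LEN(desc) = len;

        CCP_CMD_SRC_LO(desc) = low32_value(src);
        CCP_CMD_SRC_HI(desc) = high32_value(src);
        CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;

        CCP_CMD_DST_LO(desc) = low32_value(dst);
        CCP_CMD_DST_HI(desc) = high32_value(dst);
        CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
}
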
+
+/*
+ * Start CCP device
+ */
+int ccp_dev_start(struct rte_cryptodev *dev);
+
+/**
+ * Detect ccp platform and initialize all ccp devices
+ *
+ * @param ccp_id rte_pci_id list for supported CCP devices
+ * @return no. of successfully initialized CCP devices
+ */
+int ccp_probe_devices(const struct rte_pci_id *ccp_id);
+
+/**
+ * allocate a ccp command queue
+ *
+ * @param dev rte crypto device
+ * @param slot_req number of free descriptor slots required
+ * @return allotted CCP queue on success otherwise NULL
+ */
+struct ccp_queue *ccp_allot_queue(struct rte_cryptodev *dev, int slot_req);
+
+/**
+ * read hwrng value
+ *
+ * @param trng_value data pointer to write RNG value
+ * @return 0 on success otherwise -1
+ */
+int ccp_read_hwrng(uint32_t *trng_value);
+
+#endif /* _CCP_DEV_H_ */
diff --git a/drivers/crypto/ccp/ccp_pci.c b/drivers/crypto/ccp/ccp_pci.c
new file mode 100644
index 00000000..59152ca5
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pci.c
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_string_fns.h>
+
+#include "ccp_pci.h"
+
+static const char * const uio_module_names[] = {
+ "igb_uio",
+ "uio_pci_generic",
+};
+
+int
+ccp_check_pci_uio_module(void)
+{
+ FILE *fp;
+ int i;
+ char buf[BUFSIZ];
+
+ fp = fopen(PROC_MODULES, "r");
+ if (fp == NULL)
+ return -1;
+ i = 0;
+ while (i < (int)(sizeof(uio_module_names) / sizeof(uio_module_names[0]))) {
+ while (fgets(buf, sizeof(buf), fp) != NULL) {
+ if (!strncmp(buf, uio_module_names[i],
+ strlen(uio_module_names[i]))) {
+ fclose(fp);
+ return i;
+ }
+ }
+ i++;
+ rewind(fp);
+ }
+ fclose(fp);
+ printf("Insert igb_uio or uio_pci_generic kernel module(s)\n");
+ return -1; /* uio not inserted */
+}
+
+/*
+ * split up a pci address into its constituent parts.
+ */
+int
+ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+ uint8_t *bus, uint8_t *devid, uint8_t *function)
+{
+ /* first split on ':' */
+ union splitaddr {
+ struct {
+ char *domain;
+ char *bus;
+ char *devid;
+ char *function;
+ };
+ char *str[PCI_FMT_NVAL];
+ /* last element-separator is "." not ":" */
+ } splitaddr;
+
+ char *buf_copy = strndup(buf, bufsize);
+
+ if (buf_copy == NULL)
+ return -1;
+
+ if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':')
+ != PCI_FMT_NVAL - 1)
+ goto error;
+ /* final split is on '.' between devid and function */
+ splitaddr.function = strchr(splitaddr.devid, '.');
+ if (splitaddr.function == NULL)
+ goto error;
+ *splitaddr.function++ = '\0';
+
+ /* now convert to int values */
+ errno = 0;
+ *domain = (uint16_t)strtoul(splitaddr.domain, NULL, 16);
+ *bus = (uint8_t)strtoul(splitaddr.bus, NULL, 16);
+ *devid = (uint8_t)strtoul(splitaddr.devid, NULL, 16);
+ *function = (uint8_t)strtoul(splitaddr.function, NULL, 10);
+ if (errno != 0)
+ goto error;
+
+ free(buf_copy); /* free the copy made with strndup */
+ return 0;
+error:
+ free(buf_copy);
+ return -1;
+}
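
A short usage example of the parser above; the address string is illustrative, matching the directory-name form that ccp_probe_devices() passes in:

static int
example_parse_bdf(void)
{
        const char name[] = "0000:03:00.2";
        uint16_t domain;
        uint8_t bus, devid, function;

        if (ccp_parse_pci_addr_format(name, sizeof(name), &domain,
                                      &bus, &devid, &function) != 0)
                return -1;
        /* domain == 0x0000, bus == 0x03, devid == 0x00, function == 2 */
        return 0;
}
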
+
+int
+ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val)
+{
+ FILE *f;
+ char buf[BUFSIZ];
+ char *end = NULL;
+
+ f = fopen(filename, "r");
+ if (f == NULL)
+ return -1;
+ if (fgets(buf, sizeof(buf), f) == NULL) {
+ fclose(f);
+ return -1;
+ }
+ *val = strtoul(buf, &end, 0);
+ if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+ return 0;
+}
+
+/** IO resource type: */
+#define IORESOURCE_IO 0x00000100
+#define IORESOURCE_MEM 0x00000200
+
+/* parse one line of the "resource" sysfs file (note that the 'line'
+ * string is modified)
+ */
+static int
+ccp_pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr,
+ uint64_t *end_addr, uint64_t *flags)
+{
+ union pci_resource_info {
+ struct {
+ char *phys_addr;
+ char *end_addr;
+ char *flags;
+ };
+ char *ptrs[PCI_RESOURCE_FMT_NVAL];
+ } res_info;
+
+ if (rte_strsplit(line, len, res_info.ptrs, 3, ' ') != 3)
+ return -1;
+ errno = 0;
+ *phys_addr = strtoull(res_info.phys_addr, NULL, 16);
+ *end_addr = strtoull(res_info.end_addr, NULL, 16);
+ *flags = strtoull(res_info.flags, NULL, 16);
+ if (errno != 0)
+ return -1;
+
+ return 0;
+}
+
+/* parse the "resource" sysfs file */
+int
+ccp_pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
+{
+ FILE *fp;
+ char buf[BUFSIZ];
+ int i;
+ uint64_t phys_addr, end_addr, flags;
+
+ fp = fopen(filename, "r");
+ if (fp == NULL)
+ return -1;
+
+ for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+ if (fgets(buf, sizeof(buf), fp) == NULL)
+ goto error;
+ if (ccp_pci_parse_one_sysfs_resource(buf, sizeof(buf),
+ &phys_addr, &end_addr, &flags) < 0)
+ goto error;
+
+ if (flags & IORESOURCE_MEM) {
+ dev->mem_resource[i].phys_addr = phys_addr;
+ dev->mem_resource[i].len = end_addr - phys_addr + 1;
+ /* not mapped for now */
+ dev->mem_resource[i].addr = NULL;
+ }
+ }
+ fclose(fp);
+ return 0;
+
+error:
+ fclose(fp);
+ return -1;
+}
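
Each line of the sysfs "resource" file carries a start address, end address and flags in hexadecimal, one line per BAR. A small sketch of parsing one such line with the static helper above (so it would have to live in this file); the values are illustrative only:

static int
example_parse_bar_line(void)
{
        char line[] =
                "0x00000000f7300000 0x00000000f73fffff 0x0000000000040200";
        uint64_t phys, end, flags;

        if (ccp_pci_parse_one_sysfs_resource(line, sizeof(line),
                                             &phys, &end, &flags) < 0)
                return -1;
        /* phys == 0xf7300000, length == end - phys + 1 == 1 MiB */
        return (flags & IORESOURCE_MEM) ? 0 : -1;      /* memory BAR expected */
}
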
+
+int
+ccp_find_uio_devname(const char *dirname)
+{
+
+ DIR *dir;
+ struct dirent *e;
+ char dirname_uio[PATH_MAX];
+ unsigned int uio_num;
+ int ret = -1;
+
+ /* depending on kernel version, uio can be located in uio/uioX
+ * or uio:uioX
+ */
+ snprintf(dirname_uio, sizeof(dirname_uio), "%s/uio", dirname);
+ dir = opendir(dirname_uio);
+ if (dir == NULL) {
+ /* retry with the parent directory; the layout differs between kernel versions */
+ dir = opendir(dirname);
+ if (dir == NULL)
+ return -1;
+ }
+
+ /* take the first file starting with "uio" */
+ while ((e = readdir(dir)) != NULL) {
+ /* format could be uio%d ...*/
+ int shortprefix_len = sizeof("uio") - 1;
+ /* ... or uio:uio%d */
+ int longprefix_len = sizeof("uio:uio") - 1;
+ char *endptr;
+
+ if (strncmp(e->d_name, "uio", 3) != 0)
+ continue;
+
+ /* first try uio%d */
+ errno = 0;
+ uio_num = strtoull(e->d_name + shortprefix_len, &endptr, 10);
+ if (errno == 0 && endptr != (e->d_name + shortprefix_len)) {
+ ret = uio_num;
+ break;
+ }
+
+ /* then try uio:uio%d */
+ errno = 0;
+ uio_num = strtoull(e->d_name + longprefix_len, &endptr, 10);
+ if (errno == 0 && endptr != (e->d_name + longprefix_len)) {
+ ret = uio_num;
+ break;
+ }
+ }
+ closedir(dir);
+ return ret;
+}
diff --git a/drivers/crypto/ccp/ccp_pci.h b/drivers/crypto/ccp/ccp_pci.h
new file mode 100644
index 00000000..7ed3bac4
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pci.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_PCI_H_
+#define _CCP_PCI_H_
+
+#include <stdint.h>
+
+#include <rte_bus_pci.h>
+
+#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
+#define PROC_MODULES "/proc/modules"
+
+int ccp_check_pci_uio_module(void);
+
+int ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+ uint8_t *bus, uint8_t *devid, uint8_t *function);
+
+int ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val);
+
+int ccp_pci_parse_sysfs_resource(const char *filename,
+ struct rte_pci_device *dev);
+
+int ccp_find_uio_devname(const char *dirname);
+
+#endif /* _CCP_PCI_H_ */
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
new file mode 100644
index 00000000..80b75ccb
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -0,0 +1,848 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_malloc.h>
+
+#include "ccp_pmd_private.h"
+#include "ccp_dev.h"
+#include "ccp_crypto.h"
+
+#define CCP_BASE_SYM_CRYPTO_CAPABILITIES \
+ { /* SHA1 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA1, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 20, \
+ .max = 20, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA1 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 20, \
+ .max = 20, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA224 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA224, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 28, \
+ .max = 28, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA224 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 28, \
+ .max = 28, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-224 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_224, \
+ .block_size = 144, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 28, \
+ .max = 28, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-224 HMAC*/ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_224_HMAC, \
+ .block_size = 144, \
+ .key_size = { \
+ .min = 1, \
+ .max = 144, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 28, \
+ .max = 28, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA256 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA256, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 32, \
+ .max = 32, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA256 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 32, \
+ .max = 32, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-256 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_256, \
+ .block_size = 136, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 32, \
+ .max = 32, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-256-HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_256_HMAC, \
+ .block_size = 136, \
+ .key_size = { \
+ .min = 1, \
+ .max = 136, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 32, \
+ .max = 32, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA384 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA384, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 48, \
+ .max = 48, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA384 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 1, \
+ .max = 128, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 48, \
+ .max = 48, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-384 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_384, \
+ .block_size = 104, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 48, \
+ .max = 48, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-384-HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_384_HMAC, \
+ .block_size = 104, \
+ .key_size = { \
+ .min = 1, \
+ .max = 104, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 48, \
+ .max = 48, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA512 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA512, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA512 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 1, \
+ .max = 128, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-512 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_512, \
+ .block_size = 72, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-512-HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_512_HMAC, \
+ .block_size = 72, \
+ .key_size = { \
+ .min = 1, \
+ .max = 72, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /*AES-CMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_AES_CMAC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .digest_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ }, } \
+ }, } \
+ }, \
+ { /* AES ECB */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_ECB, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CTR */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* 3DES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 16, \
+ .max = 24, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES GCM */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \
+ {.aead = { \
+ .algo = RTE_CRYPTO_AEAD_AES_GCM, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .digest_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .aad_size = { \
+ .min = 0, \
+ .max = 65535, \
+ .increment = 1 \
+ }, \
+ .iv_size = { \
+ .min = 12, \
+ .max = 16, \
+ .increment = 4 \
+ }, \
+ }, } \
+ }, } \
+ }
+
+#define CCP_EXTRA_SYM_CRYPTO_CAPABILITIES \
+ { /* MD5 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }
+
+static const struct rte_cryptodev_capabilities ccp_crypto_cap[] = {
+ CCP_BASE_SYM_CRYPTO_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_cryptodev_capabilities ccp_crypto_cap_complete[] = {
+ CCP_EXTRA_SYM_CRYPTO_CAPABILITIES,
+ CCP_BASE_SYM_CRYPTO_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
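
These capability tables are what applications see through rte_cryptodev_info_get(); a small hedged sketch of checking for AES-GCM support on an already-created device, where dev_id is assumed to be a valid cryptodev id:

	#include <rte_cryptodev.h>

	static int
	ccp_supports_aes_gcm(uint8_t dev_id)
	{
		struct rte_cryptodev_info info;
		const struct rte_cryptodev_capabilities *cap;

		rte_cryptodev_info_get(dev_id, &info);
		for (cap = info.capabilities;
		     cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++) {
			if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
			    cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD &&
			    cap->sym.aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
				return 1; /* key/iv/aad limits are in cap->sym.aead */
		}
		return 0;
	}
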
+
+static int
+ccp_pmd_config(struct rte_cryptodev *dev __rte_unused,
+ struct rte_cryptodev_config *config __rte_unused)
+{
+ return 0;
+}
+
+static int
+ccp_pmd_start(struct rte_cryptodev *dev)
+{
+ return ccp_dev_start(dev);
+}
+
+static void
+ccp_pmd_stop(struct rte_cryptodev *dev __rte_unused)
+{
+
+}
+
+static int
+ccp_pmd_close(struct rte_cryptodev *dev __rte_unused)
+{
+ return 0;
+}
+
+static void
+ccp_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+
+}
+
+static void
+ccp_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+static void
+ccp_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct ccp_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_id = dev->driver_id;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = ccp_crypto_cap;
+ if (internals->auth_opt == 1)
+ dev_info->capabilities = ccp_crypto_cap_complete;
+ dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ }
+}
+
+static int
+ccp_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct ccp_qp *qp;
+
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ qp = (struct ccp_qp *)dev->data->queue_pairs[qp_id];
+ rte_ring_free(qp->processed_pkts);
+ rte_mempool_free(qp->batch_mp);
+ rte_free(qp);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+static int
+ccp_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct ccp_qp *qp)
+{
+ unsigned int n = snprintf(qp->name, sizeof(qp->name),
+ "ccp_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n > sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+static struct rte_ring *
+ccp_pmd_qp_create_batch_info_ring(struct ccp_qp *qp,
+ unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (r->size >= ring_size) {
+ CCP_LOG_INFO(
+ "Reusing ring %s for processed packets",
+ qp->name);
+ return r;
+ }
+ CCP_LOG_INFO(
+ "Unable to reuse ring %s for processed packets",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+static int
+ccp_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool)
+{
+ struct ccp_private *internals = dev->data->dev_private;
+ struct ccp_qp *qp;
+ int retval = 0;
+
+ if (qp_id >= internals->max_nb_qpairs) {
+ CCP_LOG_ERR("Invalid qp_id %u, should be less than %u",
+ qp_id, internals->max_nb_qpairs);
+ return (-EINVAL);
+ }
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ ccp_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("CCP Crypto PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL) {
+ CCP_LOG_ERR("Failed to allocate queue pair memory");
+ return (-ENOMEM);
+ }
+
+ qp->dev = dev;
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ retval = ccp_pmd_qp_set_unique_name(dev, qp);
+ if (retval) {
+ CCP_LOG_ERR("Failed to create unique name for ccp qp");
+ goto qp_setup_cleanup;
+ }
+
+ qp->processed_pkts = ccp_pmd_qp_create_batch_info_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_pkts == NULL) {
+ CCP_LOG_ERR("Failed to create batch info ring");
+ goto qp_setup_cleanup;
+ }
+
+ qp->sess_mp = session_pool;
+
+ /* mempool for batch info */
+ qp->batch_mp = rte_mempool_create(
+ qp->name,
+ qp_conf->nb_descriptors,
+ sizeof(struct ccp_batch_info),
+ RTE_CACHE_LINE_SIZE,
+ 0, NULL, NULL, NULL, NULL,
+ SOCKET_ID_ANY, 0);
+ if (qp->batch_mp == NULL)
+ goto qp_setup_cleanup;
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ return 0;
+
+qp_setup_cleanup:
+ dev->data->queue_pairs[qp_id] = NULL;
+ if (qp)
+ rte_free(qp);
+ return -1;
+}
+
+static int
+ccp_pmd_qp_start(struct rte_cryptodev *dev __rte_unused,
+ uint16_t queue_pair_id __rte_unused)
+{
+ return -ENOTSUP;
+}
+
+static int
+ccp_pmd_qp_stop(struct rte_cryptodev *dev __rte_unused,
+ uint16_t queue_pair_id __rte_unused)
+{
+ return -ENOTSUP;
+}
+
+static uint32_t
+ccp_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+static unsigned
+ccp_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct ccp_session);
+}
+
+static int
+ccp_pmd_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ int ret;
+ void *sess_private_data;
+ struct ccp_private *internals;
+
+ if (unlikely(sess == NULL || xform == NULL)) {
+ CCP_LOG_ERR("Invalid session struct or xform");
+ return -ENOMEM;
+ }
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CCP_LOG_ERR("Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+ internals = (struct ccp_private *)dev->data->dev_private;
+ ret = ccp_set_session_parameters(sess_private_data, xform, internals);
+ if (ret != 0) {
+ CCP_LOG_ERR("failed configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+ set_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+static void
+ccp_pmd_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_session_private_data(sess, index);
+
+ if (sess_priv) {
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+ rte_mempool_put(sess_mp, sess_priv);
+ memset(sess_priv, 0, sizeof(struct ccp_session));
+ set_session_private_data(sess, index, NULL);
+ }
+}
+
+struct rte_cryptodev_ops ccp_ops = {
+ .dev_configure = ccp_pmd_config,
+ .dev_start = ccp_pmd_start,
+ .dev_stop = ccp_pmd_stop,
+ .dev_close = ccp_pmd_close,
+
+ .stats_get = ccp_pmd_stats_get,
+ .stats_reset = ccp_pmd_stats_reset,
+
+ .dev_infos_get = ccp_pmd_info_get,
+
+ .queue_pair_setup = ccp_pmd_qp_setup,
+ .queue_pair_release = ccp_pmd_qp_release,
+ .queue_pair_start = ccp_pmd_qp_start,
+ .queue_pair_stop = ccp_pmd_qp_stop,
+ .queue_pair_count = ccp_pmd_qp_count,
+
+ .session_get_size = ccp_pmd_session_get_size,
+ .session_configure = ccp_pmd_session_configure,
+ .session_clear = ccp_pmd_session_clear,
+};
+
+struct rte_cryptodev_ops *ccp_pmd_ops = &ccp_ops;
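
For orientation, a hedged sketch of the public cryptodev calls that end up in ccp_ops above. Field names follow the 18.05-era API and the dev_id, session_pool and xform parameters are assumptions supplied by the caller (assumes rte_cryptodev.h and rte_lcore.h):

	static int
	ccp_app_setup_sketch(uint8_t dev_id, struct rte_mempool *session_pool,
			     struct rte_crypto_sym_xform *xform)
	{
		struct rte_cryptodev_config conf = {
			.socket_id = rte_socket_id(),
			.nb_queue_pairs = 1,
		};
		struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 1024 };
		struct rte_cryptodev_sym_session *sess;

		if (rte_cryptodev_configure(dev_id, &conf) < 0)	/* -> ccp_pmd_config */
			return -1;
		if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
				rte_socket_id(), session_pool) < 0)	/* -> ccp_pmd_qp_setup */
			return -1;

		sess = rte_cryptodev_sym_session_create(session_pool);
		if (sess == NULL ||
		    rte_cryptodev_sym_session_init(dev_id, sess, xform,
				session_pool) < 0)	/* -> ccp_pmd_session_configure */
			return -1;

		return rte_cryptodev_start(dev_id);	/* -> ccp_pmd_start */
	}
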
diff --git a/drivers/crypto/ccp/ccp_pmd_private.h b/drivers/crypto/ccp/ccp_pmd_private.h
new file mode 100644
index 00000000..f4498048
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pmd_private.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_PMD_PRIVATE_H_
+#define _CCP_PMD_PRIVATE_H_
+
+#include <rte_cryptodev.h>
+#include "ccp_crypto.h"
+
+#define CRYPTODEV_NAME_CCP_PMD crypto_ccp
+
+#define CCP_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_CCP_DEBUG
+#define CCP_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+ __func__, __LINE__, ## args)
+
+#define CCP_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+ __func__, __LINE__, ## args)
+#else
+#define CCP_LOG_INFO(fmt, args...)
+#define CCP_LOG_DBG(fmt, args...)
+#endif
+
+/**< Maximum queue pairs supported by CCP PMD */
+#define CCP_PMD_MAX_QUEUE_PAIRS 1
+#define CCP_NB_MAX_DESCRIPTORS 1024
+#define CCP_MAX_BURST 64
+
+#include "ccp_dev.h"
+
+/* private data structure for each CCP crypto device */
+struct ccp_private {
+ unsigned int max_nb_qpairs; /**< Max number of queue pairs */
+ unsigned int max_nb_sessions; /**< Max number of sessions */
+ uint8_t crypto_num_dev; /**< Number of working crypto devices */
+ bool auth_opt; /**< Authentication offload option */
+ struct ccp_device *last_dev; /**< Last working crypto device */
+};
+
+/* CCP batch info */
+struct ccp_batch_info {
+ struct rte_crypto_op *op[CCP_MAX_BURST];
+ /**< op table populated at enqueue time from the app */
+ int op_idx;
+ struct ccp_queue *cmd_q;
+ uint16_t opcnt;
+ /**< no. of crypto ops in batch*/
+ int desccnt;
+ /**< no. of ccp queue descriptors*/
+ uint32_t head_offset;
+ /**< ccp queue head/tail offsets at the time of enqueue */
+ uint32_t tail_offset;
+ uint8_t lsb_buf[CCP_SB_BYTES * CCP_MAX_BURST];
+ phys_addr_t lsb_buf_phys;
+ /**< LSB intermediate buf for passthru */
+ int lsb_buf_idx;
+ uint16_t auth_ctr;
+ /**< number of auth-only ops in the batch (CPU-based auth) */
+} __rte_cache_aligned;
+
+/**< CCP crypto queue pair */
+struct ccp_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_pkts;
+ /**< Ring for placing processed packets */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_mempool *batch_mp;
+ /**< Session Mempool for batch info */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+ struct ccp_batch_info *b_info;
+ /**< Store ops pulled out of queue */
+ struct rte_cryptodev *dev;
+ /**< rte crypto device to which this qp belongs */
+ uint8_t temp_digest[DIGEST_LENGTH_MAX];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
+} __rte_cache_aligned;
+
+
+/**< device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *ccp_pmd_ops;
+
+uint16_t
+ccp_cpu_pmd_enqueue_burst(void *queue_pair,
+ struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+uint16_t
+ccp_cpu_pmd_dequeue_burst(void *queue_pair,
+ struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+
+#endif /* _CCP_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/ccp/meson.build b/drivers/crypto/ccp/meson.build
new file mode 100644
index 00000000..e43b0059
--- /dev/null
+++ b/drivers/crypto/ccp/meson.build
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+dep = dependency('libcrypto', required: false)
+if not dep.found()
+ build = false
+endif
+deps += 'bus_vdev'
+deps += 'bus_pci'
+
+sources = files('rte_ccp_pmd.c',
+ 'ccp_crypto.c',
+ 'ccp_dev.c',
+ 'ccp_pci.c',
+ 'ccp_pmd_ops.c')
+
+ext_deps += dep
+pkgconfig_extra_libs += '-lcrypto'
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
new file mode 100644
index 00000000..2061f465
--- /dev/null
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -0,0 +1,411 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <rte_bus_pci.h>
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_pci.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+
+#include "ccp_crypto.h"
+#include "ccp_dev.h"
+#include "ccp_pmd_private.h"
+
+/**
+ * Global flag used to track whether the CCP PMD has already been initialized.
+ */
+static unsigned int ccp_pmd_init_done;
+uint8_t ccp_cryptodev_driver_id;
+
+struct ccp_pmd_init_params {
+ struct rte_cryptodev_pmd_init_params def_p;
+ bool auth_opt;
+};
+
+#define CCP_CRYPTODEV_PARAM_NAME ("name")
+#define CCP_CRYPTODEV_PARAM_SOCKET_ID ("socket_id")
+#define CCP_CRYPTODEV_PARAM_MAX_NB_QP ("max_nb_queue_pairs")
+#define CCP_CRYPTODEV_PARAM_MAX_NB_SESS ("max_nb_sessions")
+#define CCP_CRYPTODEV_PARAM_AUTH_OPT ("ccp_auth_opt")
+
+const char *ccp_pmd_valid_params[] = {
+ CCP_CRYPTODEV_PARAM_NAME,
+ CCP_CRYPTODEV_PARAM_SOCKET_ID,
+ CCP_CRYPTODEV_PARAM_MAX_NB_QP,
+ CCP_CRYPTODEV_PARAM_MAX_NB_SESS,
+ CCP_CRYPTODEV_PARAM_AUTH_OPT,
+};
+
+/** ccp pmd auth option */
+enum ccp_pmd_auth_opt {
+ CCP_PMD_AUTH_OPT_CCP = 0,
+ CCP_PMD_AUTH_OPT_CPU,
+};
+
+/** parse integer from integer argument */
+static int
+parse_integer_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ int *i = (int *) extra_args;
+
+ *i = atoi(value);
+ if (*i < 0) {
+ CCP_LOG_ERR("Argument has to be positive.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/** parse name argument */
+static int
+parse_name_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct rte_cryptodev_pmd_init_params *params = extra_args;
+
+ if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
+ CCP_LOG_ERR("Invalid name %s, should be less than "
+ "%u bytes.\n", value,
+ RTE_CRYPTODEV_NAME_MAX_LEN - 1);
+ return -EINVAL;
+ }
+
+ strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
+
+ return 0;
+}
+
+/** parse authentication operation option */
+static int
+parse_auth_opt_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct ccp_pmd_init_params *params = extra_args;
+ int i;
+
+ i = atoi(value);
+ if (i < CCP_PMD_AUTH_OPT_CCP || i > CCP_PMD_AUTH_OPT_CPU) {
+ CCP_LOG_ERR("Invalid ccp pmd auth option. "
+ "0->auth on CCP(default), "
+ "1->auth on CPU\n");
+ return -EINVAL;
+ }
+ params->auth_opt = i;
+ return 0;
+}
+
+static int
+ccp_pmd_parse_input_args(struct ccp_pmd_init_params *params,
+ const char *input_args)
+{
+ struct rte_kvargs *kvlist = NULL;
+ int ret = 0;
+
+ if (params == NULL)
+ return -EINVAL;
+
+ if (input_args) {
+ kvlist = rte_kvargs_parse(input_args,
+ ccp_pmd_valid_params);
+ if (kvlist == NULL)
+ return -1;
+
+ ret = rte_kvargs_process(kvlist,
+ CCP_CRYPTODEV_PARAM_MAX_NB_QP,
+ &parse_integer_arg,
+ &params->def_p.max_nb_queue_pairs);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ CCP_CRYPTODEV_PARAM_MAX_NB_SESS,
+ &parse_integer_arg,
+ &params->def_p.max_nb_sessions);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ CCP_CRYPTODEV_PARAM_SOCKET_ID,
+ &parse_integer_arg,
+ &params->def_p.socket_id);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ CCP_CRYPTODEV_PARAM_NAME,
+ &parse_name_arg,
+ &params->def_p);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ CCP_CRYPTODEV_PARAM_AUTH_OPT,
+ &parse_auth_opt_arg,
+ params);
+ if (ret < 0)
+ goto free_kvlist;
+
+ }
+
+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+static struct ccp_session *
+get_ccp_session(struct ccp_qp *qp, struct rte_crypto_op *op)
+{
+ struct ccp_session *sess = NULL;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (unlikely(op->sym->session == NULL))
+ return NULL;
+
+ sess = (struct ccp_session *)
+ get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ } else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ void *_sess;
+ void *_sess_private_data = NULL;
+ struct ccp_private *internals;
+
+ if (rte_mempool_get(qp->sess_mp, &_sess))
+ return NULL;
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+ return NULL;
+
+ sess = (struct ccp_session *)_sess_private_data;
+
+ internals = (struct ccp_private *)qp->dev->data->dev_private;
+ if (unlikely(ccp_set_session_parameters(sess, op->sym->xform,
+ internals) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_session_private_data(op->sym->session,
+ ccp_cryptodev_driver_id,
+ _sess_private_data);
+ }
+
+ return sess;
+}
+
+static uint16_t
+ccp_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct ccp_session *sess = NULL;
+ struct ccp_qp *qp = queue_pair;
+ struct ccp_queue *cmd_q;
+ struct rte_cryptodev *dev = qp->dev;
+ uint16_t i, enq_cnt = 0, slots_req = 0;
+
+ if (nb_ops == 0)
+ return 0;
+
+ if (unlikely(rte_ring_full(qp->processed_pkts) != 0))
+ return 0;
+
+ for (i = 0; i < nb_ops; i++) {
+ sess = get_ccp_session(qp, ops[i]);
+ if (unlikely(sess == NULL) && (i == 0)) {
+ qp->qp_stats.enqueue_err_count++;
+ return 0;
+ } else if (sess == NULL) {
+ nb_ops = i;
+ break;
+ }
+ slots_req += ccp_compute_slot_count(sess);
+ }
+
+ cmd_q = ccp_allot_queue(dev, slots_req);
+ if (unlikely(cmd_q == NULL))
+ return 0;
+
+ enq_cnt = process_ops_to_enqueue(qp, ops, cmd_q, nb_ops, slots_req);
+ qp->qp_stats.enqueued_count += enq_cnt;
+ return enq_cnt;
+}
+
+static uint16_t
+ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct ccp_qp *qp = queue_pair;
+ uint16_t nb_dequeued = 0, i;
+
+ nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops);
+
+ /* Free session if a session-less crypto op */
+ for (i = 0; i < nb_dequeued; i++)
+ if (unlikely(ops[i]->sess_type ==
+ RTE_CRYPTO_OP_SESSIONLESS)) {
+ rte_mempool_put(qp->sess_mp,
+ ops[i]->sym->session);
+ ops[i]->sym->session = NULL;
+ }
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
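
On the datapath these two functions are reached through the generic burst API; a minimal hedged pairing, where ops is assumed to hold nb_ops prepared rte_crypto_op pointers and dev_id/qp_id come from the control-path setup:

	static void
	ccp_app_burst_sketch(uint8_t dev_id, uint16_t qp_id,
			     struct rte_crypto_op **ops, uint16_t nb_ops)
	{
		uint16_t enq, deq = 0;

		enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
		while (deq < enq)
			deq += rte_cryptodev_dequeue_burst(dev_id, qp_id,
							   ops + deq, enq - deq);
	}
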
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static struct rte_pci_id ccp_pci_id[] = {
+ {
+ RTE_PCI_DEVICE(0x1022, 0x1456), /* AMD CCP-5a */
+ },
+ {
+ RTE_PCI_DEVICE(0x1022, 0x1468), /* AMD CCP-5b */
+ },
+ {.device_id = 0},
+};
+
+/** Remove ccp pmd */
+static int
+cryptodev_ccp_remove(struct rte_vdev_device *dev)
+{
+ const char *name;
+
+ ccp_pmd_init_done = 0;
+ name = rte_vdev_device_name(dev);
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD, "Closing ccp device %s on numa socket %u\n",
+ name, rte_socket_id());
+
+ return 0;
+}
+
+/** Create crypto device */
+static int
+cryptodev_ccp_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct ccp_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct ccp_private *internals;
+ uint8_t cryptodev_cnt = 0;
+
+ if (init_params->def_p.name[0] == '\0')
+ snprintf(init_params->def_p.name,
+ sizeof(init_params->def_p.name),
+ "%s", name);
+
+ dev = rte_cryptodev_pmd_create(init_params->def_p.name,
+ &vdev->device,
+ &init_params->def_p);
+ if (dev == NULL) {
+ CCP_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ cryptodev_cnt = ccp_probe_devices(ccp_pci_id);
+
+ if (cryptodev_cnt == 0) {
+ CCP_LOG_ERR("failed to detect CCP crypto device");
+ goto init_error;
+ }
+
+ printf("CCP : Crypto device count = %d\n", cryptodev_cnt);
+ dev->driver_id = ccp_cryptodev_driver_id;
+
+ /* register rx/tx burst functions for data path */
+ dev->dev_ops = ccp_pmd_ops;
+ dev->enqueue_burst = ccp_pmd_enqueue_burst;
+ dev->dequeue_burst = ccp_pmd_dequeue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+ internals = dev->data->dev_private;
+
+ internals->max_nb_qpairs = init_params->def_p.max_nb_queue_pairs;
+ internals->max_nb_sessions = init_params->def_p.max_nb_sessions;
+ internals->auth_opt = init_params->auth_opt;
+ internals->crypto_num_dev = cryptodev_cnt;
+
+ return 0;
+
+init_error:
+ CCP_LOG_ERR("driver %s: %s() failed",
+ init_params->def_p.name, __func__);
+ cryptodev_ccp_remove(vdev);
+
+ return -EFAULT;
+}
+
+/** Probe ccp pmd */
+static int
+cryptodev_ccp_probe(struct rte_vdev_device *vdev)
+{
+ int rc = 0;
+ const char *name;
+ struct ccp_pmd_init_params init_params = {
+ .def_p = {
+ "",
+ sizeof(struct ccp_private),
+ rte_socket_id(),
+ CCP_PMD_MAX_QUEUE_PAIRS,
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
+ },
+ .auth_opt = CCP_PMD_AUTH_OPT_CCP,
+ };
+ const char *input_args;
+
+ if (ccp_pmd_init_done) {
+ RTE_LOG(INFO, PMD, "CCP PMD already initialized\n");
+ return -EFAULT;
+ }
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ input_args = rte_vdev_device_args(vdev);
+ ccp_pmd_parse_input_args(&init_params, input_args);
+ init_params.def_p.max_nb_queue_pairs = CCP_PMD_MAX_QUEUE_PAIRS;
+
+ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+ init_params.def_p.socket_id);
+ RTE_LOG(INFO, PMD, "Max number of queue pairs = %d\n",
+ init_params.def_p.max_nb_queue_pairs);
+ RTE_LOG(INFO, PMD, "Max number of sessions = %d\n",
+ init_params.def_p.max_nb_sessions);
+ RTE_LOG(INFO, PMD, "Authentication offload to %s\n",
+ ((init_params.auth_opt == 0) ? "CCP" : "CPU"));
+
+ rc = cryptodev_ccp_create(name, vdev, &init_params);
+ if (rc)
+ return rc;
+ ccp_pmd_init_done = 1;
+ return 0;
+}
+
+static struct rte_vdev_driver cryptodev_ccp_pmd_drv = {
+ .probe = cryptodev_ccp_probe,
+ .remove = cryptodev_ccp_remove
+};
+
+static struct cryptodev_driver ccp_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CCP_PMD, cryptodev_ccp_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CCP_PMD,
+ "max_nb_queue_pairs=<int> "
+ "max_nb_sessions=<int> "
+ "socket_id=<int> "
+ "ccp_auth_opt=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(ccp_crypto_drv, cryptodev_ccp_pmd_drv.driver,
+ ccp_cryptodev_driver_id);
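
Besides the EAL option --vdev "crypto_ccp,max_nb_queue_pairs=1,ccp_auth_opt=1", the PMD can be instantiated from application code; a hedged sketch using the devargs registered above (the created device is assumed to keep the vdev name):

	#include <rte_bus_vdev.h>
	#include <rte_cryptodev.h>

	if (rte_vdev_init("crypto_ccp",
			  "max_nb_queue_pairs=1,ccp_auth_opt=1") == 0) {
		int dev_id = rte_cryptodev_get_dev_id("crypto_ccp");
		/* dev_id >= 0 can now be configured and started as usual */
	}
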
diff --git a/drivers/crypto/ccp/rte_pmd_ccp_version.map b/drivers/crypto/ccp/rte_pmd_ccp_version.map
new file mode 100644
index 00000000..9b9ab1a4
--- /dev/null
+++ b/drivers/crypto/ccp/rte_pmd_ccp_version.map
@@ -0,0 +1,4 @@
+DPDK_18.05 {
+
+ local: *;
+};
diff --git a/drivers/crypto/dpaa2_sec/Makefile b/drivers/crypto/dpaa2_sec/Makefile
index cb6c63e6..da3d8f84 100644
--- a/drivers/crypto/dpaa2_sec/Makefile
+++ b/drivers/crypto/dpaa2_sec/Makefile
@@ -18,13 +18,8 @@ LIB = librte_pmd_dpaa2_sec.a
# build flags
CFLAGS += -DALLOW_EXPERIMENTAL_API
-ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_INIT),y)
-CFLAGS += -O0 -g
-CFLAGS += "-Wno-error"
-else
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
-endif
CFLAGS += -D _GNU_SOURCE
ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 9a790ddd..56fa969d 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -30,6 +30,9 @@
#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_logs.h"
+/* Type required by the RTA descriptor headers included below */
+typedef uint64_t dma_addr_t;
+
/* RTA header files */
#include <hw/desc/ipsec.h>
#include <hw/desc/algo.h>
@@ -56,6 +59,8 @@ enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
static uint8_t cryptodev_driver_id;
+int dpaa2_logtype_sec;
+
static inline int
build_proto_fd(dpaa2_sec_session *sess,
struct rte_crypto_op *op,
@@ -77,11 +82,11 @@ build_proto_fd(dpaa2_sec_session *sess,
DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
- DPAA2_SET_FD_FLC(fd, ((uint64_t)flc));
+ DPAA2_SET_FD_FLC(fd, (ptrdiff_t)flc);
/* save physical address of mbuf */
op->sym->aead.digest.phys_addr = mbuf->buf_iova;
- mbuf->buf_iova = (uint64_t)op;
+ mbuf->buf_iova = (size_t)op;
return 0;
}
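
The RTE_LOG(..., PMD, ...) calls replaced throughout this file now go through the dpaa2_logtype_sec dynamic log type declared above; a hedged sketch of the macro/registration pattern assumed to live in dpaa2_sec_logs.h (the real definitions may differ; assumes rte_log.h):

	extern int dpaa2_logtype_sec;

	#define DPAA2_SEC_LOG(level, fmt, args...) \
		rte_log(RTE_LOG_ ## level, dpaa2_logtype_sec, \
			"dpaa2_sec: " fmt "\n", ## args)
	#define DPAA2_SEC_ERR(fmt, args...)   DPAA2_SEC_LOG(ERR, fmt, ## args)
	#define DPAA2_SEC_WARN(fmt, args...)  DPAA2_SEC_LOG(WARNING, fmt, ## args)
	#define DPAA2_SEC_INFO(fmt, args...)  DPAA2_SEC_LOG(INFO, fmt, ## args)
	#define DPAA2_SEC_DEBUG(fmt, args...) DPAA2_SEC_LOG(DEBUG, fmt, ## args)
	/* datapath macros such as DPAA2_SEC_DP_DEBUG typically wrap RTE_LOG_DP */

	/* registered once at load time, e.g. from a constructor */
	static void __attribute__((constructor))
	dpaa2_sec_register_logtype(void)
	{
		dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
		if (dpaa2_logtype_sec >= 0)
			rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
	}
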
@@ -113,12 +118,12 @@ build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
RTE_CACHE_LINE_SIZE);
if (unlikely(!fle)) {
- RTE_LOG(ERR, PMD, "GCM SG: Memory alloc failed for SGE\n");
+ DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
return -1;
}
memset(fle, 0, FLE_SG_MEM_SIZE);
- DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
- DPAA2_FLE_SAVE_CTXT(fle, priv);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);
op_fle = fle + 1;
ip_fle = fle + 2;
@@ -132,11 +137,11 @@ build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- PMD_TX_LOG(DEBUG, "GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
+ DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
"iv-len=%d data_off: 0x%x\n",
sym_op->aead.data.offset,
sym_op->aead.data.length,
- sym_op->aead.digest.length,
+ sess->digest_length,
sess->iv.length,
sym_op->m_src->data_off);
@@ -264,12 +269,12 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
*/
retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
if (retval) {
- RTE_LOG(ERR, PMD, "GCM: Memory alloc failed for SGE\n");
+ DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
return -1;
}
memset(fle, 0, FLE_POOL_BUF_SIZE);
- DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
- DPAA2_FLE_SAVE_CTXT(fle, priv);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
fle = fle + 1;
sge = fle + 2;
if (likely(bpid < MAX_BPID)) {
@@ -297,11 +302,11 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- PMD_TX_LOG(DEBUG, "GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
+ DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
"iv-len=%d data_off: 0x%x\n",
sym_op->aead.data.offset,
sym_op->aead.data.length,
- sym_op->aead.digest.length,
+ sess->digest_length,
sess->iv.length,
sym_op->m_src->data_off);
@@ -409,12 +414,12 @@ build_authenc_sg_fd(dpaa2_sec_session *sess,
fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
RTE_CACHE_LINE_SIZE);
if (unlikely(!fle)) {
- RTE_LOG(ERR, PMD, "AUTHENC SG: Memory alloc failed for SGE\n");
+ DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
return -1;
}
memset(fle, 0, FLE_SG_MEM_SIZE);
- DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
- DPAA2_FLE_SAVE_CTXT(fle, priv);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
op_fle = fle + 1;
ip_fle = fle + 2;
@@ -428,16 +433,16 @@ build_authenc_sg_fd(dpaa2_sec_session *sess,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- PMD_TX_LOG(DEBUG,
- "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
- "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
- sym_op->auth.data.offset,
- sym_op->auth.data.length,
- sym_op->auth.digest.length,
- sym_op->cipher.data.offset,
- sym_op->cipher.data.length,
- sym_op->cipher.iv.length,
- sym_op->m_src->data_off);
+ DPAA2_SEC_DP_DEBUG(
+ "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
+ "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
+ sym_op->auth.data.offset,
+ sym_op->auth.data.length,
+ sess->digest_length,
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
/* Configure Output FLE with Scatter/Gather Entry */
DPAA2_SET_FLE_SG_EXT(op_fle);
@@ -558,12 +563,12 @@ build_authenc_fd(dpaa2_sec_session *sess,
*/
retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
if (retval) {
- RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+ DPAA2_SEC_ERR("Memory alloc failed for SGE");
return -1;
}
memset(fle, 0, FLE_POOL_BUF_SIZE);
- DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
- DPAA2_FLE_SAVE_CTXT(fle, priv);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
fle = fle + 1;
sge = fle + 2;
if (likely(bpid < MAX_BPID)) {
@@ -591,15 +596,16 @@ build_authenc_fd(dpaa2_sec_session *sess,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- PMD_TX_LOG(DEBUG, "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
- "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
- sym_op->auth.data.offset,
- sym_op->auth.data.length,
- sess->digest_length,
- sym_op->cipher.data.offset,
- sym_op->cipher.data.length,
- sess->iv.length,
- sym_op->m_src->data_off);
+ DPAA2_SEC_DP_DEBUG(
+ "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
+ "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
+ sym_op->auth.data.offset,
+ sym_op->auth.data.length,
+ sess->digest_length,
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
/* Configure Output FLE with Scatter/Gather Entry */
DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
@@ -686,13 +692,13 @@ static inline int build_auth_sg_fd(
fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
RTE_CACHE_LINE_SIZE);
if (unlikely(!fle)) {
- RTE_LOG(ERR, PMD, "AUTH SG: Memory alloc failed for SGE\n");
+ DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
return -1;
}
memset(fle, 0, FLE_SG_MEM_SIZE);
/* first FLE entry used to store mbuf and session ctxt */
- DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
- DPAA2_FLE_SAVE_CTXT(fle, priv);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
op_fle = fle + 1;
ip_fle = fle + 2;
sge = fle + 3;
@@ -762,7 +768,7 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
if (retval) {
- RTE_LOG(ERR, PMD, "AUTH Memory alloc failed for SGE\n");
+ DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
return -1;
}
memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -772,8 +778,8 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
* to get the MBUF Addr from the previous FLE.
* We can have a better approach to use the inline Mbuf
*/
- DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
- DPAA2_FLE_SAVE_CTXT(fle, priv);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
fle = fle + 1;
if (likely(bpid < MAX_BPID)) {
@@ -859,13 +865,13 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
RTE_CACHE_LINE_SIZE);
if (!fle) {
- RTE_LOG(ERR, PMD, "CIPHER SG: Memory alloc failed for SGE\n");
+ DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
return -1;
}
memset(fle, 0, FLE_SG_MEM_SIZE);
/* first FLE entry used to store mbuf and session ctxt */
- DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
- DPAA2_FLE_SAVE_CTXT(fle, priv);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
op_fle = fle + 1;
ip_fle = fle + 2;
@@ -873,12 +879,13 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
flc = &priv->flc_desc[0].flc;
- PMD_TX_LOG(DEBUG,
- "CIPHER SG: cipher_off: 0x%x/length %d,ivlen=%d data_off: 0x%x",
- sym_op->cipher.data.offset,
- sym_op->cipher.data.length,
- sym_op->cipher.iv.length,
- sym_op->m_src->data_off);
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
+ " data_off: 0x%x\n",
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
/* o/p fle */
DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
@@ -901,10 +908,10 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
}
DPAA2_SET_FLE_FIN(sge);
- PMD_TX_LOG(DEBUG,
- "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
- flc, fle, fle->addr_hi, fle->addr_lo,
- fle->length);
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
+ flc, fle, fle->addr_hi, fle->addr_lo,
+ fle->length);
/* i/p fle */
mbuf = sym_op->m_src;
@@ -944,13 +951,14 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- PMD_TX_LOG(DEBUG,
- "CIPHER SG: fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
- (void *)DPAA2_GET_FD_ADDR(fd),
- DPAA2_GET_FD_BPID(fd),
- rte_dpaa2_bpid_info[bpid].meta_data_size,
- DPAA2_GET_FD_OFFSET(fd),
- DPAA2_GET_FD_LEN(fd));
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
+ " off =%d, len =%d\n",
+ DPAA2_GET_FD_ADDR(fd),
+ DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[bpid].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd),
+ DPAA2_GET_FD_LEN(fd));
return 0;
}
@@ -976,7 +984,7 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
if (retval) {
- RTE_LOG(ERR, PMD, "CIPHER: Memory alloc failed for SGE\n");
+ DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
return -1;
}
memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -986,8 +994,8 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
* to get the MBUF Addr from the previous FLE.
* We can have a better approach to use the inline Mbuf
*/
- DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
- DPAA2_FLE_SAVE_CTXT(fle, priv);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
fle = fle + 1;
sge = fle + 2;
@@ -1012,12 +1020,13 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- PMD_TX_LOG(DEBUG,
- "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d, data_off: 0x%x",
- sym_op->cipher.data.offset,
- sym_op->cipher.data.length,
- sess->iv.length,
- sym_op->m_src->data_off);
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
+ " data_off: 0x%x\n",
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
@@ -1025,10 +1034,10 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
fle->length = sym_op->cipher.data.length + sess->iv.length;
- PMD_TX_LOG(DEBUG,
- "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
- flc, fle, fle->addr_hi, fle->addr_lo,
- fle->length);
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
+ flc, fle, fle->addr_hi, fle->addr_lo,
+ fle->length);
fle++;
@@ -1049,13 +1058,14 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
DPAA2_SET_FLE_FIN(sge);
DPAA2_SET_FLE_FIN(fle);
- PMD_TX_LOG(DEBUG,
- "CIPHER: fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
- (void *)DPAA2_GET_FD_ADDR(fd),
- DPAA2_GET_FD_BPID(fd),
- rte_dpaa2_bpid_info[bpid].meta_data_size,
- DPAA2_GET_FD_OFFSET(fd),
- DPAA2_GET_FD_LEN(fd));
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
+ " off =%d, len =%d\n",
+ DPAA2_GET_FD_ADDR(fd),
+ DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[bpid].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd),
+ DPAA2_GET_FD_LEN(fd));
return 0;
}
@@ -1095,7 +1105,7 @@ build_sec_fd(struct rte_crypto_op *op,
break;
case DPAA2_SEC_HASH_CIPHER:
default:
- RTE_LOG(ERR, PMD, "error: Unsupported session\n");
+ DPAA2_SEC_ERR("error: Unsupported session");
}
} else {
switch (sess->ctxt_type) {
@@ -1116,7 +1126,7 @@ build_sec_fd(struct rte_crypto_op *op,
break;
case DPAA2_SEC_HASH_CIPHER:
default:
- RTE_LOG(ERR, PMD, "error: Unsupported session\n");
+ DPAA2_SEC_ERR("error: Unsupported session");
}
}
return ret;
@@ -1143,7 +1153,7 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
return 0;
if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- RTE_LOG(ERR, PMD, "sessionless crypto op not supported\n");
+ DPAA2_SEC_ERR("sessionless crypto op not supported");
return 0;
}
/*Prepare enqueue descriptor*/
@@ -1152,14 +1162,14 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
qbman_eq_desc_set_response(&eqdesc, 0, 0);
qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
- if (!DPAA2_PER_LCORE_SEC_DPIO) {
- ret = dpaa2_affine_qbman_swp_sec();
+ if (!DPAA2_PER_LCORE_DPIO) {
+ ret = dpaa2_affine_qbman_swp();
if (ret) {
- RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+ DPAA2_SEC_ERR("Failure in affining portal");
return 0;
}
}
- swp = DPAA2_PER_LCORE_SEC_PORTAL;
+ swp = DPAA2_PER_LCORE_PORTAL;
while (nb_ops) {
frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;
@@ -1171,8 +1181,8 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
bpid = mempool_to_bpid(mb_pool);
ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
if (ret) {
- PMD_DRV_LOG(ERR, "error: Improper packet"
- " contents for crypto operation\n");
+ DPAA2_SEC_ERR("error: Improper packet contents"
+ " for crypto operation");
goto skip_tx;
}
ops++;
@@ -1206,7 +1216,7 @@ sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
- op = (struct rte_crypto_op *)mbuf->buf_iova;
+ op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
mbuf->buf_iova = op->sym->aead.digest.phys_addr;
op->sym->aead.digest.phys_addr = 0L;
@@ -1236,8 +1246,8 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
- PMD_RX_LOG(DEBUG, "FLE addr = %x - %x, offset = %x",
- fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
+ DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
+ fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
/* we are using the first FLE entry to store Mbuf.
 * Currently we do not know which FLE has the mbuf stored.
@@ -1248,11 +1258,10 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
if (unlikely(DPAA2_GET_FD_IVP(fd))) {
/* TODO complete it. */
- RTE_LOG(ERR, PMD, "error: Non inline buffer - WHAT to DO?\n");
+ DPAA2_SEC_ERR("error: non inline buffer");
return NULL;
}
- op = (struct rte_crypto_op *)DPAA2_IOVA_TO_VADDR(
- DPAA2_GET_FLE_ADDR((fle - 1)));
+ op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
 /* Prefetch op */
src = op->sym->m_src;
@@ -1264,19 +1273,19 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
} else
dst = src;
- PMD_RX_LOG(DEBUG, "mbuf %p BMAN buf addr %p",
- (void *)dst, dst->buf_addr);
-
- PMD_RX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
- (void *)DPAA2_GET_FD_ADDR(fd),
- DPAA2_GET_FD_BPID(fd),
- rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
- DPAA2_GET_FD_OFFSET(fd),
- DPAA2_GET_FD_LEN(fd));
+ DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
+ " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
+ (void *)dst,
+ dst->buf_addr,
+ DPAA2_GET_FD_ADDR(fd),
+ DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd),
+ DPAA2_GET_FD_LEN(fd));
/* free the fle memory */
if (likely(rte_pktmbuf_is_contiguous(src))) {
- priv = (struct ctxt_priv *)DPAA2_GET_FLE_CTXT(fle - 1);
+ priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
rte_mempool_put(priv->fle_pool, (void *)(fle-1));
} else
rte_free((void *)(fle-1));
@@ -1300,14 +1309,14 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
const struct qbman_fd *fd;
struct qbman_pull_desc pulldesc;
- if (!DPAA2_PER_LCORE_SEC_DPIO) {
- ret = dpaa2_affine_qbman_swp_sec();
+ if (!DPAA2_PER_LCORE_DPIO) {
+ ret = dpaa2_affine_qbman_swp();
if (ret) {
- RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+ DPAA2_SEC_ERR("Failure in affining portal");
return 0;
}
}
- swp = DPAA2_PER_LCORE_SEC_PORTAL;
+ swp = DPAA2_PER_LCORE_PORTAL;
dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
qbman_pull_desc_clear(&pulldesc);
@@ -1322,8 +1331,8 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
/*Issue a volatile dequeue command. */
while (1) {
if (qbman_swp_pull(swp, &pulldesc)) {
- RTE_LOG(WARNING, PMD,
- "SEC VDQ command is not issued : QBMAN busy\n");
+ DPAA2_SEC_WARN(
+ "SEC VDQ command is not issued : QBMAN busy");
/* Portal was busy, try again */
continue;
}
@@ -1355,7 +1364,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
status = (uint8_t)qbman_result_DQ_flags(dq_storage);
if (unlikely(
(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
- PMD_RX_LOG(DEBUG, "No frame is delivered");
+ DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
continue;
}
}
@@ -1365,8 +1374,8 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
if (unlikely(fd->simple.frc)) {
/* TODO Parse SEC errors */
- RTE_LOG(ERR, PMD, "SEC returned Error - %x\n",
- fd->simple.frc);
+ DPAA2_SEC_ERR("SEC returned Error - %x",
+ fd->simple.frc);
ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
} else {
ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
@@ -1378,7 +1387,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
dpaa2_qp->rx_vq.rx_pkts += num_rx;
- PMD_RX_LOG(DEBUG, "SEC Received %d Packets", num_rx);
+ DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
/*Return the total number of packets received to DPAA2 app*/
return num_rx;
}
@@ -1420,11 +1429,11 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
/* If qp is already in use free ring memory and qp metadata. */
if (dev->data->queue_pairs[qp_id] != NULL) {
- PMD_DRV_LOG(INFO, "QP already setup");
+ DPAA2_SEC_INFO("QP already setup");
return 0;
}
- PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
+ DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
dev, qp_id, qp_conf);
memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
@@ -1432,7 +1441,7 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
RTE_CACHE_LINE_SIZE);
if (!qp) {
- RTE_LOG(ERR, PMD, "malloc failed for rx/tx queues\n");
+ DPAA2_SEC_ERR("malloc failed for rx/tx queues");
return -1;
}
@@ -1442,20 +1451,20 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
sizeof(struct queue_storage_info_t),
RTE_CACHE_LINE_SIZE);
if (!qp->rx_vq.q_storage) {
- RTE_LOG(ERR, PMD, "malloc failed for q_storage\n");
+ DPAA2_SEC_ERR("malloc failed for q_storage");
return -1;
}
memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
- RTE_LOG(ERR, PMD, "dpaa2_alloc_dq_storage failed\n");
+ DPAA2_SEC_ERR("Unable to allocate dequeue storage");
return -1;
}
dev->data->queue_pairs[qp_id] = qp;
cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
- cfg.user_ctx = (uint64_t)(&qp->rx_vq);
+ cfg.user_ctx = (size_t)(&qp->rx_vq);
retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
qp_id, &cfg);
return retcode;
@@ -1517,7 +1526,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
+ DPAA2_SEC_ERR("No Memory for priv CTXT");
return -1;
}
@@ -1528,7 +1537,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
RTE_CACHE_LINE_SIZE);
if (session->cipher_key.data == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
+ DPAA2_SEC_ERR("No Memory for cipher key");
rte_free(priv);
return -1;
}
@@ -1536,7 +1545,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
memcpy(session->cipher_key.data, xform->cipher.key.data,
xform->cipher.key.length);
- cipherdata.key = (uint64_t)session->cipher_key.data;
+ cipherdata.key = (size_t)session->cipher_key.data;
cipherdata.keylen = session->cipher_key.length;
cipherdata.key_enc_flags = 0;
cipherdata.key_type = RTA_DATA_IMM;
@@ -1571,11 +1580,11 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
case RTE_CRYPTO_CIPHER_ZUC_EEA3:
case RTE_CRYPTO_CIPHER_NULL:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
+ DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
xform->cipher.algo);
goto error_out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
+ DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
xform->cipher.algo);
goto error_out;
}
@@ -1586,7 +1595,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
&cipherdata, NULL, session->iv.length,
session->dir);
if (bufsize < 0) {
- RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n");
+ DPAA2_SEC_ERR("Crypto: Descriptor build failed");
goto error_out;
}
flc->dhr = 0;
@@ -1595,16 +1604,15 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
flc->word1_sdl = (uint8_t)bufsize;
flc->word2_rflc_31_0 = lower_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
flc->word3_rflc_63_32 = upper_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
session->ctxt = priv;
for (i = 0; i < bufsize; i++)
- PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
- i, priv->flc_desc[0].desc[i]);
+ DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
return 0;
@@ -1621,7 +1629,7 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
{
struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
struct alginfo authdata;
- unsigned int bufsize, i;
+ int bufsize, i;
struct ctxt_priv *priv;
struct sec_flow_context *flc;
@@ -1633,7 +1641,7 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
sizeof(struct sec_flc_desc),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
+ DPAA2_SEC_ERR("No Memory for priv CTXT");
return -1;
}
@@ -1643,7 +1651,7 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for auth key\n");
+ DPAA2_SEC_ERR("Unable to allocate memory for auth key");
rte_free(priv);
return -1;
}
@@ -1651,7 +1659,7 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
memcpy(session->auth_key.data, xform->auth.key.data,
xform->auth.key.length);
- authdata.key = (uint64_t)session->auth_key.data;
+ authdata.key = (size_t)session->auth_key.data;
authdata.keylen = session->auth_key.length;
authdata.key_enc_flags = 0;
authdata.key_type = RTA_DATA_IMM;
@@ -1703,12 +1711,12 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_AES_CMAC:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
case RTE_CRYPTO_AUTH_ZUC_EIA3:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
- xform->auth.algo);
+ DPAA2_SEC_ERR("Crypto: Unsupported auth alg %un",
+ xform->auth.algo);
goto error_out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
- xform->auth.algo);
+ DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
+ xform->auth.algo);
goto error_out;
}
session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
@@ -1717,18 +1725,22 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
1, 0, &authdata, !session->dir,
session->digest_length);
+ if (bufsize < 0) {
+ DPAA2_SEC_ERR("Crypto: Invalid buffer length");
+ goto error_out;
+ }
flc->word1_sdl = (uint8_t)bufsize;
flc->word2_rflc_31_0 = lower_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
flc->word3_rflc_63_32 = upper_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
session->ctxt = priv;
for (i = 0; i < bufsize; i++)
- PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
- i, priv->flc_desc[DESC_INITFINAL].desc[i]);
+ DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
+ i, priv->flc_desc[DESC_INITFINAL].desc[i]);
return 0;
@@ -1747,7 +1759,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
struct alginfo aeaddata;
- unsigned int bufsize, i;
+ int bufsize, i;
struct ctxt_priv *priv;
struct sec_flow_context *flc;
struct rte_crypto_aead_xform *aead_xform = &xform->aead;
@@ -1765,7 +1777,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
+ DPAA2_SEC_ERR("No Memory for priv CTXT");
return -1;
}
@@ -1775,7 +1787,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
RTE_CACHE_LINE_SIZE);
if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for aead key\n");
+ DPAA2_SEC_ERR("No Memory for aead key");
rte_free(priv);
return -1;
}
@@ -1786,7 +1798,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
session->aead_key.length = aead_xform->key.length;
ctxt->auth_only_len = aead_xform->aad_length;
- aeaddata.key = (uint64_t)session->aead_key.data;
+ aeaddata.key = (size_t)session->aead_key.data;
aeaddata.keylen = session->aead_key.length;
aeaddata.key_enc_flags = 0;
aeaddata.key_type = RTA_DATA_IMM;
@@ -1798,12 +1810,12 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
break;
case RTE_CRYPTO_AEAD_AES_CCM:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported AEAD alg %u\n",
- aead_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
+ aead_xform->algo);
goto error_out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined AEAD specified %u\n",
- aead_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
+ aead_xform->algo);
goto error_out;
}
session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
@@ -1816,7 +1828,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
&priv->flc_desc[0].desc[1], 1);
if (err < 0) {
- PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n");
+ DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
goto error_out;
}
if (priv->flc_desc[0].desc[1] & 1) {
@@ -1838,16 +1850,21 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
priv->flc_desc[0].desc, 1, 0,
&aeaddata, session->iv.length,
session->digest_length);
+ if (bufsize < 0) {
+ DPAA2_SEC_ERR("Crypto: Invalid buffer length");
+ goto error_out;
+ }
+
flc->word1_sdl = (uint8_t)bufsize;
flc->word2_rflc_31_0 = lower_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
flc->word3_rflc_63_32 = upper_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
session->ctxt = priv;
for (i = 0; i < bufsize; i++)
- PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+ DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
i, priv->flc_desc[0].desc[i]);
return 0;
@@ -1867,7 +1884,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
struct alginfo authdata, cipherdata;
- unsigned int bufsize, i;
+ int bufsize, i;
struct ctxt_priv *priv;
struct sec_flow_context *flc;
struct rte_crypto_cipher_xform *cipher_xform;
@@ -1899,7 +1916,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
+ DPAA2_SEC_ERR("No Memory for priv CTXT");
return -1;
}
@@ -1909,7 +1926,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
RTE_CACHE_LINE_SIZE);
if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
+ DPAA2_SEC_ERR("No Memory for cipher key");
rte_free(priv);
return -1;
}
@@ -1917,7 +1934,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for auth key\n");
+ DPAA2_SEC_ERR("No Memory for auth key");
rte_free(session->cipher_key.data);
rte_free(priv);
return -1;
@@ -1928,7 +1945,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
memcpy(session->auth_key.data, auth_xform->key.data,
auth_xform->key.length);
- authdata.key = (uint64_t)session->auth_key.data;
+ authdata.key = (size_t)session->auth_key.data;
authdata.keylen = session->auth_key.length;
authdata.key_enc_flags = 0;
authdata.key_type = RTA_DATA_IMM;
@@ -1980,15 +1997,15 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_AES_CMAC:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
case RTE_CRYPTO_AUTH_ZUC_EIA3:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
- auth_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
+ auth_xform->algo);
goto error_out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
- auth_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
+ auth_xform->algo);
goto error_out;
}
- cipherdata.key = (uint64_t)session->cipher_key.data;
+ cipherdata.key = (size_t)session->cipher_key.data;
cipherdata.keylen = session->cipher_key.length;
cipherdata.key_enc_flags = 0;
cipherdata.key_type = RTA_DATA_IMM;
@@ -2014,12 +2031,12 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_AES_ECB:
case RTE_CRYPTO_CIPHER_KASUMI_F8:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
- cipher_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
+ cipher_xform->algo);
goto error_out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
- cipher_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
+ cipher_xform->algo);
goto error_out;
}
session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
@@ -2033,7 +2050,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
&priv->flc_desc[0].desc[2], 2);
if (err < 0) {
- PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n");
+ DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
goto error_out;
}
if (priv->flc_desc[0].desc[2] & 1) {
@@ -2059,21 +2076,25 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
ctxt->auth_only_len,
session->digest_length,
session->dir);
+ if (bufsize < 0) {
+ DPAA2_SEC_ERR("Crypto: Invalid buffer length");
+ goto error_out;
+ }
} else {
- RTE_LOG(ERR, PMD, "Hash before cipher not supported\n");
+ DPAA2_SEC_ERR("Hash before cipher not supported");
goto error_out;
}
flc->word1_sdl = (uint8_t)bufsize;
flc->word2_rflc_31_0 = lower_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
flc->word3_rflc_63_32 = upper_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
session->ctxt = priv;
for (i = 0; i < bufsize; i++)
- PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+ DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
i, priv->flc_desc[0].desc[i]);
return 0;
@@ -2094,7 +2115,7 @@ dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
PMD_INIT_FUNC_TRACE();
if (unlikely(sess == NULL)) {
- RTE_LOG(ERR, PMD, "invalid session struct\n");
+ DPAA2_SEC_ERR("Invalid session struct");
return -1;
}
@@ -2130,7 +2151,7 @@ dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
dpaa2_sec_aead_init(dev, xform, session);
} else {
- RTE_LOG(ERR, PMD, "Invalid crypto type\n");
+ DPAA2_SEC_ERR("Invalid crypto type");
return -EINVAL;
}
@@ -2150,7 +2171,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
struct ipsec_encap_pdb encap_pdb;
struct ipsec_decap_pdb decap_pdb;
struct alginfo authdata, cipherdata;
- unsigned int bufsize;
+ int bufsize;
struct sec_flow_context *flc;
PMD_INIT_FUNC_TRACE();
@@ -2168,7 +2189,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- RTE_LOG(ERR, PMD, "\nNo memory for priv CTXT");
+ DPAA2_SEC_ERR("No memory for priv CTXT");
return -ENOMEM;
}
@@ -2180,7 +2201,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
RTE_CACHE_LINE_SIZE);
if (session->cipher_key.data == NULL &&
cipher_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
+ DPAA2_SEC_ERR("No Memory for cipher key");
rte_free(priv);
return -ENOMEM;
}
@@ -2191,7 +2212,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL &&
auth_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for auth key\n");
+ DPAA2_SEC_ERR("No Memory for auth key");
rte_free(session->cipher_key.data);
rte_free(priv);
return -ENOMEM;
@@ -2202,7 +2223,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
memcpy(session->auth_key.data, auth_xform->key.data,
auth_xform->key.length);
- authdata.key = (uint64_t)session->auth_key.data;
+ authdata.key = (size_t)session->auth_key.data;
authdata.keylen = session->auth_key.length;
authdata.key_enc_flags = 0;
authdata.key_type = RTA_DATA_IMM;
@@ -2253,15 +2274,15 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_KASUMI_F9:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
case RTE_CRYPTO_AUTH_ZUC_EIA3:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
- auth_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
+ auth_xform->algo);
goto out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
- auth_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
+ auth_xform->algo);
goto out;
}
- cipherdata.key = (uint64_t)session->cipher_key.data;
+ cipherdata.key = (size_t)session->cipher_key.data;
cipherdata.keylen = session->cipher_key.length;
cipherdata.key_enc_flags = 0;
cipherdata.key_type = RTA_DATA_IMM;
@@ -2289,12 +2310,12 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_AES_ECB:
case RTE_CRYPTO_CIPHER_KASUMI_F8:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
- cipher_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
+ cipher_xform->algo);
goto out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
- cipher_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
+ cipher_xform->algo);
goto out;
}
@@ -2340,15 +2361,21 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
1, 0, &decap_pdb, &cipherdata, &authdata);
} else
goto out;
+
+ if (bufsize < 0) {
+ DPAA2_SEC_ERR("Crypto: Invalid buffer length");
+ goto out;
+ }
+
flc->word1_sdl = (uint8_t)bufsize;
/* Enable the stashing control bit */
DPAA2_SET_FLC_RSC(flc);
flc->word2_rflc_31_0 = lower_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq) | 0x14);
flc->word3_rflc_63_32 = upper_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
/* Set EWS bit i.e. enable write-safe */
@@ -2379,8 +2406,7 @@ dpaa2_sec_security_session_create(void *dev,
int ret;
if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
- "Couldn't get object from session mempool");
+ DPAA2_SEC_ERR("Couldn't get object from session mempool");
return -ENOMEM;
}
@@ -2395,9 +2421,7 @@ dpaa2_sec_security_session_create(void *dev,
return -EINVAL;
}
if (ret != 0) {
- PMD_DRV_LOG(ERR,
- "DPAA2 PMD: failed to configure session parameters");
-
+ DPAA2_SEC_ERR("Failed to configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
return ret;
@@ -2441,16 +2465,13 @@ dpaa2_sec_session_configure(struct rte_cryptodev *dev,
int ret;
if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
- "Couldn't get object from session mempool");
+ DPAA2_SEC_ERR("Couldn't get object from session mempool");
return -ENOMEM;
}
ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
if (ret != 0) {
- PMD_DRV_LOG(ERR, "DPAA2 PMD: failed to configure "
- "session parameters");
-
+ DPAA2_SEC_ERR("Failed to configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
return ret;
@@ -2511,14 +2532,13 @@ dpaa2_sec_dev_start(struct rte_cryptodev *dev)
ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
if (ret) {
- PMD_INIT_LOG(ERR, "DPSECI with HW_ID = %d ENABLE FAILED\n",
- priv->hw_id);
+ DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
+ priv->hw_id);
goto get_attr_failure;
}
ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
if (ret) {
- PMD_INIT_LOG(ERR,
- "DPSEC ATTRIBUTE READ FAILED, disabling DPSEC\n");
+ DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
goto get_attr_failure;
}
for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
@@ -2526,14 +2546,14 @@ dpaa2_sec_dev_start(struct rte_cryptodev *dev)
dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
&rx_attr);
dpaa2_q->fqid = rx_attr.fqid;
- PMD_INIT_LOG(DEBUG, "rx_fqid: %d", dpaa2_q->fqid);
+ DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
}
for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
dpaa2_q = &qp[i]->tx_vq;
dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
&tx_attr);
dpaa2_q->fqid = tx_attr.fqid;
- PMD_INIT_LOG(DEBUG, "tx_fqid: %d", dpaa2_q->fqid);
+ DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
}
return 0;
@@ -2553,15 +2573,14 @@ dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
if (ret) {
- PMD_INIT_LOG(ERR, "Failure in disabling dpseci %d device",
+ DPAA2_SEC_ERR("Failure in disabling dpseci %d device",
priv->hw_id);
return;
}
ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
if (ret < 0) {
- PMD_INIT_LOG(ERR, "SEC Device cannot be reset:Error = %0x\n",
- ret);
+ DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret);
return;
}
}
@@ -2585,8 +2604,7 @@ dpaa2_sec_dev_close(struct rte_cryptodev *dev)
/*Close the device at underlying layer*/
ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
if (ret) {
- PMD_INIT_LOG(ERR, "Failure closing dpseci device with"
- " error code %d\n", ret);
+ DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
return -1;
}
@@ -2626,12 +2644,12 @@ void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
PMD_INIT_FUNC_TRACE();
if (stats == NULL) {
- PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
+ DPAA2_SEC_ERR("Invalid stats ptr NULL");
return;
}
for (i = 0; i < dev->data->nb_queue_pairs; i++) {
if (qp[i] == NULL) {
- PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
+ DPAA2_SEC_DEBUG("Uninitialised queue pair");
continue;
}
@@ -2644,16 +2662,16 @@ void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
&counters);
if (ret) {
- PMD_DRV_LOG(ERR, "dpseci_get_sec_counters failed\n");
+ DPAA2_SEC_ERR("SEC counters failed");
} else {
- PMD_DRV_LOG(INFO, "dpseci hw stats:"
- "\n\tNumber of Requests Dequeued = %lu"
- "\n\tNumber of Outbound Encrypt Requests = %lu"
- "\n\tNumber of Inbound Decrypt Requests = %lu"
- "\n\tNumber of Outbound Bytes Encrypted = %lu"
- "\n\tNumber of Outbound Bytes Protected = %lu"
- "\n\tNumber of Inbound Bytes Decrypted = %lu"
- "\n\tNumber of Inbound Bytes Validated = %lu",
+ DPAA2_SEC_INFO("dpseci hardware stats:"
+ "\n\tNum of Requests Dequeued = %" PRIu64
+ "\n\tNum of Outbound Encrypt Requests = %" PRIu64
+ "\n\tNum of Inbound Decrypt Requests = %" PRIu64
+ "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
+ "\n\tNum of Outbound Bytes Protected = %" PRIu64
+ "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
+ "\n\tNum of Inbound Bytes Validated = %" PRIu64,
counters.dequeued_requests,
counters.ob_enc_requests,
counters.ib_dec_requests,
@@ -2675,7 +2693,7 @@ void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
for (i = 0; i < dev->data->nb_queue_pairs; i++) {
if (qp[i] == NULL) {
- PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
+ DPAA2_SEC_DEBUG("Uninitialised queue pair");
continue;
}
qp[i]->tx_vq.rx_pkts = 0;
@@ -2729,8 +2747,8 @@ dpaa2_sec_uninit(const struct rte_cryptodev *dev)
rte_mempool_free(internals->fle_pool);
- PMD_INIT_LOG(INFO, "Closing DPAA2_SEC device %s on numa socket %u\n",
- dev->data->name, rte_socket_id());
+ DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
+ dev->data->name, rte_socket_id());
return 0;
}
@@ -2751,7 +2769,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
PMD_INIT_FUNC_TRACE();
dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
if (dpaa2_dev == NULL) {
- PMD_INIT_LOG(ERR, "dpaa2_device not found\n");
+ DPAA2_SEC_ERR("DPAA2 SEC device not found");
return -1;
}
hw_id = dpaa2_dev->object_id;
@@ -2776,7 +2794,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
* RX function
*/
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- PMD_INIT_LOG(DEBUG, "Device already init by primary process");
+ DPAA2_SEC_DEBUG("Device already init by primary process");
return 0;
}
@@ -2794,21 +2812,21 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
sizeof(struct fsl_mc_io), 0);
if (!dpseci) {
- PMD_INIT_LOG(ERR,
- "Error in allocating the memory for dpsec object");
+ DPAA2_SEC_ERR(
+ "Error in allocating the memory for dpsec object");
return -1;
}
dpseci->regs = rte_mcp_ptr_list[0];
retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
if (retcode != 0) {
- PMD_INIT_LOG(ERR, "Cannot open the dpsec device: Error = %x",
- retcode);
+ DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
+ retcode);
goto init_error;
}
retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
if (retcode != 0) {
- PMD_INIT_LOG(ERR,
+ DPAA2_SEC_ERR(
"Cannot get dpsec device attributed: Error = %x",
retcode);
goto init_error;
@@ -2828,15 +2846,15 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
NULL, NULL, NULL, NULL,
SOCKET_ID_ANY, 0);
if (!internals->fle_pool) {
- RTE_LOG(ERR, PMD, "%s create failed\n", str);
+ DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
goto init_error;
}
- PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
+ DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
return 0;
init_error:
- PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);
+ DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
/* dpaa2_sec_uninit(crypto_dev_name); */
return -EFAULT;
@@ -2866,7 +2884,7 @@ cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
if (cryptodev->data->dev_private == NULL)
rte_panic("Cannot allocate memzone for private "
- "device data");
+ "device data");
}
dpaa2_dev->cryptodev = cryptodev;
@@ -2919,5 +2937,15 @@ static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
static struct cryptodev_driver dpaa2_sec_crypto_drv;
RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
-RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, rte_dpaa2_sec_driver,
- cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
+ rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
+
+RTE_INIT(dpaa2_sec_init_log);
+static void
+dpaa2_sec_init_log(void)
+{
+ /* Crypto PMD logs */
+ dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
+ if (dpaa2_logtype_sec >= 0)
+ rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
+}
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
index 23251141..7c1f5e73 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
@@ -8,37 +8,35 @@
#ifndef _DPAA2_SEC_LOGS_H_
#define _DPAA2_SEC_LOGS_H_
-#define PMD_INIT_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
-
-#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_INIT
-#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
-#else
-#define PMD_INIT_FUNC_TRACE() do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_RX
-#define PMD_RX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_TX
-#define PMD_TX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_DRIVER
-#define PMD_DRV_LOG_RAW(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
-#else
-#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
-#endif
-
-#define PMD_DRV_LOG(level, fmt, args...) \
- PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+extern int dpaa2_logtype_sec;
+
+#define DPAA2_SEC_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_logtype_sec, "dpaa2_sec: " \
+ fmt "\n", ##args)
+
+#define DPAA2_SEC_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_logtype_sec, "dpaa2_sec: %s(): " \
+ fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() DPAA2_SEC_LOG(DEBUG, " >>")
+
+#define DPAA2_SEC_INFO(fmt, args...) \
+ DPAA2_SEC_LOG(INFO, fmt, ## args)
+#define DPAA2_SEC_ERR(fmt, args...) \
+ DPAA2_SEC_LOG(ERR, fmt, ## args)
+#define DPAA2_SEC_WARN(fmt, args...) \
+ DPAA2_SEC_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, toggled out at compile time if level lower than current level */
+#define DPAA2_SEC_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#define DPAA2_SEC_DP_DEBUG(fmt, args...) \
+ DPAA2_SEC_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_SEC_DP_INFO(fmt, args...) \
+ DPAA2_SEC_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_SEC_DP_WARN(fmt, args...) \
+ DPAA2_SEC_DP_LOG(WARNING, fmt, ## args)
+
#endif /* _DPAA2_SEC_LOGS_H_ */
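
The new macros above follow DPDK's dynamic logging model: a logtype is registered once at startup, given a default level, and every message is routed through rte_log() with that type. A minimal self-contained sketch of the same pattern, using placeholder names (example_logtype, "pmd.example") rather than the driver's own:

#include <rte_eal.h>	/* RTE_INIT */
#include <rte_log.h>

int example_logtype;	/* plays the role of dpaa2_logtype_sec */

#define EXAMPLE_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, example_logtype, "example: " fmt "\n", ##args)

/* Constructor-style init, as in the patch: register the type and default it
 * to NOTICE so DEBUG/INFO messages stay quiet unless the level is raised.
 */
RTE_INIT(example_init_log);
static void
example_init_log(void)
{
	example_logtype = rte_log_register("pmd.example");
	if (example_logtype >= 0)
		rte_log_set_level(example_logtype, RTE_LOG_NOTICE);
}

static void
example_use(void)
{
	EXAMPLE_LOG(ERR, "operation failed: %d", -22);
}
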
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index e8ac95ba..a9d83ebc 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -185,9 +185,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 16,
+ .min = 1,
.max = 16,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -206,9 +206,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 20,
+ .min = 1,
.max = 20,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -227,9 +227,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 28,
+ .min = 1,
.max = 28,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -248,9 +248,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 32,
- .max = 32,
- .increment = 0
+ .min = 1,
+ .max = 32,
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -269,9 +269,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 48,
+ .min = 1,
.max = 48,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -290,9 +290,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
diff --git a/drivers/crypto/dpaa2_sec/meson.build b/drivers/crypto/dpaa2_sec/meson.build
new file mode 100644
index 00000000..01afc587
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['security', 'mempool_dpaa2']
+sources = files('dpaa2_sec_dpseci.c',
+ 'mc/dpseci.c')
+
+allow_experimental_apis = true
+
+includes += include_directories('mc', 'hw')
diff --git a/drivers/crypto/dpaa_sec/Makefile b/drivers/crypto/dpaa_sec/Makefile
index fe2c5932..9be44704 100644
--- a/drivers/crypto/dpaa_sec/Makefile
+++ b/drivers/crypto/dpaa_sec/Makefile
@@ -12,13 +12,8 @@ LIB = librte_pmd_dpaa_sec.a
# build flags
CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -D _GNU_SOURCE
-ifeq ($(CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_INIT),y)
-CFLAGS += -O0 -g
-CFLAGS += "-Wno-error"
-else
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
-endif
CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa
CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 18681cf3..06f7e437 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2017 NXP
+ * Copyright 2017-2018 NXP
*
*/
@@ -39,6 +39,8 @@
enum rta_sec_era rta_sec_era;
+int dpaa_logtype_sec;
+
static uint8_t cryptodev_driver_id;
static __thread struct rte_crypto_op **dpaa_sec_ops;
@@ -53,7 +55,7 @@ dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
if (!ctx->fd_status) {
ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
} else {
- PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
+ DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
}
@@ -69,7 +71,7 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
if (!ctx || retval) {
- PMD_TX_LOG(ERR, "Alloc sec descriptor failed!");
+ DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
return NULL;
}
/*
@@ -84,7 +86,7 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
ctx->ctx_pool = ses->ctx_pool;
- ctx->vtop_offset = (uint64_t) ctx
+ ctx->vtop_offset = (size_t) ctx
- rte_mempool_virt2iova(ctx);
return ctx;
@@ -93,43 +95,18 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
- const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
- uint64_t vaddr_64, paddr;
- int i;
-
- vaddr_64 = (uint64_t)vaddr;
- for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
- if (vaddr_64 >= memseg[i].addr_64 &&
- vaddr_64 < memseg[i].addr_64 + memseg[i].len) {
- paddr = memseg[i].iova +
- (vaddr_64 - memseg[i].addr_64);
-
- return (rte_iova_t)paddr;
- }
- }
- return (rte_iova_t)(NULL);
-}
+ const struct rte_memseg *ms;
-/* virtual address conversin when mempool support is available for ctx */
-static inline phys_addr_t
-dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
-{
- return (uint64_t)vaddr - ctx->vtop_offset;
+ ms = rte_mem_virt2memseg(vaddr, NULL);
+ if (ms)
+ return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
+ return (size_t)NULL;
}
static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
- const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
- int i;
-
- for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
- if (paddr >= memseg[i].iova &&
- (char *)paddr < (char *)memseg[i].iova + memseg[i].len)
- return (void *)(memseg[i].addr_64 +
- (paddr - memseg[i].iova));
- }
- return NULL;
+ return rte_mem_iova2virt(paddr);
}
static void
@@ -137,8 +114,8 @@ ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
struct qman_fq *fq,
const struct qm_mr_entry *msg)
{
- RTE_LOG_DP(ERR, PMD, "sec fq %d error, RC = %x, seqnum = %x\n",
- fq->fqid, msg->ern.rc, msg->ern.seqnum);
+ DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
+ fq->fqid, msg->ern.rc, msg->ern.seqnum);
}
/* initialize the queue with dest chan as caam chan so that
@@ -166,11 +143,11 @@ dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
fq_in->cb.ern = ern_sec_fq_handler;
- PMD_INIT_LOG(DEBUG, "in-%x out-%x", fq_in->fqid, fqid_out);
+ DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
ret = qman_init_fq(fq_in, flags, &fq_opts);
if (unlikely(ret != 0))
- PMD_INIT_LOG(ERR, "qman_init_fq failed %d", ret);
+ DPAA_SEC_ERR("qman_init_fq failed %d", ret);
return ret;
}
@@ -229,7 +206,7 @@ dpaa_sec_init_tx(struct qman_fq *fq)
ret = qman_create_fq(0, flags, fq);
if (unlikely(ret)) {
- PMD_INIT_LOG(ERR, "qman_create_fq failed");
+ DPAA_SEC_ERR("qman_create_fq failed");
return ret;
}
@@ -244,7 +221,7 @@ dpaa_sec_init_tx(struct qman_fq *fq)
ret = qman_init_fq(fq, 0, &opts);
if (unlikely(ret)) {
- PMD_INIT_LOG(ERR, "unable to init caam source fq!");
+ DPAA_SEC_ERR("unable to init caam source fq!");
return ret;
}
@@ -336,7 +313,7 @@ caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
alginfo_a->algmode = OP_ALG_AAI_HMAC;
break;
default:
- PMD_INIT_LOG(ERR, "unsupported auth alg %u", ses->auth_alg);
+ DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
}
}
@@ -365,7 +342,7 @@ caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
alginfo_c->algmode = OP_ALG_AAI_CTR;
break;
default:
- PMD_INIT_LOG(ERR, "unsupported cipher alg %d", ses->cipher_alg);
+ DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
}
}
@@ -378,7 +355,7 @@ caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
alginfo->algmode = OP_ALG_AAI_GCM;
break;
default:
- PMD_INIT_LOG(ERR, "unsupported AEAD alg %d", ses->aead_alg);
+ DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
}
}
@@ -388,7 +365,7 @@ static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
- uint32_t shared_desc_len = 0;
+ int32_t shared_desc_len = 0;
struct sec_cdb *cdb = &ses->cdb;
int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
@@ -402,11 +379,11 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
if (is_cipher_only(ses)) {
caam_cipher_alg(ses, &alginfo_c);
if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
- PMD_TX_LOG(ERR, "not supported cipher alg\n");
+ DPAA_SEC_ERR("not supported cipher alg");
return -ENOTSUP;
}
- alginfo_c.key = (uint64_t)ses->cipher_key.data;
+ alginfo_c.key = (size_t)ses->cipher_key.data;
alginfo_c.keylen = ses->cipher_key.length;
alginfo_c.key_enc_flags = 0;
alginfo_c.key_type = RTA_DATA_IMM;
@@ -420,11 +397,11 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
} else if (is_auth_only(ses)) {
caam_auth_alg(ses, &alginfo_a);
if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
- PMD_TX_LOG(ERR, "not supported auth alg\n");
+ DPAA_SEC_ERR("not supported auth alg");
return -ENOTSUP;
}
- alginfo_a.key = (uint64_t)ses->auth_key.data;
+ alginfo_a.key = (size_t)ses->auth_key.data;
alginfo_a.keylen = ses->auth_key.length;
alginfo_a.key_enc_flags = 0;
alginfo_a.key_type = RTA_DATA_IMM;
@@ -436,10 +413,10 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
} else if (is_aead(ses)) {
caam_aead_alg(ses, &alginfo);
if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
- PMD_TX_LOG(ERR, "not supported aead alg\n");
+ DPAA_SEC_ERR("not supported aead alg");
return -ENOTSUP;
}
- alginfo.key = (uint64_t)ses->aead_key.data;
+ alginfo.key = (size_t)ses->aead_key.data;
alginfo.keylen = ses->aead_key.length;
alginfo.key_enc_flags = 0;
alginfo.key_type = RTA_DATA_IMM;
@@ -459,22 +436,22 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
} else {
caam_cipher_alg(ses, &alginfo_c);
if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
- PMD_TX_LOG(ERR, "not supported cipher alg\n");
+ DPAA_SEC_ERR("not supported cipher alg");
return -ENOTSUP;
}
- alginfo_c.key = (uint64_t)ses->cipher_key.data;
+ alginfo_c.key = (size_t)ses->cipher_key.data;
alginfo_c.keylen = ses->cipher_key.length;
alginfo_c.key_enc_flags = 0;
alginfo_c.key_type = RTA_DATA_IMM;
caam_auth_alg(ses, &alginfo_a);
if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
- PMD_TX_LOG(ERR, "not supported auth alg\n");
+ DPAA_SEC_ERR("not supported auth alg");
return -ENOTSUP;
}
- alginfo_a.key = (uint64_t)ses->auth_key.data;
+ alginfo_a.key = (size_t)ses->auth_key.data;
alginfo_a.keylen = ses->auth_key.length;
alginfo_a.key_enc_flags = 0;
alginfo_a.key_type = RTA_DATA_IMM;
@@ -487,21 +464,21 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
&cdb->sh_desc[2], 2);
if (err < 0) {
- PMD_TX_LOG(ERR, "Crypto: Incorrect key lengths");
+ DPAA_SEC_ERR("Crypto: Incorrect key lengths");
return err;
}
if (cdb->sh_desc[2] & 1)
alginfo_c.key_type = RTA_DATA_IMM;
else {
- alginfo_c.key = (uint64_t)dpaa_mem_vtop(
- (void *)alginfo_c.key);
+ alginfo_c.key = (size_t)dpaa_mem_vtop(
+ (void *)(size_t)alginfo_c.key);
alginfo_c.key_type = RTA_DATA_PTR;
}
if (cdb->sh_desc[2] & (1<<1))
alginfo_a.key_type = RTA_DATA_IMM;
else {
- alginfo_a.key = (uint64_t)dpaa_mem_vtop(
- (void *)alginfo_a.key);
+ alginfo_a.key = (size_t)dpaa_mem_vtop(
+ (void *)(size_t)alginfo_a.key);
alginfo_a.key_type = RTA_DATA_PTR;
}
cdb->sh_desc[0] = 0;
@@ -530,6 +507,12 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
ses->digest_length, ses->dir);
}
}
+
+ if (shared_desc_len < 0) {
+ DPAA_SEC_ERR("error in preparing command block");
+ return shared_desc_len;
+ }
+
cdb->sh_hdr.hi.field.idlen = shared_desc_len;
cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
@@ -585,7 +568,7 @@ dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
if (!ctx->fd_status) {
op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
} else {
- printf("\nSEC return err: 0x%x", ctx->fd_status);
+ DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
op->status = RTE_CRYPTO_OP_STATUS_ERROR;
}
ops[pkts++] = op;
@@ -616,8 +599,8 @@ build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
extra_segs = 2;
if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
- PMD_TX_LOG(ERR, "Auth: Max sec segs supported is %d\n",
- MAX_SG_ENTRIES);
+ DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
+ MAX_SG_ENTRIES);
return NULL;
}
ctx = dpaa_sec_alloc_ctx(ses);
@@ -640,7 +623,7 @@ build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
in_sg->extension = 1;
in_sg->final = 1;
in_sg->length = sym->auth.data.length;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
+ qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
/* 1st seg */
sg = in_sg + 1;
@@ -664,7 +647,7 @@ build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
sg++;
rte_memcpy(old_digest, sym->auth.digest.data,
ses->digest_length);
- start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
+ start_addr = dpaa_mem_vtop(old_digest);
qm_sg_entry_set64(sg, start_addr);
sg->length = ses->digest_length;
in_sg->length += ses->digest_length;
@@ -718,7 +701,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
if (is_decode(ses)) {
/* need to extend the input to a compound frame */
sg->extension = 1;
- qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
sg->length = sym->auth.data.length + ses->digest_length;
sg->final = 1;
cpu_to_hw_sg(sg);
@@ -732,7 +715,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
cpu_to_hw_sg(sg);
/* let's check digest by hw */
- start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
+ start_addr = dpaa_mem_vtop(old_digest);
sg++;
qm_sg_entry_set64(sg, start_addr);
sg->length = ses->digest_length;
@@ -769,8 +752,8 @@ build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
}
if (req_segs > MAX_SG_ENTRIES) {
- PMD_TX_LOG(ERR, "Cipher: Max sec segs supported is %d\n",
- MAX_SG_ENTRIES);
+ DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
+ MAX_SG_ENTRIES);
return NULL;
}
@@ -785,7 +768,7 @@ build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
out_sg = &cf->sg[0];
out_sg->extension = 1;
out_sg->length = sym->cipher.data.length;
- qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
+ qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
cpu_to_hw_sg(out_sg);
/* 1st seg */
@@ -814,7 +797,7 @@ build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
in_sg->length = sym->cipher.data.length + ses->iv.length;
sg++;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
cpu_to_hw_sg(in_sg);
/* IV */
@@ -881,7 +864,7 @@ build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
sg->extension = 1;
sg->final = 1;
sg->length = sym->cipher.data.length + ses->iv.length;
- qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
cpu_to_hw_sg(sg);
sg = &cf->sg[2];
@@ -922,7 +905,7 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
req_segs++;
if (req_segs > MAX_SG_ENTRIES) {
- PMD_TX_LOG(ERR, "AEAD: Max sec segs supported is %d\n",
+ DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
MAX_SG_ENTRIES);
return NULL;
}
@@ -947,7 +930,7 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* output sg entries */
sg = &cf->sg[2];
- qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
cpu_to_hw_sg(out_sg);
/* 1st seg */
@@ -991,7 +974,7 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* input sg entries */
sg++;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
cpu_to_hw_sg(in_sg);
/* 1st seg IV */
@@ -1028,7 +1011,7 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
sg++;
memcpy(ctx->digest, sym->aead.digest.data,
ses->digest_length);
- qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
}
sg->final = 1;
@@ -1066,7 +1049,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* input */
rte_prefetch0(cf->sg);
sg = &cf->sg[2];
- qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
if (is_encode(ses)) {
qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
@@ -1111,7 +1094,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
ses->digest_length);
sg++;
- qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
length += sg->length;
sg->final = 1;
@@ -1125,7 +1108,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* output */
sg++;
- qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
qm_sg_entry_set64(sg,
dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
sg->length = sym->aead.data.length + ses->auth_only_len;
@@ -1170,7 +1153,7 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
}
if (req_segs > MAX_SG_ENTRIES) {
- PMD_TX_LOG(ERR, "Cipher-Auth: Max sec segs supported is %d\n",
+ DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
MAX_SG_ENTRIES);
return NULL;
}
@@ -1194,7 +1177,7 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* output sg entries */
sg = &cf->sg[2];
- qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
cpu_to_hw_sg(out_sg);
/* 1st seg */
@@ -1236,7 +1219,7 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* input sg entries */
sg++;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
cpu_to_hw_sg(in_sg);
/* 1st seg IV */
@@ -1266,7 +1249,7 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
sg++;
memcpy(ctx->digest, sym->auth.digest.data,
ses->digest_length);
- qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
}
sg->final = 1;
@@ -1303,7 +1286,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* input */
rte_prefetch0(cf->sg);
sg = &cf->sg[2];
- qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
if (is_encode(ses)) {
qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
@@ -1333,7 +1316,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
ses->digest_length);
sg++;
- qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
length += sg->length;
sg->final = 1;
@@ -1347,7 +1330,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* output */
sg++;
- qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
sg->length = sym->cipher.data.length;
length = sg->length;
@@ -1422,7 +1405,6 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
struct rte_crypto_op *op;
struct dpaa_sec_job *cf;
dpaa_sec_session *ses;
- struct dpaa_sec_op_ctx *ctx;
uint32_t auth_only_len;
struct qman_fq *inq[DPAA_SEC_BURST];
@@ -1444,15 +1426,15 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
op->sym->sec_session);
break;
default:
- PMD_TX_LOG(ERR,
+ DPAA_SEC_DP_ERR(
"sessionless crypto op not supported");
frames_to_send = loop;
nb_ops = loop;
goto send_pkts;
}
if (unlikely(!ses->qp || ses->qp != qp)) {
- PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p",
- ses->qp, qp);
+ DPAA_SEC_DP_ERR("sess->qp - %p qp %p",
+ ses->qp, qp);
if (dpaa_sec_attach_sess_q(qp, ses)) {
frames_to_send = loop;
nb_ops = loop;
@@ -1475,7 +1457,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
} else if (is_proto_ipsec(ses)) {
cf = build_proto(op, ses);
} else {
- PMD_TX_LOG(ERR, "not supported sec op");
+ DPAA_SEC_DP_ERR("not supported ops");
frames_to_send = loop;
nb_ops = loop;
goto send_pkts;
@@ -1491,7 +1473,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
} else if (is_auth_cipher(ses)) {
cf = build_cipher_auth_sg(op, ses);
} else {
- PMD_TX_LOG(ERR, "not supported sec op");
+ DPAA_SEC_DP_ERR("not supported ops");
frames_to_send = loop;
nb_ops = loop;
goto send_pkts;
@@ -1507,8 +1489,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
inq[loop] = ses->inq;
fd->opaque_addr = 0;
fd->cmd = 0;
- ctx = container_of(cf, struct dpaa_sec_op_ctx, job);
- qm_fd_addr_set64(fd, dpaa_mem_vtop_ctx(ctx, cf->sg));
+ qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
fd->_format1 = qm_fd_compound;
fd->length29 = 2 * sizeof(struct qm_sg_entry);
/* Auth_only_len is set as 0 in descriptor and it is
@@ -1547,7 +1528,7 @@ dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
dpaa_qp->rx_pkts += num_rx;
dpaa_qp->rx_errs += nb_ops - num_rx;
- PMD_RX_LOG(DEBUG, "SEC Received %d Packets\n", num_rx);
+ DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
return num_rx;
}
@@ -1562,11 +1543,11 @@ dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
PMD_INIT_FUNC_TRACE();
- PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d", dev, qp_id);
+ DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
internals = dev->data->dev_private;
if (qp_id >= internals->max_nb_queue_pairs) {
- PMD_INIT_LOG(ERR, "Max supported qpid %d",
+ DPAA_SEC_ERR("Max supported qpid %d",
internals->max_nb_queue_pairs);
return -EINVAL;
}
@@ -1588,12 +1569,11 @@ dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
struct dpaa_sec_dev_private *internals;
struct dpaa_sec_qp *qp = NULL;
- PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
- dev, qp_id, qp_conf);
+ DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
internals = dev->data->dev_private;
if (qp_id >= internals->max_nb_queue_pairs) {
- PMD_INIT_LOG(ERR, "Max supported qpid %d",
+ DPAA_SEC_ERR("Max supported qpid %d",
internals->max_nb_queue_pairs);
return -EINVAL;
}
@@ -1654,7 +1634,7 @@ dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
RTE_CACHE_LINE_SIZE);
if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
- PMD_INIT_LOG(ERR, "No Memory for cipher key\n");
+ DPAA_SEC_ERR("No Memory for cipher key");
return -ENOMEM;
}
session->cipher_key.length = xform->cipher.key.length;
@@ -1676,7 +1656,7 @@ dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
- PMD_INIT_LOG(ERR, "No Memory for auth key\n");
+ DPAA_SEC_ERR("No Memory for auth key");
return -ENOMEM;
}
session->auth_key.length = xform->auth.key.length;
@@ -1702,7 +1682,7 @@ dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
RTE_CACHE_LINE_SIZE);
if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
- PMD_INIT_LOG(ERR, "No Memory for aead key\n");
+ DPAA_SEC_ERR("No Memory for aead key\n");
return -ENOMEM;
}
session->aead_key.length = xform->aead.key.length;
@@ -1727,7 +1707,7 @@ dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
return &qi->inq[i];
}
}
- PMD_DRV_LOG(ERR, "All ses session in use %x", qi->max_nb_sessions);
+ DPAA_SEC_WARN("All ses session in use %x", qi->max_nb_sessions);
return NULL;
}
@@ -1756,14 +1736,20 @@ dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
sess->qp = qp;
ret = dpaa_sec_prep_cdb(sess);
if (ret) {
- PMD_DRV_LOG(ERR, "Unable to prepare sec cdb");
+ DPAA_SEC_ERR("Unable to prepare sec cdb");
return -1;
}
-
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ ret = rte_dpaa_portal_init((void *)0);
+ if (ret) {
+ DPAA_SEC_ERR("Failure in affining portal");
+ return ret;
+ }
+ }
ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
qman_fq_fqid(&qp->outq));
if (ret)
- PMD_DRV_LOG(ERR, "Unable to init sec queue");
+ DPAA_SEC_ERR("Unable to init sec queue");
return ret;
}
@@ -1806,7 +1792,7 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
PMD_INIT_FUNC_TRACE();
if (unlikely(sess == NULL)) {
- RTE_LOG(ERR, PMD, "invalid session struct\n");
+ DPAA_SEC_ERR("invalid session struct");
return -EINVAL;
}
@@ -1831,7 +1817,7 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
dpaa_sec_cipher_init(dev, xform, session);
dpaa_sec_auth_init(dev, xform->next, session);
} else {
- PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
+ DPAA_SEC_ERR("Not supported: Auth then Cipher");
return -EINVAL;
}
@@ -1842,7 +1828,7 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
dpaa_sec_auth_init(dev, xform, session);
dpaa_sec_cipher_init(dev, xform->next, session);
} else {
- PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
+ DPAA_SEC_ERR("Not supported: Auth then Cipher");
return -EINVAL;
}
@@ -1852,13 +1838,13 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
dpaa_sec_aead_init(dev, xform, session);
} else {
- PMD_DRV_LOG(ERR, "Invalid crypto type");
+ DPAA_SEC_ERR("Invalid crypto type");
return -EINVAL;
}
session->ctx_pool = internals->ctx_pool;
session->inq = dpaa_sec_attach_rxq(internals);
if (session->inq == NULL) {
- PMD_DRV_LOG(ERR, "unable to attach sec queue");
+ DPAA_SEC_ERR("unable to attach sec queue");
goto err1;
}
@@ -1884,15 +1870,13 @@ dpaa_sec_session_configure(struct rte_cryptodev *dev,
PMD_INIT_FUNC_TRACE();
if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
- "Couldn't get object from session mempool");
+ DPAA_SEC_ERR("Couldn't get object from session mempool");
return -ENOMEM;
}
ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
if (ret != 0) {
- PMD_DRV_LOG(ERR, "DPAA PMD: failed to configure "
- "session parameters");
+ DPAA_SEC_ERR("failed to configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
@@ -1958,7 +1942,7 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
RTE_CACHE_LINE_SIZE);
if (session->cipher_key.data == NULL &&
cipher_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
+ DPAA_SEC_ERR("No Memory for cipher key");
return -ENOMEM;
}
@@ -1968,7 +1952,7 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL &&
auth_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for auth key\n");
+ DPAA_SEC_ERR("No Memory for auth key");
rte_free(session->cipher_key.data);
return -ENOMEM;
}
@@ -2013,11 +1997,11 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_KASUMI_F9:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
case RTE_CRYPTO_AUTH_ZUC_EIA3:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
+ DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
auth_xform->algo);
goto out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
+ DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
auth_xform->algo);
goto out;
}
@@ -2037,11 +2021,11 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_AES_ECB:
case RTE_CRYPTO_CIPHER_KASUMI_F8:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
+ DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
cipher_xform->algo);
goto out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
+ DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
cipher_xform->algo);
goto out;
}
@@ -2086,7 +2070,7 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
session->ctx_pool = internals->ctx_pool;
session->inq = dpaa_sec_attach_rxq(internals);
if (session->inq == NULL) {
- PMD_DRV_LOG(ERR, "unable to attach sec queue");
+ DPAA_SEC_ERR("unable to attach sec queue");
goto out;
}
@@ -2110,8 +2094,7 @@ dpaa_sec_security_session_create(void *dev,
int ret;
if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
- "Couldn't get object from session mempool");
+ DPAA_SEC_ERR("Couldn't get object from session mempool");
return -ENOMEM;
}
@@ -2126,9 +2109,7 @@ dpaa_sec_security_session_create(void *dev,
return -EINVAL;
}
if (ret != 0) {
- PMD_DRV_LOG(ERR,
- "DPAA2 PMD: failed to configure session parameters");
-
+ DPAA_SEC_ERR("failed to configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
return ret;
@@ -2163,11 +2144,32 @@ dpaa_sec_security_session_destroy(void *dev __rte_unused,
static int
-dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
+dpaa_sec_dev_configure(struct rte_cryptodev *dev,
struct rte_cryptodev_config *config __rte_unused)
{
+
+ char str[20];
+ struct dpaa_sec_dev_private *internals;
+
PMD_INIT_FUNC_TRACE();
+ internals = dev->data->dev_private;
+ sprintf(str, "ctx_pool_%d", dev->data->dev_id);
+ if (!internals->ctx_pool) {
+ internals->ctx_pool = rte_mempool_create((const char *)str,
+ CTX_POOL_NUM_BUFS,
+ CTX_POOL_BUF_SIZE,
+ CTX_POOL_CACHE_SIZE, 0,
+ NULL, NULL, NULL, NULL,
+ SOCKET_ID_ANY, 0);
+ if (!internals->ctx_pool) {
+ DPAA_SEC_ERR("%s create failed\n", str);
+ return -ENOMEM;
+ }
+ } else
+ DPAA_SEC_INFO("mempool already created for dev_id : %d",
+ dev->data->dev_id);
+
return 0;
}
@@ -2185,9 +2187,19 @@ dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
}
static int
-dpaa_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
+dpaa_sec_dev_close(struct rte_cryptodev *dev)
{
+ struct dpaa_sec_dev_private *internals;
+
PMD_INIT_FUNC_TRACE();
+
+ if (dev == NULL)
+ return -ENODEV;
+
+ internals = dev->data->dev_private;
+ rte_mempool_free(internals->ctx_pool);
+ internals->ctx_pool = NULL;
+
return 0;
}
@@ -2246,18 +2258,20 @@ struct rte_security_ops dpaa_sec_security_ops = {
static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
- struct dpaa_sec_dev_private *internals = dev->data->dev_private;
+ struct dpaa_sec_dev_private *internals;
if (dev == NULL)
return -ENODEV;
+ internals = dev->data->dev_private;
rte_free(dev->security_ctx);
+ /* In case close has been called, internals->ctx_pool would be NULL */
rte_mempool_free(internals->ctx_pool);
rte_free(internals);
- PMD_INIT_LOG(INFO, "Closing DPAA_SEC device %s on numa socket %u\n",
- dev->data->name, rte_socket_id());
+ DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
+ dev->data->name, rte_socket_id());
return 0;
}
@@ -2270,7 +2284,6 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
struct dpaa_sec_qp *qp;
uint32_t i, flags;
int ret;
- char str[20];
PMD_INIT_FUNC_TRACE();
@@ -2295,7 +2308,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
* RX function
*/
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- PMD_INIT_LOG(DEBUG, "Device already init by primary process");
+ DPAA_SEC_WARN("Device already init by primary process");
return 0;
}
@@ -2314,7 +2327,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
qp = &internals->qps[i];
ret = dpaa_sec_init_tx(&qp->outq);
if (ret) {
- PMD_INIT_LOG(ERR, "config tx of queue pair %d", i);
+ DPAA_SEC_ERR("config tx of queue pair %d", i);
goto init_error;
}
}
@@ -2325,28 +2338,16 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
/* create rx qman fq for sessions*/
ret = qman_create_fq(0, flags, &internals->inq[i]);
if (unlikely(ret != 0)) {
- PMD_INIT_LOG(ERR, "sec qman_create_fq failed");
+ DPAA_SEC_ERR("sec qman_create_fq failed");
goto init_error;
}
}
- sprintf(str, "ctx_pool_%d", cryptodev->data->dev_id);
- internals->ctx_pool = rte_mempool_create((const char *)str,
- CTX_POOL_NUM_BUFS,
- CTX_POOL_BUF_SIZE,
- CTX_POOL_CACHE_SIZE, 0,
- NULL, NULL, NULL, NULL,
- SOCKET_ID_ANY, 0);
- if (!internals->ctx_pool) {
- RTE_LOG(ERR, PMD, "%s create failed\n", str);
- goto init_error;
- }
-
- PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
+ RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
return 0;
init_error:
- PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);
+ DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);
dpaa_sec_uninit(cryptodev);
return -EFAULT;
@@ -2445,5 +2446,14 @@ static struct rte_dpaa_driver rte_dpaa_sec_driver = {
static struct cryptodev_driver dpaa_sec_crypto_drv;
RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
-RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver,
+RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
cryptodev_driver_id);
+
+RTE_INIT(dpaa_sec_init_log);
+static void
+dpaa_sec_init_log(void)
+{
+ dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
+ if (dpaa_logtype_sec >= 0)
+ rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
+}
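
Besides the log rework, dpaa_sec.c above switches its address translation from scanning rte_eal_get_physmem_layout() to the memseg lookup helpers of the reworked memory subsystem. A rough standalone sketch of that conversion pattern (function names here are illustrative, not the driver's):

#include <rte_memory.h>
#include <rte_common.h>

/* virtual -> IOVA: locate the backing memseg and add the offset within it */
static rte_iova_t
example_vtop(const void *vaddr)
{
	const struct rte_memseg *ms;

	ms = rte_mem_virt2memseg(vaddr, NULL);
	if (ms != NULL)
		return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
	return (rte_iova_t)0;	/* no mapping found; the driver likewise returns 0 */
}

/* IOVA -> virtual: a single EAL helper replaces the old manual memseg walk */
static void *
example_ptov(rte_iova_t paddr)
{
	return rte_mem_iova2virt(paddr);
}
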
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index f45b36cb..e15e373f 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -7,6 +7,9 @@
#ifndef _DPAA_SEC_H_
#define _DPAA_SEC_H_
+#define CRYPTODEV_NAME_DPAA_SEC_PMD crypto_dpaa_sec
+/**< NXP DPAA - SEC PMD device name */
+
#define NUM_POOL_CHANNELS 4
#define DPAA_SEC_BURST 7
#define DPAA_SEC_ALG_UNSUPPORT (-1)
@@ -133,7 +136,7 @@ struct dpaa_sec_qp {
int tx_errs;
};
-#define RTE_DPAA_MAX_NB_SEC_QPS 1
+#define RTE_DPAA_MAX_NB_SEC_QPS 8
#define RTE_DPAA_MAX_RX_QUEUE RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS
#define DPAA_MAX_DEQUEUE_NUM_FRAMES 63
@@ -182,10 +185,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 16,
+ .min = 1,
.max = 16,
- .increment = 0
+ .increment = 1
},
+ .iv_size = { 0 }
}, }
}, }
},
@@ -202,10 +206,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 20,
+ .min = 1,
.max = 20,
- .increment = 0
+ .increment = 1
},
+ .iv_size = { 0 }
}, }
}, }
},
@@ -222,10 +227,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 28,
+ .min = 1,
.max = 28,
- .increment = 0
+ .increment = 1
},
+ .iv_size = { 0 }
}, }
}, }
},
@@ -242,10 +248,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 32,
+ .min = 1,
.max = 32,
- .increment = 0
+ .increment = 1
},
+ .iv_size = { 0 }
}, }
}, }
},
@@ -262,10 +269,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 48,
+ .min = 1,
.max = 48,
- .increment = 0
+ .increment = 1
},
+ .iv_size = { 0 }
}, }
}, }
},
@@ -282,10 +290,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
+ .iv_size = { 0 }
}, }
}, }
},
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec_log.h b/drivers/crypto/dpaa_sec/dpaa_sec_log.h
index 992a79f5..9784fcbf 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec_log.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec_log.h
@@ -1,44 +1,43 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright NXP 2017.
+ * Copyright 2017-2018 NXP
*
*/
#ifndef _DPAA_SEC_LOG_H_
#define _DPAA_SEC_LOG_H_
-#define PMD_INIT_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
-
-#ifdef RTE_LIBRTE_DPAA_SEC_DEBUG_INIT
-#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
-#else
-#define PMD_INIT_FUNC_TRACE() do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_DPAA_SEC_DEBUG_RX
-#define PMD_RX_LOG(level, fmt, args...) \
- RTE_LOG_DP(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_DPAA_SEC_DEBUG_TX
-#define PMD_TX_LOG(level, fmt, args...) \
- RTE_LOG_DP(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_DPAA_SEC_DEBUG_DRIVER
-#define PMD_DRV_LOG_RAW(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
-#else
-#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
-#endif
-
-#define PMD_DRV_LOG(level, fmt, args...) \
- PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+extern int dpaa_logtype_sec;
+
+#define DPAA_SEC_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa_logtype_sec, "dpaa_sec: " \
+ fmt "\n", ##args)
+
+#define DPAA_SEC_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa_logtype_sec, "dpaa_sec: %s(): " \
+ fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() DPAA_SEC_LOG(DEBUG, " >>")
+
+#define DPAA_SEC_INFO(fmt, args...) \
+ DPAA_SEC_LOG(INFO, fmt, ## args)
+#define DPAA_SEC_ERR(fmt, args...) \
+ DPAA_SEC_LOG(ERR, fmt, ## args)
+#define DPAA_SEC_WARN(fmt, args...) \
+ DPAA_SEC_LOG(WARNING, fmt, ## args)
+
+/* Datapath logs, compiled out at build time if the level is lower than the current datapath log level */
+#define DPAA_SEC_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#define DPAA_SEC_DP_DEBUG(fmt, args...) \
+ DPAA_SEC_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA_SEC_DP_INFO(fmt, args...) \
+ DPAA_SEC_DP_LOG(INFO, fmt, ## args)
+#define DPAA_SEC_DP_WARN(fmt, args...) \
+ DPAA_SEC_DP_LOG(WARNING, fmt, ## args)
+#define DPAA_SEC_DP_ERR(fmt, args...) \
+ DPAA_SEC_DP_LOG(ERR, fmt, ## args)
#endif /* _DPAA_SEC_LOG_H_ */
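[Editor's note] For reference, a minimal sketch (not part of the patch; function and parameter names are illustrative) of how dpaa_sec code would use the reworked macros: control-path messages go through the dynamic logtype, while the DPAA_SEC_DP_* variants stay on RTE_LOG_DP so they compile out below the configured datapath level.

#include "dpaa_sec_log.h"

static int
example_qp_setup(int qp_id)
{
	PMD_INIT_FUNC_TRACE();

	if (qp_id < 0) {
		/* control path: always available through the dynamic logtype */
		DPAA_SEC_ERR("invalid queue pair id %d", qp_id);
		return -1;
	}

	/* datapath: compiled out when RTE_LOG_DP_LEVEL is below DEBUG */
	DPAA_SEC_DP_DEBUG("queue pair %d configured\n", qp_id);
	return 0;
}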
diff --git a/drivers/crypto/dpaa_sec/meson.build b/drivers/crypto/dpaa_sec/meson.build
new file mode 100644
index 00000000..8a570984
--- /dev/null
+++ b/drivers/crypto/dpaa_sec/meson.build
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['bus_dpaa', 'security']
+sources = files('dpaa_sec.c')
+
+allow_experimental_apis = true
+
+includes += include_directories('../dpaa2_sec/')
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd.c b/drivers/crypto/kasumi/rte_kasumi_pmd.c
index 356621d4..205dc1de 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd.c
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd.c
@@ -320,7 +320,7 @@ process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
memset(session, 0, sizeof(struct kasumi_session));
memset(ops[i]->sym->session, 0,
- rte_cryptodev_get_header_session_size());
+ rte_cryptodev_sym_get_header_session_size());
rte_mempool_put(qp->sess_mp, session);
rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
ops[i]->sym->session = NULL;
@@ -619,5 +619,5 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_KASUMI_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(kasumi_crypto_drv, cryptodev_kasumi_pmd_drv,
- cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(kasumi_crypto_drv,
+ cryptodev_kasumi_pmd_drv.driver, cryptodev_driver_id);
diff --git a/drivers/crypto/meson.build b/drivers/crypto/meson.build
index 17041ad8..d64ca418 100644
--- a/drivers/crypto/meson.build
+++ b/drivers/crypto/meson.build
@@ -1,7 +1,9 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-drivers = ['qat', 'null', 'openssl']
+drivers = ['ccp', 'dpaa_sec', 'dpaa2_sec', 'mvsam',
+ 'null', 'openssl', 'qat', 'virtio']
+
std_deps = ['cryptodev'] # cryptodev pulls in all other needed deps
config_flag_fmt = 'RTE_LIBRTE_@0@_PMD'
driver_name_fmt = 'rte_pmd_@0@'
diff --git a/drivers/crypto/mrvl/Makefile b/drivers/crypto/mrvl/Makefile
deleted file mode 100644
index bc5c2270..00000000
--- a/drivers/crypto/mrvl/Makefile
+++ /dev/null
@@ -1,67 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2017 Marvell International Ltd.
-# Copyright(c) 2017 Semihalf.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of the copyright holder nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-ifneq ($(MAKECMDGOALS),clean)
-ifneq ($(MAKECMDGOALS),config)
-ifeq ($(LIBMUSDK_PATH),)
-$(error "Please define LIBMUSDK_PATH environment variable")
-endif
-endif
-endif
-
-# library name
-LIB = librte_pmd_mrvl_crypto.a
-
-# build flags
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS)
-CFLAGS += -I$(LIBMUSDK_PATH)/include
-CFLAGS += -DMVCONF_TYPES_PUBLIC
-CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC
-
-# library version
-LIBABIVER := 1
-
-# versioning export map
-EXPORT_MAP := rte_pmd_mrvl_version.map
-
-# external library dependencies
-LDLIBS += -L$(LIBMUSDK_PATH)/lib -lmusdk
-LDLIBS += -lrte_bus_vdev
-
-# library source files
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO) += rte_mrvl_pmd.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO) += rte_mrvl_pmd_ops.c
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/mrvl/rte_mrvl_compat.h b/drivers/crypto/mrvl/rte_mrvl_compat.h
deleted file mode 100644
index 22cd1840..00000000
--- a/drivers/crypto/mrvl/rte_mrvl_compat.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Marvell International Ltd.
- * Copyright(c) 2017 Semihalf.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_MRVL_COMPAT_H_
-#define _RTE_MRVL_COMPAT_H_
-
-/* Unluckily, container_of is defined by both DPDK and MUSDK,
- * we'll declare only one version.
- *
- * Note that it is not used in this PMD anyway.
- */
-#ifdef container_of
-#undef container_of
-#endif
-#include "env/mv_autogen_comp_flags.h"
-#include "drivers/mv_sam.h"
-#include "drivers/mv_sam_cio.h"
-#include "drivers/mv_sam_session.h"
-
-#endif /* _RTE_MRVL_COMPAT_H_ */
diff --git a/drivers/crypto/mvsam/Makefile b/drivers/crypto/mvsam/Makefile
new file mode 100644
index 00000000..c3dc72c1
--- /dev/null
+++ b/drivers/crypto/mvsam/Makefile
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Marvell International Ltd.
+# Copyright(c) 2017 Semihalf.
+# All rights reserved.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(MAKECMDGOALS),config)
+ifeq ($(LIBMUSDK_PATH),)
+$(error "Please define LIBMUSDK_PATH environment variable")
+endif
+endif
+endif
+
+# library name
+LIB = librte_pmd_mvsam_crypto.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(LIBMUSDK_PATH)/include
+CFLAGS += -DMVCONF_TYPES_PUBLIC
+CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_mvsam_version.map
+
+# external library dependencies
+LDLIBS += -L$(LIBMUSDK_PATH)/lib -lmusdk
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO) += rte_mrvl_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO) += rte_mrvl_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/mvsam/meson.build b/drivers/crypto/mvsam/meson.build
new file mode 100644
index 00000000..3c8ea3cf
--- /dev/null
+++ b/drivers/crypto/mvsam/meson.build
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Marvell International Ltd.
+# Copyright(c) 2018 Semihalf.
+# All rights reserved.
+
+path = get_option('lib_musdk_dir')
+lib_dir = path + '/lib'
+inc_dir = path + '/include'
+
+lib = cc.find_library('libmusdk', dirs: [lib_dir], required: false)
+if not lib.found()
+ build = false
+else
+ ext_deps += lib
+ includes += include_directories(inc_dir)
+ cflags += ['-DMVCONF_TYPES_PUBLIC', '-DMVCONF_DMA_PHYS_ADDR_T_PUBLIC']
+endif
+
+sources = files('rte_mrvl_pmd.c', 'rte_mrvl_pmd_ops.c')
+
+deps += ['bus_vdev']
diff --git a/drivers/crypto/mvsam/rte_mrvl_compat.h b/drivers/crypto/mvsam/rte_mrvl_compat.h
new file mode 100644
index 00000000..4ab28d39
--- /dev/null
+++ b/drivers/crypto/mvsam/rte_mrvl_compat.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ */
+
+#ifndef _RTE_MRVL_COMPAT_H_
+#define _RTE_MRVL_COMPAT_H_
+
+/* Unfortunately, container_of is defined by both DPDK and MUSDK;
+ * keep only one version here.
+ *
+ * Note that it is not used in this PMD anyway.
+ */
+#ifdef container_of
+#undef container_of
+#endif
+#include "env/mv_autogen_comp_flags.h"
+#include "drivers/mv_sam.h"
+#include "drivers/mv_sam_cio.h"
+#include "drivers/mv_sam_session.h"
+
+#endif /* _RTE_MRVL_COMPAT_H_ */
diff --git a/drivers/crypto/mrvl/rte_mrvl_pmd.c b/drivers/crypto/mvsam/rte_mrvl_pmd.c
index 31f3fe58..1b6029a5 100644
--- a/drivers/crypto/mrvl/rte_mrvl_pmd.c
+++ b/drivers/crypto/mvsam/rte_mrvl_pmd.c
@@ -1,35 +1,7 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Marvell International Ltd.
- * Copyright(c) 2017 Semihalf.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
*/
#include <rte_common.h>
@@ -853,5 +825,5 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_MRVL_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv,
+RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv.driver,
cryptodev_driver_id);
diff --git a/drivers/crypto/mrvl/rte_mrvl_pmd_ops.c b/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c
index a1de33ae..3f8de37b 100644
--- a/drivers/crypto/mrvl/rte_mrvl_pmd_ops.c
+++ b/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c
@@ -1,35 +1,7 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Marvell International Ltd.
- * Copyright(c) 2017 Semihalf.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
*/
#include <string.h>
diff --git a/drivers/crypto/mrvl/rte_mrvl_pmd_private.h b/drivers/crypto/mvsam/rte_mrvl_pmd_private.h
index 923faaf9..c16d95b4 100644
--- a/drivers/crypto/mrvl/rte_mrvl_pmd_private.h
+++ b/drivers/crypto/mvsam/rte_mrvl_pmd_private.h
@@ -1,35 +1,7 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Marvell International Ltd.
- * Copyright(c) 2017 Semihalf.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
*/
#ifndef _RTE_MRVL_PMD_PRIVATE_H_
@@ -37,7 +9,7 @@
#include "rte_mrvl_compat.h"
-#define CRYPTODEV_NAME_MRVL_PMD crypto_mrvl
+#define CRYPTODEV_NAME_MRVL_PMD crypto_mvsam
/**< Marvell PMD device name */
#define MRVL_CRYPTO_LOG_ERR(fmt, args...) \
diff --git a/drivers/crypto/mrvl/rte_pmd_mrvl_version.map b/drivers/crypto/mvsam/rte_pmd_mvsam_version.map
index a7530317..a7530317 100644
--- a/drivers/crypto/mrvl/rte_pmd_mrvl_version.map
+++ b/drivers/crypto/mvsam/rte_pmd_mvsam_version.map
diff --git a/drivers/crypto/null/null_crypto_pmd.c b/drivers/crypto/null/null_crypto_pmd.c
index dc8e5776..052b6546 100644
--- a/drivers/crypto/null/null_crypto_pmd.c
+++ b/drivers/crypto/null/null_crypto_pmd.c
@@ -247,5 +247,5 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_NULL_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(null_crypto_drv, cryptodev_null_pmd_drv,
+RTE_PMD_REGISTER_CRYPTO_DRIVER(null_crypto_drv, cryptodev_null_pmd_drv.driver,
cryptodev_driver_id);
diff --git a/drivers/crypto/openssl/rte_openssl_pmd.c b/drivers/crypto/openssl/rte_openssl_pmd.c
index 0d9bedc0..93c6d7e5 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd.c
@@ -1569,7 +1569,7 @@ process_op(struct openssl_qp *qp, struct rte_crypto_op *op,
openssl_reset_session(sess);
memset(sess, 0, sizeof(struct openssl_session));
memset(op->sym->session, 0,
- rte_cryptodev_get_header_session_size());
+ rte_cryptodev_sym_get_header_session_size());
rte_mempool_put(qp->sess_mp, sess);
rte_mempool_put(qp->sess_mp, op->sym->session);
op->sym->session = NULL;
@@ -1733,5 +1733,5 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_OPENSSL_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(openssl_crypto_drv, cryptodev_openssl_pmd_drv,
- cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(openssl_crypto_drv,
+ cryptodev_openssl_pmd_drv.driver, cryptodev_driver_id);
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile
index 260912dc..07266a5e 100644
--- a/drivers/crypto/qat/Makefile
+++ b/drivers/crypto/qat/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2015 Intel Corporation
+# Copyright(c) 2015-2018 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/crypto/qat/meson.build b/drivers/crypto/qat/meson.build
index 7b904630..006cd655 100644
--- a/drivers/crypto/qat/meson.build
+++ b/drivers/crypto/qat/meson.build
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2017 Intel Corporation
+# Copyright(c) 2017-2018 Intel Corporation
dep = dependency('libcrypto', required: false)
if not dep.found()
diff --git a/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h b/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
index 4f8f3d13..bfdbc979 100644
--- a/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
+++ b/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
@@ -1,48 +1,5 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
*/
#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
#define ADF_TRANSPORT_ACCESS_MACROS_H
diff --git a/drivers/crypto/qat/qat_adf/icp_qat_fw.h b/drivers/crypto/qat/qat_adf/icp_qat_fw.h
index 5de34d55..ae39b7f1 100644
--- a/drivers/crypto/qat/qat_adf/icp_qat_fw.h
+++ b/drivers/crypto/qat/qat_adf/icp_qat_fw.h
@@ -1,48 +1,5 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
*/
#ifndef _ICP_QAT_FW_H_
#define _ICP_QAT_FW_H_
diff --git a/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h b/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h
index fbf2b839..c33bc3fe 100644
--- a/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h
+++ b/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h
@@ -1,48 +1,5 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
*/
#ifndef _ICP_QAT_FW_LA_H_
#define _ICP_QAT_FW_LA_H_
diff --git a/drivers/crypto/qat/qat_adf/icp_qat_hw.h b/drivers/crypto/qat/qat_adf/icp_qat_hw.h
index d03688c7..56e3cf79 100644
--- a/drivers/crypto/qat/qat_adf/icp_qat_hw.h
+++ b/drivers/crypto/qat/qat_adf/icp_qat_hw.h
@@ -1,48 +1,5 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
*/
#ifndef _ICP_QAT_HW_H_
#define _ICP_QAT_HW_H_
diff --git a/drivers/crypto/qat/qat_adf/qat_algs.h b/drivers/crypto/qat/qat_adf/qat_algs.h
index 802ba95d..88bd5f00 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs.h
+++ b/drivers/crypto/qat/qat_adf/qat_algs.h
@@ -1,48 +1,5 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015-2016 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015-2017 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
*/
#ifndef _ICP_QAT_ALGS_H_
#define _ICP_QAT_ALGS_H_
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
index 26f854c2..c87ed40f 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
@@ -1,50 +1,6 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015-2016 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015-2017 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
*/
-
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 4afe159d..d9ce2a13 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2017 Intel Corporation
+ * Copyright(c) 2015-2018 Intel Corporation
*/
#include <stdio.h>
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index c182af2e..281a142b 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2016 Intel Corporation
+ * Copyright(c) 2015-2018 Intel Corporation
*/
#ifndef _QAT_CRYPTO_H_
diff --git a/drivers/crypto/qat/qat_crypto_capabilities.h b/drivers/crypto/qat/qat_crypto_capabilities.h
index 37a6b7cb..001c32c5 100644
--- a/drivers/crypto/qat/qat_crypto_capabilities.h
+++ b/drivers/crypto/qat/qat_crypto_capabilities.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017 Intel Corporation
+ * Copyright(c) 2017-2018 Intel Corporation
*/
#ifndef _QAT_CRYPTO_CAPABILITIES_H_
diff --git a/drivers/crypto/qat/qat_logs.h b/drivers/crypto/qat/qat_logs.h
index 565089a4..c9144bf6 100644
--- a/drivers/crypto/qat/qat_logs.h
+++ b/drivers/crypto/qat/qat_logs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
+ * Copyright(c) 2015-2018 Intel Corporation
*/
#ifndef _QAT_LOGS_H_
diff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c
index 87b9ce0b..7fea10c7 100644
--- a/drivers/crypto/qat/qat_qp.c
+++ b/drivers/crypto/qat/qat_qp.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
+ * Copyright(c) 2015-2018 Intel Corporation
*/
#include <rte_common.h>
@@ -54,8 +54,6 @@ queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
int socket_id)
{
const struct rte_memzone *mz;
- unsigned memzone_flags = 0;
- const struct rte_memseg *ms;
PMD_INIT_FUNC_TRACE();
mz = rte_memzone_lookup(queue_name);
@@ -78,25 +76,8 @@ queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
queue_name, queue_size, socket_id);
- ms = rte_eal_get_physmem_layout();
- switch (ms[0].hugepage_sz) {
- case(RTE_PGSIZE_2M):
- memzone_flags = RTE_MEMZONE_2MB;
- break;
- case(RTE_PGSIZE_1G):
- memzone_flags = RTE_MEMZONE_1GB;
- break;
- case(RTE_PGSIZE_16M):
- memzone_flags = RTE_MEMZONE_16MB;
- break;
- case(RTE_PGSIZE_16G):
- memzone_flags = RTE_MEMZONE_16GB;
- break;
- default:
- memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
- }
- return rte_memzone_reserve_aligned(queue_name, queue_size, socket_id,
- memzone_flags, queue_size);
+ return rte_memzone_reserve_aligned(queue_name, queue_size,
+ socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
}
int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index bf837401..c8da07af 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2017 Intel Corporation
+ * Copyright(c) 2015-2018 Intel Corporation
*/
#include <rte_bus_pci.h>
@@ -130,7 +130,7 @@ static int crypto_qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
{
struct rte_cryptodev_pmd_init_params init_params = {
.name = "",
- .socket_id = rte_socket_id(),
+ .socket_id = pci_dev->device.numa_node,
.private_data_size = sizeof(struct qat_pmd_private),
.max_nb_sessions = RTE_QAT_PMD_MAX_NB_SESSIONS
};
@@ -176,5 +176,5 @@ static struct cryptodev_driver qat_crypto_drv;
RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_QAT_SYM_PMD, rte_qat_pmd);
RTE_PMD_REGISTER_PCI_TABLE(CRYPTODEV_NAME_QAT_SYM_PMD, pci_id_qat_map);
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv, rte_qat_pmd,
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv, rte_qat_pmd.driver,
cryptodev_qat_driver_id);
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
index 140c8b41..ed574cc1 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
@@ -91,8 +91,10 @@ update_scheduler_capability(struct scheduler_ctx *sched_ctx)
struct rte_cryptodev_capabilities tmp_caps[256] = { {0} };
uint32_t nb_caps = 0, i;
- if (sched_ctx->capabilities)
+ if (sched_ctx->capabilities) {
rte_free(sched_ctx->capabilities);
+ sched_ctx->capabilities = NULL;
+ }
for (i = 0; i < sched_ctx->nb_slaves; i++) {
struct rte_cryptodev_info dev_info;
@@ -462,8 +464,10 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
sched_ctx->ops.option_set = scheduler->ops->option_set;
sched_ctx->ops.option_get = scheduler->ops->option_get;
- if (sched_ctx->private_ctx)
+ if (sched_ctx->private_ctx) {
rte_free(sched_ctx->private_ctx);
+ sched_ctx->private_ctx = NULL;
+ }
if (sched_ctx->ops.create_private_ctx) {
int ret = (*sched_ctx->ops.create_private_ctx)(dev);
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
index 01e7646c..1c164da7 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
@@ -30,7 +30,7 @@ extern "C" {
#endif
/** Maximum number of multi-core worker cores */
-#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES (64)
+#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES (RTE_MAX_LCORE - 1)
/** Round-robin scheduling mode string */
#define SCHEDULER_MODE_NAME_ROUND_ROBIN round-robin
diff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c
index b2ce44ce..91fb0668 100644
--- a/drivers/crypto/scheduler/scheduler_multicore.c
+++ b/drivers/crypto/scheduler/scheduler_multicore.c
@@ -21,8 +21,8 @@ struct mc_scheduler_ctx {
uint32_t num_workers; /**< Number of workers polling */
uint32_t stop_signal;
- struct rte_ring *sched_enq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
- struct rte_ring *sched_deq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
+ struct rte_ring *sched_enq_ring[RTE_MAX_LCORE];
+ struct rte_ring *sched_deq_ring[RTE_MAX_LCORE];
};
struct mc_scheduler_qp_ctx {
@@ -328,11 +328,13 @@ static int
scheduler_create_private_ctx(struct rte_cryptodev *dev)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
- struct mc_scheduler_ctx *mc_ctx;
+ struct mc_scheduler_ctx *mc_ctx = NULL;
uint16_t i;
- if (sched_ctx->private_ctx)
+ if (sched_ctx->private_ctx) {
rte_free(sched_ctx->private_ctx);
+ sched_ctx->private_ctx = NULL;
+ }
mc_ctx = rte_zmalloc_socket(NULL, sizeof(struct mc_scheduler_ctx), 0,
rte_socket_id());
@@ -345,25 +347,48 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev)
for (i = 0; i < sched_ctx->nb_wc; i++) {
char r_name[16];
- snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX "%u", i);
- mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
- rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
+ snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX
+ "%u_%u", dev->data->dev_id, i);
+ mc_ctx->sched_enq_ring[i] = rte_ring_lookup(r_name);
if (!mc_ctx->sched_enq_ring[i]) {
- CS_LOG_ERR("Cannot create ring for worker %u", i);
- return -1;
+ mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name,
+ PER_SLAVE_BUFF_SIZE,
+ rte_socket_id(),
+ RING_F_SC_DEQ | RING_F_SP_ENQ);
+ if (!mc_ctx->sched_enq_ring[i]) {
+ CS_LOG_ERR("Cannot create ring for worker %u",
+ i);
+ goto exit;
+ }
}
- snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX "%u", i);
- mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
- rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
+ snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX
+ "%u_%u", dev->data->dev_id, i);
+ mc_ctx->sched_deq_ring[i] = rte_ring_lookup(r_name);
if (!mc_ctx->sched_deq_ring[i]) {
- CS_LOG_ERR("Cannot create ring for worker %u", i);
- return -1;
+ mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name,
+ PER_SLAVE_BUFF_SIZE,
+ rte_socket_id(),
+ RING_F_SC_DEQ | RING_F_SP_ENQ);
+ if (!mc_ctx->sched_deq_ring[i]) {
+ CS_LOG_ERR("Cannot create ring for worker %u",
+ i);
+ goto exit;
+ }
}
}
sched_ctx->private_ctx = (void *)mc_ctx;
return 0;
+
+exit:
+ for (i = 0; i < sched_ctx->nb_wc; i++) {
+ rte_ring_free(mc_ctx->sched_enq_ring[i]);
+ rte_ring_free(mc_ctx->sched_deq_ring[i]);
+ }
+ rte_free(mc_ctx);
+
+ return -1;
}
struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
diff --git a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
index 96bf0161..d09e849a 100644
--- a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
+++ b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
@@ -334,8 +334,10 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev)
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
struct psd_scheduler_ctx *psd_ctx;
- if (sched_ctx->private_ctx)
+ if (sched_ctx->private_ctx) {
rte_free(sched_ctx->private_ctx);
+ sched_ctx->private_ctx = NULL;
+ }
psd_ctx = rte_zmalloc_socket(NULL, sizeof(struct psd_scheduler_ctx), 0,
rte_socket_id());
diff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c
index 51a85fa6..25d6409f 100644
--- a/drivers/crypto/scheduler/scheduler_pmd.c
+++ b/drivers/crypto/scheduler/scheduler_pmd.c
@@ -20,7 +20,8 @@ struct scheduler_init_params {
uint32_t nb_slaves;
enum rte_cryptodev_scheduler_mode mode;
uint32_t enable_ordering;
- uint64_t wcmask;
+ uint16_t wc_pool[RTE_MAX_LCORE];
+ uint16_t nb_wc;
char slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES]
[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
};
@@ -86,10 +87,6 @@ cryptodev_scheduler_create(const char *name,
return -EFAULT;
}
- if (init_params->wcmask != 0)
- RTE_LOG(INFO, PMD, " workers core mask = %"PRIx64"\n",
- init_params->wcmask);
-
dev->driver_id = cryptodev_driver_id;
dev->dev_ops = rte_crypto_scheduler_pmd_ops;
@@ -100,15 +97,12 @@ cryptodev_scheduler_create(const char *name,
if (init_params->mode == CDEV_SCHED_MODE_MULTICORE) {
uint16_t i;
- sched_ctx->nb_wc = 0;
+ sched_ctx->nb_wc = init_params->nb_wc;
- for (i = 0; i < RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES; i++) {
- if (init_params->wcmask & (1ULL << i)) {
- sched_ctx->wc_pool[sched_ctx->nb_wc++] = i;
- RTE_LOG(INFO, PMD,
- " Worker core[%u]=%u added\n",
- sched_ctx->nb_wc-1, i);
- }
+ for (i = 0; i < sched_ctx->nb_wc; i++) {
+ sched_ctx->wc_pool[i] = init_params->wc_pool[i];
+ RTE_LOG(INFO, PMD, " Worker core[%u]=%u added\n",
+ i, sched_ctx->wc_pool[i]);
}
}
@@ -232,9 +226,47 @@ static int
parse_coremask_arg(const char *key __rte_unused,
const char *value, void *extra_args)
{
+ int i, j, val;
+ uint16_t idx = 0;
+ char c;
struct scheduler_init_params *params = extra_args;
- params->wcmask = strtoull(value, NULL, 16);
+ params->nb_wc = 0;
+
+ if (value == NULL)
+ return -1;
+ /* Strip leading and trailing blank characters and
+ * remove the 0x/0X prefix if present.
+ */
+ while (isblank(*value))
+ value++;
+ if (value[0] == '0' && ((value[1] == 'x') || (value[1] == 'X')))
+ value += 2;
+ i = strlen(value);
+ while ((i > 0) && isblank(value[i - 1]))
+ i--;
+
+ if (i == 0)
+ return -1;
+
+ for (i = i - 1; i >= 0 && idx < RTE_MAX_LCORE; i--) {
+ c = value[i];
+ if (isxdigit(c) == 0) {
+ /* invalid characters */
+ return -1;
+ }
+ if (isdigit(c))
+ val = c - '0';
+ else if (isupper(c))
+ val = c - 'A' + 10;
+ else
+ val = c - 'a' + 10;
+
+ for (j = 0; j < 4 && idx < RTE_MAX_LCORE; j++, idx++) {
+ if ((1 << j) & val)
+ params->wc_pool[params->nb_wc++] = idx;
+ }
+ }
return 0;
}
@@ -246,7 +278,7 @@ parse_corelist_arg(const char *key __rte_unused,
{
struct scheduler_init_params *params = extra_args;
- params->wcmask = 0ULL;
+ params->nb_wc = 0;
const char *token = value;
@@ -254,7 +286,11 @@ parse_corelist_arg(const char *key __rte_unused,
char *rval;
unsigned int core = strtoul(token, &rval, 10);
- params->wcmask |= 1ULL << core;
+ if (core >= RTE_MAX_LCORE) {
+ CS_LOG_ERR("Invalid worker core %u, should be smaller "
+ "than %u.\n", core, RTE_MAX_LCORE);
+ }
+ params->wc_pool[params->nb_wc++] = (uint16_t)core;
token = (const char *)rval;
if (token[0] == '\0')
break;
@@ -468,5 +504,5 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SCHEDULER_PMD,
"socket_id=<int> "
"slave=<name>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(scheduler_crypto_drv,
- cryptodev_scheduler_pmd_drv,
+ cryptodev_scheduler_pmd_drv.driver,
cryptodev_driver_id);
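[Editor's note] The coremask parser added above replaces the old 64-bit wcmask so that more than 64 worker cores can be expressed. A standalone sketch of the same technique (illustrative names, capped at a hypothetical MAX_CORES rather than RTE_MAX_LCORE): strip blanks and an optional 0x/0X prefix, then walk the hex digits from the least-significant end, expanding each nibble into up to four core indices. For example, "0x6" yields cores 1 and 2.

#include <ctype.h>
#include <stdint.h>
#include <string.h>

#define MAX_CORES 128

static int
coremask_to_cores(const char *value, uint16_t *cores, uint16_t *nb)
{
	int i, j, val;
	uint16_t idx = 0;

	*nb = 0;
	if (value == NULL)
		return -1;

	/* skip leading blanks and an optional 0x/0X prefix */
	while (isblank((unsigned char)*value))
		value++;
	if (value[0] == '0' && (value[1] == 'x' || value[1] == 'X'))
		value += 2;

	/* trim trailing blanks */
	i = strlen(value);
	while (i > 0 && isblank((unsigned char)value[i - 1]))
		i--;
	if (i == 0)
		return -1;

	/* least-significant hex digit first; each digit maps to 4 core ids */
	for (i = i - 1; i >= 0 && idx < MAX_CORES; i--) {
		char c = value[i];

		if (!isxdigit((unsigned char)c))
			return -1;
		if (isdigit((unsigned char)c))
			val = c - '0';
		else if (isupper((unsigned char)c))
			val = c - 'A' + 10;
		else
			val = c - 'a' + 10;

		for (j = 0; j < 4 && idx < MAX_CORES; j++, idx++) {
			if (val & (1 << j))
				cores[(*nb)++] = idx;
		}
	}
	return 0;
}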
diff --git a/drivers/crypto/scheduler/scheduler_pmd_ops.c b/drivers/crypto/scheduler/scheduler_pmd_ops.c
index 680c2afb..147dc51e 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_ops.c
+++ b/drivers/crypto/scheduler/scheduler_pmd_ops.c
@@ -46,6 +46,7 @@ scheduler_attach_init_slave(struct rte_cryptodev *dev)
sched_ctx->init_slave_names[i]);
rte_free(sched_ctx->init_slave_names[i]);
+ sched_ctx->init_slave_names[i] = NULL;
sched_ctx->nb_init_slaves -= 1;
}
@@ -261,11 +262,15 @@ scheduler_pmd_close(struct rte_cryptodev *dev)
}
}
- if (sched_ctx->private_ctx)
+ if (sched_ctx->private_ctx) {
rte_free(sched_ctx->private_ctx);
+ sched_ctx->private_ctx = NULL;
+ }
- if (sched_ctx->capabilities)
+ if (sched_ctx->capabilities) {
rte_free(sched_ctx->capabilities);
+ sched_ctx->capabilities = NULL;
+ }
return 0;
}
diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
index dd7ca5a4..12410b48 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_private.h
+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -60,7 +60,7 @@ struct scheduler_ctx {
char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
- uint16_t wc_pool[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
+ uint16_t wc_pool[RTE_MAX_LCORE];
uint16_t nb_wc;
char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c
index 27af69f2..72751e35 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd.c
@@ -339,7 +339,7 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
memset(session, 0, sizeof(struct snow3g_session));
memset(ops[i]->sym->session, 0,
- rte_cryptodev_get_header_session_size());
+ rte_cryptodev_sym_get_header_session_size());
rte_mempool_put(qp->sess_mp, session);
rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
ops[i]->sym->session = NULL;
@@ -619,5 +619,5 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SNOW3G_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(snow3g_crypto_drv, cryptodev_snow3g_pmd_drv,
- cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(snow3g_crypto_drv,
+ cryptodev_snow3g_pmd_drv.driver, cryptodev_driver_id);
diff --git a/drivers/crypto/virtio/Makefile b/drivers/crypto/virtio/Makefile
new file mode 100644
index 00000000..be7b828f
--- /dev/null
+++ b/drivers/crypto/virtio/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_virtio_crypto.a
+
+#
+# include virtio_crypto.h
+#
+CFLAGS += -I$(RTE_SDK)/lib/librte_vhost
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_virtio_crypto_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtqueue.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio_pci.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio_cryptodev.c
+
+# this lib depends upon:
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_pci -lrte_bus_pci
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/virtio/meson.build b/drivers/crypto/virtio/meson.build
new file mode 100644
index 00000000..b15b3f9f
--- /dev/null
+++ b/drivers/crypto/virtio/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+
+includes += include_directories('../../../lib/librte_vhost')
+deps += 'bus_pci'
+name = 'virtio_crypto'
+sources = files('virtio_cryptodev.c', 'virtio_pci.c',
+ 'virtio_rxtx.c', 'virtqueue.c')
diff --git a/drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map b/drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map
new file mode 100644
index 00000000..de8e412f
--- /dev/null
+++ b/drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map
@@ -0,0 +1,3 @@
+DPDK_18.05 {
+ local: *;
+};
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.h b/drivers/crypto/virtio/virtio_crypto_algs.h
new file mode 100644
index 00000000..4c44af37
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_algs.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_CRYPTO_ALGS_H_
+#define _VIRTIO_CRYPTO_ALGS_H_
+
+#include <rte_memory.h>
+
+#include "virtio_crypto.h"
+
+struct virtio_crypto_session {
+ uint64_t session_id;
+
+ struct {
+ uint16_t offset;
+ uint16_t length;
+ } iv;
+
+ struct {
+ uint32_t length;
+ phys_addr_t phys_addr;
+ } aad;
+
+ struct virtio_crypto_op_ctrl_req ctrl;
+};
+
+#endif /* _VIRTIO_CRYPTO_ALGS_H_ */
diff --git a/drivers/crypto/virtio/virtio_crypto_capabilities.h b/drivers/crypto/virtio/virtio_crypto_capabilities.h
new file mode 100644
index 00000000..03c30dee
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_capabilities.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_CRYPTO_CAPABILITIES_H_
+#define _VIRTIO_CRYPTO_CAPABILITIES_H_
+
+#define VIRTIO_SYM_CAPABILITIES \
+ { /* SHA1 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 1, \
+ .max = 20, \
+ .increment = 1 \
+ }, \
+ .iv_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }
+
+#endif /* _VIRTIO_CRYPTO_CAPABILITIES_H_ */
diff --git a/drivers/crypto/virtio/virtio_cryptodev.c b/drivers/crypto/virtio/virtio_cryptodev.c
new file mode 100644
index 00000000..df88953f
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_cryptodev.c
@@ -0,0 +1,1504 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+#include <stdbool.h>
+#include <unistd.h>
+
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_eal.h>
+
+#include "virtio_cryptodev.h"
+#include "virtqueue.h"
+#include "virtio_crypto_algs.h"
+#include "virtio_crypto_capabilities.h"
+
+int virtio_crypto_logtype_init;
+int virtio_crypto_logtype_session;
+int virtio_crypto_logtype_rx;
+int virtio_crypto_logtype_tx;
+int virtio_crypto_logtype_driver;
+
+static int virtio_crypto_dev_configure(struct rte_cryptodev *dev,
+ struct rte_cryptodev_config *config);
+static int virtio_crypto_dev_start(struct rte_cryptodev *dev);
+static void virtio_crypto_dev_stop(struct rte_cryptodev *dev);
+static int virtio_crypto_dev_close(struct rte_cryptodev *dev);
+static void virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info);
+static void virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats);
+static void virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev);
+static int virtio_crypto_qp_setup(struct rte_cryptodev *dev,
+ uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id,
+ struct rte_mempool *session_pool);
+static int virtio_crypto_qp_release(struct rte_cryptodev *dev,
+ uint16_t queue_pair_id);
+static void virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev);
+static unsigned int virtio_crypto_sym_get_session_private_size(
+ struct rte_cryptodev *dev);
+static void virtio_crypto_sym_clear_session(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess);
+static int virtio_crypto_sym_configure_session(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_mempool *mp);
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id pci_id_virtio_crypto_map[] = {
+ { RTE_PCI_DEVICE(VIRTIO_CRYPTO_PCI_VENDORID,
+ VIRTIO_CRYPTO_PCI_DEVICEID) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static const struct rte_cryptodev_capabilities virtio_capabilities[] = {
+ VIRTIO_SYM_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+uint8_t cryptodev_virtio_driver_id;
+
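+/* Number of indirect descriptors needed to create a session:
+ * ctrl request, cipher key, auth key and session input.
+ */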
+#define NUM_ENTRY_SYM_CREATE_SESSION 4
+
+static int
+virtio_crypto_send_command(struct virtqueue *vq,
+ struct virtio_crypto_op_ctrl_req *ctrl, uint8_t *cipher_key,
+ uint8_t *auth_key, struct virtio_crypto_session *session)
+{
+ uint8_t idx = 0;
+ uint8_t needed = 1;
+ uint32_t head = 0;
+ uint32_t len_cipher_key = 0;
+ uint32_t len_auth_key = 0;
+ uint32_t len_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
+ uint32_t len_session_input = sizeof(struct virtio_crypto_session_input);
+ uint32_t len_total = 0;
+ uint32_t input_offset = 0;
+ void *virt_addr_started = NULL;
+ phys_addr_t phys_addr_started;
+ struct vring_desc *desc;
+ uint32_t desc_offset;
+ struct virtio_crypto_session_input *input;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (session == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("session is NULL.");
+ return -EINVAL;
+ }
+	/* cipher-only sessions are supported, so auth_key may be NULL,
+	 * but a cipher key is always required
+	 */
+ if (!cipher_key) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("cipher key is NULL.");
+ return -EINVAL;
+ }
+
+ head = vq->vq_desc_head_idx;
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_desc_head_idx = %d, vq = %p",
+ head, vq);
+
+ if (vq->vq_free_cnt < needed) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Not enough entry");
+ return -ENOSPC;
+ }
+
+ /* calculate the length of cipher key */
+ if (cipher_key) {
+ switch (ctrl->u.sym_create_session.op_type) {
+ case VIRTIO_CRYPTO_SYM_OP_CIPHER:
+ len_cipher_key
+ = ctrl->u.sym_create_session.u.cipher
+ .para.keylen;
+ break;
+ case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
+ len_cipher_key
+ = ctrl->u.sym_create_session.u.chain
+ .para.cipher_param.keylen;
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("invalid op type");
+ return -EINVAL;
+ }
+ }
+
+ /* calculate the length of auth key */
+ if (auth_key) {
+ len_auth_key =
+ ctrl->u.sym_create_session.u.chain.para.u.mac_param
+ .auth_key_len;
+ }
+
+	/*
+	 * Allocate one buffer to hold the ctrl request, cipher key, auth key,
+	 * session input and the indirect descriptor table
+	 */
+ desc_offset = len_ctrl_req + len_cipher_key + len_auth_key
+ + len_session_input;
+ virt_addr_started = rte_malloc(NULL,
+ desc_offset + NUM_ENTRY_SYM_CREATE_SESSION
+ * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
+ if (virt_addr_started == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap memory");
+ return -ENOSPC;
+ }
+ phys_addr_started = rte_malloc_virt2iova(virt_addr_started);
+
+ /* address to store indirect vring desc entries */
+ desc = (struct vring_desc *)
+ ((uint8_t *)virt_addr_started + desc_offset);
+
+ /* ctrl req part */
+ memcpy(virt_addr_started, ctrl, len_ctrl_req);
+ desc[idx].addr = phys_addr_started;
+ desc[idx].len = len_ctrl_req;
+ desc[idx].flags = VRING_DESC_F_NEXT;
+ desc[idx].next = idx + 1;
+ idx++;
+ len_total += len_ctrl_req;
+ input_offset += len_ctrl_req;
+
+ /* cipher key part */
+ if (len_cipher_key > 0) {
+ memcpy((uint8_t *)virt_addr_started + len_total,
+ cipher_key, len_cipher_key);
+
+ desc[idx].addr = phys_addr_started + len_total;
+ desc[idx].len = len_cipher_key;
+ desc[idx].flags = VRING_DESC_F_NEXT;
+ desc[idx].next = idx + 1;
+ idx++;
+ len_total += len_cipher_key;
+ input_offset += len_cipher_key;
+ }
+
+ /* auth key part */
+ if (len_auth_key > 0) {
+ memcpy((uint8_t *)virt_addr_started + len_total,
+ auth_key, len_auth_key);
+
+ desc[idx].addr = phys_addr_started + len_total;
+ desc[idx].len = len_auth_key;
+ desc[idx].flags = VRING_DESC_F_NEXT;
+ desc[idx].next = idx + 1;
+ idx++;
+ len_total += len_auth_key;
+ input_offset += len_auth_key;
+ }
+
+ /* input part */
+ input = (struct virtio_crypto_session_input *)
+ ((uint8_t *)virt_addr_started + input_offset);
+ input->status = VIRTIO_CRYPTO_ERR;
+ input->session_id = ~0ULL;
+ desc[idx].addr = phys_addr_started + len_total;
+ desc[idx].len = len_session_input;
+ desc[idx].flags = VRING_DESC_F_WRITE;
+ idx++;
+
+ /* use a single desc entry */
+ vq->vq_ring.desc[head].addr = phys_addr_started + desc_offset;
+ vq->vq_ring.desc[head].len = idx * sizeof(struct vring_desc);
+ vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
+ vq->vq_free_cnt--;
+
+ vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
+
+ vq_update_avail_ring(vq, head);
+ vq_update_avail_idx(vq);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
+ vq->vq_queue_index);
+
+ virtqueue_notify(vq);
+
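+	/* The control path is synchronous: busy-poll (sleeping briefly)
+	 * until the device posts the result on the used ring.
+	 */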
+ rte_rmb();
+ while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
+ rte_rmb();
+ usleep(100);
+ }
+
+ while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
+ uint32_t idx, desc_idx, used_idx;
+ struct vring_used_elem *uep;
+
+ used_idx = (uint32_t)(vq->vq_used_cons_idx
+ & (vq->vq_nentries - 1));
+ uep = &vq->vq_ring.used->ring[used_idx];
+ idx = (uint32_t) uep->id;
+ desc_idx = idx;
+
+ while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
+ desc_idx = vq->vq_ring.desc[desc_idx].next;
+ vq->vq_free_cnt++;
+ }
+
+ vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
+ vq->vq_desc_head_idx = idx;
+
+ vq->vq_used_cons_idx++;
+ vq->vq_free_cnt++;
+ }
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
+ "vq->vq_desc_head_idx=%d",
+ vq->vq_free_cnt, vq->vq_desc_head_idx);
+
+ /* get the result */
+ if (input->status != VIRTIO_CRYPTO_OK) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Something wrong on backend! "
+ "status=%u, session_id=%" PRIu64 "",
+ input->status, input->session_id);
+ rte_free(virt_addr_started);
+ ret = -1;
+ } else {
+ session->session_id = input->session_id;
+
+ VIRTIO_CRYPTO_SESSION_LOG_INFO("Create session successfully, "
+ "session_id=%" PRIu64 "", input->session_id);
+ rte_free(virt_addr_started);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+void
+virtio_crypto_queue_release(struct virtqueue *vq)
+{
+ struct virtio_crypto_hw *hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (vq) {
+ hw = vq->hw;
+ /* Select and deactivate the queue */
+ VTPCI_OPS(hw)->del_queue(hw, vq);
+
+ rte_memzone_free(vq->mz);
+ rte_mempool_free(vq->mpool);
+ rte_free(vq);
+ }
+}
+
+#define MPOOL_MAX_NAME_SZ 32
+
+int
+virtio_crypto_queue_setup(struct rte_cryptodev *dev,
+ int queue_type,
+ uint16_t vtpci_queue_idx,
+ uint16_t nb_desc,
+ int socket_id,
+ struct virtqueue **pvq)
+{
+ char vq_name[VIRTQUEUE_MAX_NAME_SZ];
+ char mpool_name[MPOOL_MAX_NAME_SZ];
+ const struct rte_memzone *mz;
+ unsigned int vq_size, size;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+ struct virtqueue *vq = NULL;
+ uint32_t i = 0;
+ uint32_t j;
+
+ PMD_INIT_FUNC_TRACE();
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("setting up queue: %u", vtpci_queue_idx);
+
+ /*
+	 * Read the virtqueue size from the Queue Size field.
+	 * It is always a power of 2; if it is 0, the virtqueue does not exist.
+ */
+ vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
+ if (vq_size == 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue does not exist");
+ return -EINVAL;
+ }
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq_size: %u", vq_size);
+
+ if (!rte_is_power_of_2(vq_size)) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue size is not powerof 2");
+ return -EINVAL;
+ }
+
+ if (queue_type == VTCRYPTO_DATAQ) {
+ snprintf(vq_name, sizeof(vq_name), "dev%d_dataqueue%d",
+ dev->data->dev_id, vtpci_queue_idx);
+ snprintf(mpool_name, sizeof(mpool_name),
+ "dev%d_dataqueue%d_mpool",
+ dev->data->dev_id, vtpci_queue_idx);
+ } else if (queue_type == VTCRYPTO_CTRLQ) {
+ snprintf(vq_name, sizeof(vq_name), "dev%d_controlqueue",
+ dev->data->dev_id);
+ snprintf(mpool_name, sizeof(mpool_name),
+ "dev%d_controlqueue_mpool",
+ dev->data->dev_id);
+ }
+ size = RTE_ALIGN_CEIL(sizeof(*vq) +
+ vq_size * sizeof(struct vq_desc_extra),
+ RTE_CACHE_LINE_SIZE);
+ vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (vq == NULL) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("Can not allocate virtqueue");
+ return -ENOMEM;
+ }
+
+ if (queue_type == VTCRYPTO_DATAQ) {
+ /* pre-allocate a mempool and use it in the data plane to
+ * improve performance
+ */
+ vq->mpool = rte_mempool_lookup(mpool_name);
+ if (vq->mpool == NULL)
+ vq->mpool = rte_mempool_create(mpool_name,
+ vq_size,
+ sizeof(struct virtio_crypto_op_cookie),
+ RTE_CACHE_LINE_SIZE, 0,
+ NULL, NULL, NULL, NULL, socket_id,
+ 0);
+ if (!vq->mpool) {
+ VIRTIO_CRYPTO_DRV_LOG_ERR("Virtio Crypto PMD "
+ "Cannot create mempool");
+ goto mpool_create_err;
+ }
+ for (i = 0; i < vq_size; i++) {
+ vq->vq_descx[i].cookie =
+ rte_zmalloc("crypto PMD op cookie pointer",
+ sizeof(struct virtio_crypto_op_cookie),
+ RTE_CACHE_LINE_SIZE);
+ if (vq->vq_descx[i].cookie == NULL) {
+ VIRTIO_CRYPTO_DRV_LOG_ERR("Failed to "
+ "alloc mem for cookie");
+ goto cookie_alloc_err;
+ }
+ }
+ }
+
+ vq->hw = hw;
+ vq->dev_id = dev->data->dev_id;
+ vq->vq_queue_index = vtpci_queue_idx;
+ vq->vq_nentries = vq_size;
+
+ /*
+ * Using part of the vring entries is permitted, but the maximum
+ * is vq_size
+ */
+ if (nb_desc == 0 || nb_desc > vq_size)
+ nb_desc = vq_size;
+ vq->vq_free_cnt = nb_desc;
+
+ /*
+ * Reserve a memzone for vring elements
+ */
+ size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
+ vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("%s vring_size: %d, rounded_vring_size: %d",
+ (queue_type == VTCRYPTO_DATAQ) ? "dataq" : "ctrlq",
+ size, vq->vq_ring_size);
+
+ mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
+ socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
+ if (mz == NULL) {
+ if (rte_errno == EEXIST)
+ mz = rte_memzone_lookup(vq_name);
+ if (mz == NULL) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("not enough memory");
+ goto mz_reserve_err;
+ }
+ }
+
+ /*
+	 * The virtio PCI VIRTIO_PCI_QUEUE_PFN register is 32 bits wide
+	 * and only accepts a 32 bit page frame number.
+	 * Check that the allocated physical memory does not exceed 16TB.
+ */
+ if ((mz->phys_addr + vq->vq_ring_size - 1)
+ >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be "
+ "above 16TB!");
+ goto vring_addr_err;
+ }
+
+	memset(mz->addr, 0, mz->len);
+ vq->mz = mz;
+ vq->vq_ring_mem = mz->phys_addr;
+ vq->vq_ring_virt_mem = mz->addr;
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_mem(physical): 0x%"PRIx64,
+ (uint64_t)mz->phys_addr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_virt_mem: 0x%"PRIx64,
+ (uint64_t)(uintptr_t)mz->addr);
+
+ *pvq = vq;
+
+ return 0;
+
+vring_addr_err:
+ rte_memzone_free(mz);
+mz_reserve_err:
+cookie_alloc_err:
+ rte_mempool_free(vq->mpool);
+ if (i != 0) {
+ for (j = 0; j < i; j++)
+ rte_free(vq->vq_descx[j].cookie);
+ }
+mpool_create_err:
+ rte_free(vq);
+ return -ENOMEM;
+}
+
+static int
+virtio_crypto_ctrlq_setup(struct rte_cryptodev *dev, uint16_t queue_idx)
+{
+ int ret;
+ struct virtqueue *vq;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ /* if virtio device has started, do not touch the virtqueues */
+ if (dev->data->dev_started)
+ return 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = virtio_crypto_queue_setup(dev, VTCRYPTO_CTRLQ, queue_idx,
+ 0, SOCKET_ID_ANY, &vq);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("control vq initialization failed");
+ return ret;
+ }
+
+ hw->cvq = vq;
+
+ return 0;
+}
+
+static void
+virtio_crypto_free_queues(struct rte_cryptodev *dev)
+{
+ unsigned int i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* control queue release */
+ virtio_crypto_queue_release(hw->cvq);
+
+ /* data queue release */
+ for (i = 0; i < hw->max_dataqueues; i++)
+ virtio_crypto_queue_release(dev->data->queue_pairs[i]);
+}
+
+static int
+virtio_crypto_dev_close(struct rte_cryptodev *dev __rte_unused)
+{
+ return 0;
+}
+
+/*
+ * dev_ops for virtio, bare necessities for basic operation
+ */
+static struct rte_cryptodev_ops virtio_crypto_dev_ops = {
+ /* Device related operations */
+ .dev_configure = virtio_crypto_dev_configure,
+ .dev_start = virtio_crypto_dev_start,
+ .dev_stop = virtio_crypto_dev_stop,
+ .dev_close = virtio_crypto_dev_close,
+ .dev_infos_get = virtio_crypto_dev_info_get,
+
+ .stats_get = virtio_crypto_dev_stats_get,
+ .stats_reset = virtio_crypto_dev_stats_reset,
+
+ .queue_pair_setup = virtio_crypto_qp_setup,
+ .queue_pair_release = virtio_crypto_qp_release,
+ .queue_pair_start = NULL,
+ .queue_pair_stop = NULL,
+ .queue_pair_count = NULL,
+
+ /* Crypto related operations */
+ .session_get_size = virtio_crypto_sym_get_session_private_size,
+ .session_configure = virtio_crypto_sym_configure_session,
+ .session_clear = virtio_crypto_sym_clear_session,
+ .qp_attach_session = NULL,
+ .qp_detach_session = NULL
+};
+
+static void
+virtio_crypto_update_stats(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ unsigned int i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (stats == NULL) {
+ VIRTIO_CRYPTO_DRV_LOG_ERR("invalid pointer");
+ return;
+ }
+
+ for (i = 0; i < hw->max_dataqueues; i++) {
+ const struct virtqueue *data_queue
+ = dev->data->queue_pairs[i];
+ if (data_queue == NULL)
+ continue;
+
+ stats->enqueued_count += data_queue->packets_sent_total;
+ stats->enqueue_err_count += data_queue->packets_sent_failed;
+
+ stats->dequeued_count += data_queue->packets_received_total;
+ stats->dequeue_err_count
+ += data_queue->packets_received_failed;
+ }
+}
+
+static void
+virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ virtio_crypto_update_stats(dev, stats);
+}
+
+static void
+virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev)
+{
+ unsigned int i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < hw->max_dataqueues; i++) {
+ struct virtqueue *data_queue = dev->data->queue_pairs[i];
+ if (data_queue == NULL)
+ continue;
+
+ data_queue->packets_sent_total = 0;
+ data_queue->packets_sent_failed = 0;
+
+ data_queue->packets_received_total = 0;
+ data_queue->packets_received_failed = 0;
+ }
+}
+
+static int
+virtio_crypto_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id,
+ struct rte_mempool *session_pool __rte_unused)
+{
+ int ret;
+ struct virtqueue *vq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* if virtio dev is started, do not touch the virtqueues */
+ if (dev->data->dev_started)
+ return 0;
+
+ ret = virtio_crypto_queue_setup(dev, VTCRYPTO_DATAQ, queue_pair_id,
+ qp_conf->nb_descriptors, socket_id, &vq);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR(
+ "virtio crypto data queue initialization failed\n");
+ return ret;
+ }
+
+ dev->data->queue_pairs[queue_pair_id] = vq;
+
+ return 0;
+}
+
+static int
+virtio_crypto_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
+{
+ struct virtqueue *vq
+ = (struct virtqueue *)dev->data->queue_pairs[queue_pair_id];
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (vq == NULL) {
+ VIRTIO_CRYPTO_DRV_LOG_DBG("vq already freed");
+ return 0;
+ }
+
+ virtio_crypto_queue_release(vq);
+ return 0;
+}
+
+static int
+virtio_negotiate_features(struct virtio_crypto_hw *hw, uint64_t req_features)
+{
+ uint64_t host_features;
+
+ PMD_INIT_FUNC_TRACE();
+
+	/* Prepare guest_features: the features this driver wants to support */
+ VIRTIO_CRYPTO_INIT_LOG_DBG("guest_features before negotiate = %" PRIx64,
+ req_features);
+
+ /* Read device(host) feature bits */
+ host_features = VTPCI_OPS(hw)->get_features(hw);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("host_features before negotiate = %" PRIx64,
+ host_features);
+
+ /*
+	 * Negotiate features: the subset of device feature bits supported by
+	 * the driver is written back as the guest feature bits.
+ */
+ hw->guest_features = req_features;
+ hw->guest_features = vtpci_cryptodev_negotiate_features(hw,
+ host_features);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("features after negotiate = %" PRIx64,
+ hw->guest_features);
+
+ if (hw->modern) {
+ if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR(
+ "VIRTIO_F_VERSION_1 features is not enabled.");
+ return -1;
+ }
+ vtpci_cryptodev_set_status(hw,
+ VIRTIO_CONFIG_STATUS_FEATURES_OK);
+ if (!(vtpci_cryptodev_get_status(hw) &
+ VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("failed to set FEATURES_OK "
+ "status!");
+ return -1;
+ }
+ }
+
+ hw->req_guest_features = req_features;
+
+ return 0;
+}
+
+/* reset device and renegotiate features if needed */
+static int
+virtio_crypto_init_device(struct rte_cryptodev *cryptodev,
+ uint64_t req_features)
+{
+ struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
+ struct virtio_crypto_config local_config;
+ struct virtio_crypto_config *config = &local_config;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Reset the device although not necessary at startup */
+ vtpci_cryptodev_reset(hw);
+
+ /* Tell the host we've noticed this device. */
+ vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
+
+	/* Tell the host we know how to drive the device. */
+ vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
+ if (virtio_negotiate_features(hw, req_features) < 0)
+ return -1;
+
+ /* Get status of the device */
+ vtpci_read_cryptodev_config(hw,
+ offsetof(struct virtio_crypto_config, status),
+ &config->status, sizeof(config->status));
+ if (config->status != VIRTIO_CRYPTO_S_HW_READY) {
+ VIRTIO_CRYPTO_DRV_LOG_ERR("accelerator hardware is "
+ "not ready");
+ return -1;
+ }
+
+ /* Get number of data queues */
+ vtpci_read_cryptodev_config(hw,
+ offsetof(struct virtio_crypto_config, max_dataqueues),
+ &config->max_dataqueues,
+ sizeof(config->max_dataqueues));
+ hw->max_dataqueues = config->max_dataqueues;
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("hw->max_dataqueues=%d",
+ hw->max_dataqueues);
+
+ return 0;
+}
+
+/*
+ * This function does the real work of the PCI probe() callback.
+ * It returns 0 on success.
+ */
+static int
+crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,
+ struct rte_cryptodev_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *cryptodev;
+ struct virtio_crypto_hw *hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
+ init_params);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ cryptodev->driver_id = cryptodev_virtio_driver_id;
+ cryptodev->dev_ops = &virtio_crypto_dev_ops;
+
+ cryptodev->enqueue_burst = virtio_crypto_pkt_tx_burst;
+ cryptodev->dequeue_burst = virtio_crypto_pkt_rx_burst;
+
+ cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+ hw = cryptodev->data->dev_private;
+ hw->dev_id = cryptodev->data->dev_id;
+ hw->virtio_dev_capabilities = virtio_capabilities;
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("dev %d vendorID=0x%x deviceID=0x%x",
+ cryptodev->data->dev_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
+
+ /* pci device init */
+ if (vtpci_cryptodev_init(pci_dev, hw))
+ return -1;
+
+ if (virtio_crypto_init_device(cryptodev,
+ VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int
+virtio_crypto_dev_uninit(struct rte_cryptodev *cryptodev)
+{
+ struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+ return -EPERM;
+
+ if (cryptodev->data->dev_started) {
+ virtio_crypto_dev_stop(cryptodev);
+ virtio_crypto_dev_close(cryptodev);
+ }
+
+ cryptodev->dev_ops = NULL;
+ cryptodev->enqueue_burst = NULL;
+ cryptodev->dequeue_burst = NULL;
+
+ /* release control queue */
+ virtio_crypto_queue_release(hw->cvq);
+
+ rte_free(cryptodev->data);
+ cryptodev->data = NULL;
+
+ VIRTIO_CRYPTO_DRV_LOG_INFO("dev_uninit completed");
+
+ return 0;
+}
+
+static int
+virtio_crypto_dev_configure(struct rte_cryptodev *cryptodev,
+ struct rte_cryptodev_config *config __rte_unused)
+{
+ struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (virtio_crypto_init_device(cryptodev,
+ VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
+ return -1;
+
+	/* Set up the control queue:
+	 * queues [0, 1, ..., (config->max_dataqueues - 1)] are data queues,
+	 * and queue index config->max_dataqueues is the control queue.
+ */
+ if (virtio_crypto_ctrlq_setup(cryptodev, hw->max_dataqueues) < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("control queue setup error");
+ return -1;
+ }
+ virtio_crypto_ctrlq_start(cryptodev);
+
+ return 0;
+}
+
+static void
+virtio_crypto_dev_stop(struct rte_cryptodev *dev)
+{
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+ VIRTIO_CRYPTO_DRV_LOG_DBG("virtio_dev_stop");
+
+ vtpci_cryptodev_reset(hw);
+
+ virtio_crypto_dev_free_mbufs(dev);
+ virtio_crypto_free_queues(dev);
+
+ dev->data->dev_started = 0;
+}
+
+static int
+virtio_crypto_dev_start(struct rte_cryptodev *dev)
+{
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ if (dev->data->dev_started)
+ return 0;
+
+ /* Do final configuration before queue engine starts */
+ virtio_crypto_dataq_start(dev);
+ vtpci_cryptodev_reinit_complete(hw);
+
+ dev->data->dev_started = 1;
+
+ return 0;
+}
+
+static void
+virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev)
+{
+ uint32_t i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ for (i = 0; i < hw->max_dataqueues; i++) {
+ VIRTIO_CRYPTO_INIT_LOG_DBG("Before freeing dataq[%d] used "
+ "and unused buf", i);
+ VIRTQUEUE_DUMP((struct virtqueue *)
+ dev->data->queue_pairs[i]);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("queue_pairs[%d]=%p",
+ i, dev->data->queue_pairs[i]);
+
+ virtqueue_detatch_unused(dev->data->queue_pairs[i]);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("After freeing dataq[%d] used and "
+ "unused buf", i);
+ VIRTQUEUE_DUMP(
+ (struct virtqueue *)dev->data->queue_pairs[i]);
+ }
+}
+
+static unsigned int
+virtio_crypto_sym_get_session_private_size(
+ struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return RTE_ALIGN_CEIL(sizeof(struct virtio_crypto_session), 16);
+}
+
+static int
+virtio_crypto_check_sym_session_paras(
+ struct rte_cryptodev *dev)
+{
+ struct virtio_crypto_hw *hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (unlikely(dev == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("dev is NULL");
+ return -1;
+ }
+ if (unlikely(dev->data == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("dev->data is NULL");
+ return -1;
+ }
+ hw = dev->data->dev_private;
+ if (unlikely(hw == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("hw is NULL");
+ return -1;
+ }
+ if (unlikely(hw->cvq == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("vq is NULL");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+virtio_crypto_check_sym_clear_session_paras(
+ struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (sess == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("sym_session is NULL");
+ return -1;
+ }
+
+ return virtio_crypto_check_sym_session_paras(dev);
+}
+
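+/* Number of indirect descriptors needed to destroy a session:
+ * ctrl request and returned status.
+ */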
+#define NUM_ENTRY_SYM_CLEAR_SESSION 2
+
+static void
+virtio_crypto_sym_clear_session(
+ struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ struct virtio_crypto_hw *hw;
+ struct virtqueue *vq;
+ struct virtio_crypto_session *session;
+ struct virtio_crypto_op_ctrl_req *ctrl;
+ struct vring_desc *desc;
+ uint8_t *status;
+ uint8_t needed = 1;
+ uint32_t head;
+ uint8_t *malloc_virt_addr;
+ uint64_t malloc_phys_addr;
+ uint8_t len_inhdr = sizeof(struct virtio_crypto_inhdr);
+ uint32_t len_op_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
+ uint32_t desc_offset = len_op_ctrl_req + len_inhdr;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (virtio_crypto_check_sym_clear_session_paras(dev, sess) < 0)
+ return;
+
+ hw = dev->data->dev_private;
+ vq = hw->cvq;
+ session = (struct virtio_crypto_session *)get_session_private_data(
+ sess, cryptodev_virtio_driver_id);
+ if (session == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid session parameter");
+ return;
+ }
+
+ VIRTIO_CRYPTO_SESSION_LOG_INFO("vq->vq_desc_head_idx = %d, "
+ "vq = %p", vq->vq_desc_head_idx, vq);
+
+ if (vq->vq_free_cnt < needed) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "vq->vq_free_cnt = %d is less than %d, "
+ "not enough", vq->vq_free_cnt, needed);
+ return;
+ }
+
+ /*
+	 * Allocate one buffer to hold the ctrl request, the returned
+	 * status and the indirect descriptor table
+ */
+ malloc_virt_addr = rte_malloc(NULL, len_op_ctrl_req + len_inhdr
+ + NUM_ENTRY_SYM_CLEAR_SESSION
+ * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
+ if (malloc_virt_addr == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap room");
+ return;
+ }
+ malloc_phys_addr = rte_malloc_virt2iova(malloc_virt_addr);
+
+ /* assign ctrl request op part */
+ ctrl = (struct virtio_crypto_op_ctrl_req *)malloc_virt_addr;
+ ctrl->header.opcode = VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION;
+ /* default data virtqueue is 0 */
+ ctrl->header.queue_id = 0;
+ ctrl->u.destroy_session.session_id = session->session_id;
+
+ /* status part */
+ status = &(((struct virtio_crypto_inhdr *)
+ ((uint8_t *)malloc_virt_addr + len_op_ctrl_req))->status);
+ *status = VIRTIO_CRYPTO_ERR;
+
+ /* indirect desc vring part */
+ desc = (struct vring_desc *)((uint8_t *)malloc_virt_addr
+ + desc_offset);
+
+ /* ctrl request part */
+ desc[0].addr = malloc_phys_addr;
+ desc[0].len = len_op_ctrl_req;
+ desc[0].flags = VRING_DESC_F_NEXT;
+ desc[0].next = 1;
+
+ /* status part */
+ desc[1].addr = malloc_phys_addr + len_op_ctrl_req;
+ desc[1].len = len_inhdr;
+ desc[1].flags = VRING_DESC_F_WRITE;
+
+ /* use only a single desc entry */
+ head = vq->vq_desc_head_idx;
+ vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
+ vq->vq_ring.desc[head].addr = malloc_phys_addr + desc_offset;
+ vq->vq_ring.desc[head].len
+ = NUM_ENTRY_SYM_CLEAR_SESSION
+ * sizeof(struct vring_desc);
+ vq->vq_free_cnt -= needed;
+
+ vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
+
+ vq_update_avail_ring(vq, head);
+ vq_update_avail_idx(vq);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
+ vq->vq_queue_index);
+
+ virtqueue_notify(vq);
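+	/* Wait synchronously for the device to report the destroy result */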
+
+ rte_rmb();
+ while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
+ rte_rmb();
+ usleep(100);
+ }
+
+ while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
+ uint32_t idx, desc_idx, used_idx;
+ struct vring_used_elem *uep;
+
+ used_idx = (uint32_t)(vq->vq_used_cons_idx
+ & (vq->vq_nentries - 1));
+ uep = &vq->vq_ring.used->ring[used_idx];
+ idx = (uint32_t) uep->id;
+ desc_idx = idx;
+ while (vq->vq_ring.desc[desc_idx].flags
+ & VRING_DESC_F_NEXT) {
+ desc_idx = vq->vq_ring.desc[desc_idx].next;
+ vq->vq_free_cnt++;
+ }
+
+ vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
+ vq->vq_desc_head_idx = idx;
+ vq->vq_used_cons_idx++;
+ vq->vq_free_cnt++;
+ }
+
+ if (*status != VIRTIO_CRYPTO_OK) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Close session failed "
+ "status=%"PRIu32", session_id=%"PRIu64"",
+ *status, session->session_id);
+ rte_free(malloc_virt_addr);
+ return;
+ }
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
+ "vq->vq_desc_head_idx=%d",
+ vq->vq_free_cnt, vq->vq_desc_head_idx);
+
+ VIRTIO_CRYPTO_SESSION_LOG_INFO("Close session %"PRIu64" successfully ",
+ session->session_id);
+
+ memset(sess, 0, sizeof(struct virtio_crypto_session));
+ rte_free(malloc_virt_addr);
+}
+
+static struct rte_crypto_cipher_xform *
+virtio_crypto_get_cipher_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return &xform->cipher;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+
+static struct rte_crypto_auth_xform *
+virtio_crypto_get_auth_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return &xform->auth;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+
+/** Get xform chain order */
+static int
+virtio_crypto_get_chain_order(struct rte_crypto_sym_xform *xform)
+{
+ if (xform == NULL)
+ return -1;
+
+ /* Cipher Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next == NULL)
+ return VIRTIO_CRYPTO_CMD_CIPHER;
+
+ /* Authentication Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next == NULL)
+ return VIRTIO_CRYPTO_CMD_AUTH;
+
+ /* Authenticate then Cipher */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return VIRTIO_CRYPTO_CMD_HASH_CIPHER;
+
+ /* Cipher then Authenticate */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return VIRTIO_CRYPTO_CMD_CIPHER_HASH;
+
+ return -1;
+}
+
+static int
+virtio_crypto_sym_pad_cipher_param(
+ struct virtio_crypto_cipher_session_para *para,
+ struct rte_crypto_cipher_xform *cipher_xform)
+{
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ para->algo = VIRTIO_CRYPTO_CIPHER_AES_CBC;
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Crypto: Unsupported "
+ "Cipher alg %u", cipher_xform->algo);
+ return -1;
+ }
+
+ para->keylen = cipher_xform->key.length;
+ switch (cipher_xform->op) {
+ case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
+ para->op = VIRTIO_CRYPTO_OP_ENCRYPT;
+ break;
+ case RTE_CRYPTO_CIPHER_OP_DECRYPT:
+ para->op = VIRTIO_CRYPTO_OP_DECRYPT;
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported cipher operation "
+ "parameter");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+virtio_crypto_sym_pad_auth_param(
+ struct virtio_crypto_op_ctrl_req *ctrl,
+ struct rte_crypto_auth_xform *auth_xform)
+{
+ uint32_t *algo;
+ struct virtio_crypto_alg_chain_session_para *para =
+ &(ctrl->u.sym_create_session.u.chain.para);
+
+ switch (ctrl->u.sym_create_session.u.chain.para.hash_mode) {
+ case VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN:
+ algo = &(para->u.hash_param.algo);
+ break;
+ case VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH:
+ algo = &(para->u.mac_param.algo);
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported hash mode %u "
+ "specified",
+ ctrl->u.sym_create_session.u.chain.para.hash_mode);
+ return -1;
+ }
+
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ *algo = VIRTIO_CRYPTO_MAC_HMAC_SHA1;
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "Crypto: Undefined Hash algo %u specified",
+ auth_xform->algo);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+virtio_crypto_sym_pad_op_ctrl_req(
+ struct virtio_crypto_op_ctrl_req *ctrl,
+ struct rte_crypto_sym_xform *xform, bool is_chainned,
+ uint8_t **cipher_key_data, uint8_t **auth_key_data,
+ struct virtio_crypto_session *session)
+{
+ int ret;
+ struct rte_crypto_auth_xform *auth_xform = NULL;
+ struct rte_crypto_cipher_xform *cipher_xform = NULL;
+
+ /* Get cipher xform from crypto xform chain */
+ cipher_xform = virtio_crypto_get_cipher_xform(xform);
+ if (cipher_xform) {
+ if (is_chainned)
+ ret = virtio_crypto_sym_pad_cipher_param(
+ &ctrl->u.sym_create_session.u.chain.para
+ .cipher_param, cipher_xform);
+ else
+ ret = virtio_crypto_sym_pad_cipher_param(
+ &ctrl->u.sym_create_session.u.cipher.para,
+ cipher_xform);
+
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "pad cipher parameter failed");
+ return -1;
+ }
+
+ *cipher_key_data = cipher_xform->key.data;
+
+ session->iv.offset = cipher_xform->iv.offset;
+ session->iv.length = cipher_xform->iv.length;
+ }
+
+ /* Get auth xform from crypto xform chain */
+ auth_xform = virtio_crypto_get_auth_xform(xform);
+ if (auth_xform) {
+ /* FIXME: support VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */
+ struct virtio_crypto_alg_chain_session_para *para =
+ &(ctrl->u.sym_create_session.u.chain.para);
+ if (auth_xform->key.length) {
+ para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH;
+ para->u.mac_param.auth_key_len =
+ (uint32_t)auth_xform->key.length;
+ para->u.mac_param.hash_result_len =
+ auth_xform->digest_length;
+
+ *auth_key_data = auth_xform->key.data;
+ } else {
+ para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN;
+ para->u.hash_param.hash_result_len =
+ auth_xform->digest_length;
+ }
+
+ ret = virtio_crypto_sym_pad_auth_param(ctrl, auth_xform);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("pad auth parameter "
+ "failed");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+virtio_crypto_check_sym_configure_session_paras(
+ struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sym_sess,
+ struct rte_mempool *mempool)
+{
+ if (unlikely(xform == NULL) || unlikely(sym_sess == NULL) ||
+ unlikely(mempool == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("NULL pointer");
+ return -1;
+ }
+
+ if (virtio_crypto_check_sym_session_paras(dev) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int
+virtio_crypto_sym_configure_session(
+ struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ int ret;
+ struct virtio_crypto_session crypto_sess;
+ void *session_private = &crypto_sess;
+ struct virtio_crypto_session *session;
+ struct virtio_crypto_op_ctrl_req *ctrl_req;
+ enum virtio_crypto_cmd_id cmd_id;
+ uint8_t *cipher_key_data = NULL;
+ uint8_t *auth_key_data = NULL;
+ struct virtio_crypto_hw *hw;
+ struct virtqueue *control_vq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = virtio_crypto_check_sym_configure_session_paras(dev, xform,
+ sess, mempool);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid parameters");
+ return ret;
+ }
+
+ if (rte_mempool_get(mempool, &session_private)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ session = (struct virtio_crypto_session *)session_private;
+ memset(session, 0, sizeof(struct virtio_crypto_session));
+ ctrl_req = &session->ctrl;
+ ctrl_req->header.opcode = VIRTIO_CRYPTO_CIPHER_CREATE_SESSION;
+ /* FIXME: support multiqueue */
+ ctrl_req->header.queue_id = 0;
+
+ hw = dev->data->dev_private;
+ control_vq = hw->cvq;
+
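+	/* Translate the xform chain order into the virtio algorithm
+	 * chaining order before building the session-create request.
+	 */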
+ cmd_id = virtio_crypto_get_chain_order(xform);
+ if (cmd_id == VIRTIO_CRYPTO_CMD_CIPHER_HASH)
+ ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
+ = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
+ if (cmd_id == VIRTIO_CRYPTO_CMD_HASH_CIPHER)
+ ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
+ = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;
+
+ switch (cmd_id) {
+ case VIRTIO_CRYPTO_CMD_CIPHER_HASH:
+ case VIRTIO_CRYPTO_CMD_HASH_CIPHER:
+ ctrl_req->u.sym_create_session.op_type
+ = VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;
+
+ ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req,
+ xform, true, &cipher_key_data, &auth_key_data, session);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "padding sym op ctrl req failed");
+ goto error_out;
+ }
+ ret = virtio_crypto_send_command(control_vq, ctrl_req,
+ cipher_key_data, auth_key_data, session);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "create session failed: %d", ret);
+ goto error_out;
+ }
+ break;
+ case VIRTIO_CRYPTO_CMD_CIPHER:
+ ctrl_req->u.sym_create_session.op_type
+ = VIRTIO_CRYPTO_SYM_OP_CIPHER;
+ ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req, xform,
+ false, &cipher_key_data, &auth_key_data, session);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "padding sym op ctrl req failed");
+ goto error_out;
+ }
+ ret = virtio_crypto_send_command(control_vq, ctrl_req,
+ cipher_key_data, NULL, session);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "create session failed: %d", ret);
+ goto error_out;
+ }
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "Unsupported operation chain order parameter");
+ goto error_out;
+ }
+
+ set_session_private_data(sess, dev->driver_id,
+ session_private);
+
+ return 0;
+
+error_out:
+ return -1;
+}
+
+static void
+virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (info != NULL) {
+ info->driver_id = cryptodev_virtio_driver_id;
+ info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ info->feature_flags = dev->feature_flags;
+ info->max_nb_queue_pairs = hw->max_dataqueues;
+ info->sym.max_nb_sessions =
+ RTE_VIRTIO_CRYPTO_PMD_MAX_NB_SESSIONS;
+ info->capabilities = hw->virtio_dev_capabilities;
+ }
+}
+
+static int
+crypto_virtio_pci_probe(
+ struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = rte_socket_id(),
+ .private_data_size = sizeof(struct virtio_crypto_hw),
+ .max_nb_sessions = RTE_VIRTIO_CRYPTO_PMD_MAX_NB_SESSIONS
+ };
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ VIRTIO_CRYPTO_DRV_LOG_DBG("Found Crypto device at %02x:%02x.%x",
+ pci_dev->addr.bus,
+ pci_dev->addr.devid,
+ pci_dev->addr.function);
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+ return crypto_virtio_create(name, pci_dev, &init_params);
+}
+
+static int
+crypto_virtio_pci_remove(
+ struct rte_pci_device *pci_dev __rte_unused)
+{
+ struct rte_cryptodev *cryptodev;
+ char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, cryptodev_name,
+ sizeof(cryptodev_name));
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return virtio_crypto_dev_uninit(cryptodev);
+}
+
+static struct rte_pci_driver rte_virtio_crypto_driver = {
+ .id_table = pci_id_virtio_crypto_map,
+ .drv_flags = 0,
+ .probe = crypto_virtio_pci_probe,
+ .remove = crypto_virtio_pci_remove
+};
+
+static struct cryptodev_driver virtio_crypto_drv;
+
+RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_VIRTIO_PMD, rte_virtio_crypto_driver);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(virtio_crypto_drv,
+ rte_virtio_crypto_driver.driver,
+ cryptodev_virtio_driver_id);
+
+RTE_INIT(virtio_crypto_init_log);
+static void
+virtio_crypto_init_log(void)
+{
+ virtio_crypto_logtype_init = rte_log_register("pmd.crypto.virtio.init");
+ if (virtio_crypto_logtype_init >= 0)
+ rte_log_set_level(virtio_crypto_logtype_init, RTE_LOG_NOTICE);
+
+ virtio_crypto_logtype_session =
+ rte_log_register("pmd.crypto.virtio.session");
+ if (virtio_crypto_logtype_session >= 0)
+ rte_log_set_level(virtio_crypto_logtype_session,
+ RTE_LOG_NOTICE);
+
+ virtio_crypto_logtype_rx = rte_log_register("pmd.crypto.virtio.rx");
+ if (virtio_crypto_logtype_rx >= 0)
+ rte_log_set_level(virtio_crypto_logtype_rx, RTE_LOG_NOTICE);
+
+ virtio_crypto_logtype_tx = rte_log_register("pmd.crypto.virtio.tx");
+ if (virtio_crypto_logtype_tx >= 0)
+ rte_log_set_level(virtio_crypto_logtype_tx, RTE_LOG_NOTICE);
+
+ virtio_crypto_logtype_driver =
+ rte_log_register("pmd.crypto.virtio.driver");
+ if (virtio_crypto_logtype_driver >= 0)
+ rte_log_set_level(virtio_crypto_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/drivers/crypto/virtio/virtio_cryptodev.h b/drivers/crypto/virtio/virtio_cryptodev.h
new file mode 100644
index 00000000..e402c030
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_cryptodev.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_CRYPTODEV_H_
+#define _VIRTIO_CRYPTODEV_H_
+
+#include "virtio_crypto.h"
+#include "virtio_pci.h"
+#include "virtio_ring.h"
+
+/* Features desired/implemented by this driver. */
+#define VIRTIO_CRYPTO_PMD_GUEST_FEATURES (1ULL << VIRTIO_F_VERSION_1)
+
+#define CRYPTODEV_NAME_VIRTIO_PMD crypto_virtio
+
+#define NUM_ENTRY_VIRTIO_CRYPTO_OP 7
+
+extern uint8_t cryptodev_virtio_driver_id;
+
+enum virtio_crypto_cmd_id {
+ VIRTIO_CRYPTO_CMD_CIPHER = 0,
+ VIRTIO_CRYPTO_CMD_AUTH = 1,
+ VIRTIO_CRYPTO_CMD_CIPHER_HASH = 2,
+ VIRTIO_CRYPTO_CMD_HASH_CIPHER = 3
+};
+
+struct virtio_crypto_op_cookie {
+ struct virtio_crypto_op_data_req data_req;
+ struct virtio_crypto_inhdr inhdr;
+ struct vring_desc desc[NUM_ENTRY_VIRTIO_CRYPTO_OP];
+};
+
+/*
+ * Control queue function prototype
+ */
+void virtio_crypto_ctrlq_start(struct rte_cryptodev *dev);
+
+/*
+ * Data queue function prototype
+ */
+void virtio_crypto_dataq_start(struct rte_cryptodev *dev);
+
+int virtio_crypto_queue_setup(struct rte_cryptodev *dev,
+ int queue_type,
+ uint16_t vtpci_queue_idx,
+ uint16_t nb_desc,
+ int socket_id,
+ struct virtqueue **pvq);
+
+void virtio_crypto_queue_release(struct virtqueue *vq);
+
+uint16_t virtio_crypto_pkt_tx_burst(void *tx_queue,
+ struct rte_crypto_op **tx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t virtio_crypto_pkt_rx_burst(void *tx_queue,
+ struct rte_crypto_op **tx_pkts,
+ uint16_t nb_pkts);
+
+#endif /* _VIRTIO_CRYPTODEV_H_ */
diff --git a/drivers/crypto/virtio/virtio_logs.h b/drivers/crypto/virtio/virtio_logs.h
new file mode 100644
index 00000000..26a286cf
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_logs.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_LOGS_H_
+#define _VIRTIO_LOGS_H_
+
+#include <rte_log.h>
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \
+ "PMD: %s(): " fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+extern int virtio_crypto_logtype_init;
+
+#define VIRTIO_CRYPTO_INIT_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_init, \
+ "INIT: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_INIT_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_INIT_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_INIT_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_INIT_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_INIT_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_INIT_LOG_IMPL(ERR, fmt, ## args)
+
+extern int virtio_crypto_logtype_session;
+
+#define VIRTIO_CRYPTO_SESSION_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_session, \
+ "SESSION: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_SESSION_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_SESSION_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_SESSION_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_SESSION_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_SESSION_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_SESSION_LOG_IMPL(ERR, fmt, ## args)
+
+extern int virtio_crypto_logtype_rx;
+
+#define VIRTIO_CRYPTO_RX_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_rx, \
+ "RX: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_RX_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_RX_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_RX_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_RX_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_RX_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_RX_LOG_IMPL(ERR, fmt, ## args)
+
+extern int virtio_crypto_logtype_tx;
+
+#define VIRTIO_CRYPTO_TX_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_tx, \
+ "TX: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_TX_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_TX_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_TX_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_TX_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_TX_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_TX_LOG_IMPL(ERR, fmt, ## args)
+
+extern int virtio_crypto_logtype_driver;
+
+#define VIRTIO_CRYPTO_DRV_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_driver, \
+ "DRIVER: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_DRV_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_DRV_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_DRV_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_DRV_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_DRV_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_DRV_LOG_IMPL(ERR, fmt, ## args)
+
+#endif /* _VIRTIO_LOGS_H_ */
diff --git a/drivers/crypto/virtio/virtio_pci.c b/drivers/crypto/virtio/virtio_pci.c
new file mode 100644
index 00000000..832c465b
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_pci.c
@@ -0,0 +1,462 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#include <stdint.h>
+
+#ifdef RTE_EXEC_ENV_LINUXAPP
+ #include <dirent.h>
+ #include <fcntl.h>
+#endif
+
+#include <rte_io.h>
+#include <rte_bus.h>
+
+#include "virtio_pci.h"
+#include "virtqueue.h"
+
+/*
+ * Following macros are derived from linux/pci_regs.h, however,
+ * we can't simply include that header here, as there is no such
+ * file on non-Linux platforms.
+ */
+#define PCI_CAPABILITY_LIST 0x34
+#define PCI_CAP_ID_VNDR 0x09
+#define PCI_CAP_ID_MSIX 0x11
+
+/*
+ * The remaining space is defined by each driver as the per-driver
+ * configuration space.
+ */
+#define VIRTIO_PCI_CONFIG(hw) \
+ (((hw)->use_msix == VIRTIO_MSIX_ENABLED) ? 24 : 20)
+
+struct virtio_hw_internal virtio_hw_internal[RTE_MAX_VIRTIO_CRYPTO];
+
+static inline int
+check_vq_phys_addr_ok(struct virtqueue *vq)
+{
+	/* The virtio PCI VIRTIO_PCI_QUEUE_PFN register is 32 bits wide
+	 * and only accepts a 32 bit page frame number.
+	 * Check that the allocated physical memory does not exceed 16TB.
+ */
+ if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
+ (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be above 16TB!");
+ return 0;
+ }
+
+ return 1;
+}
+
+static inline void
+io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
+{
+ rte_write32(val & ((1ULL << 32) - 1), lo);
+ rte_write32(val >> 32, hi);
+}
+
+static void
+modern_read_dev_config(struct virtio_crypto_hw *hw, size_t offset,
+ void *dst, int length)
+{
+ int i;
+ uint8_t *p;
+ uint8_t old_gen, new_gen;
+
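+	/* Re-read the whole config if the device bumped config_generation
+	 * while we were reading (virtio 1.0 torn-read protection).
+	 */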
+ do {
+ old_gen = rte_read8(&hw->common_cfg->config_generation);
+
+ p = dst;
+ for (i = 0; i < length; i++)
+ *p++ = rte_read8((uint8_t *)hw->dev_cfg + offset + i);
+
+ new_gen = rte_read8(&hw->common_cfg->config_generation);
+ } while (old_gen != new_gen);
+}
+
+static void
+modern_write_dev_config(struct virtio_crypto_hw *hw, size_t offset,
+ const void *src, int length)
+{
+ int i;
+ const uint8_t *p = src;
+
+ for (i = 0; i < length; i++)
+ rte_write8((*p++), (((uint8_t *)hw->dev_cfg) + offset + i));
+}
+
+static uint64_t
+modern_get_features(struct virtio_crypto_hw *hw)
+{
+ uint32_t features_lo, features_hi;
+
+ rte_write32(0, &hw->common_cfg->device_feature_select);
+ features_lo = rte_read32(&hw->common_cfg->device_feature);
+
+ rte_write32(1, &hw->common_cfg->device_feature_select);
+ features_hi = rte_read32(&hw->common_cfg->device_feature);
+
+ return ((uint64_t)features_hi << 32) | features_lo;
+}
+
+static void
+modern_set_features(struct virtio_crypto_hw *hw, uint64_t features)
+{
+ rte_write32(0, &hw->common_cfg->guest_feature_select);
+ rte_write32(features & ((1ULL << 32) - 1),
+ &hw->common_cfg->guest_feature);
+
+ rte_write32(1, &hw->common_cfg->guest_feature_select);
+ rte_write32(features >> 32,
+ &hw->common_cfg->guest_feature);
+}
+
+static uint8_t
+modern_get_status(struct virtio_crypto_hw *hw)
+{
+ return rte_read8(&hw->common_cfg->device_status);
+}
+
+static void
+modern_set_status(struct virtio_crypto_hw *hw, uint8_t status)
+{
+ rte_write8(status, &hw->common_cfg->device_status);
+}
+
+static void
+modern_reset(struct virtio_crypto_hw *hw)
+{
+ modern_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
+ modern_get_status(hw);
+}
+
+static uint8_t
+modern_get_isr(struct virtio_crypto_hw *hw)
+{
+ return rte_read8(hw->isr);
+}
+
+static uint16_t
+modern_set_config_irq(struct virtio_crypto_hw *hw, uint16_t vec)
+{
+ rte_write16(vec, &hw->common_cfg->msix_config);
+ return rte_read16(&hw->common_cfg->msix_config);
+}
+
+static uint16_t
+modern_set_queue_irq(struct virtio_crypto_hw *hw, struct virtqueue *vq,
+ uint16_t vec)
+{
+ rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+ rte_write16(vec, &hw->common_cfg->queue_msix_vector);
+ return rte_read16(&hw->common_cfg->queue_msix_vector);
+}
+
+static uint16_t
+modern_get_queue_num(struct virtio_crypto_hw *hw, uint16_t queue_id)
+{
+ rte_write16(queue_id, &hw->common_cfg->queue_select);
+ return rte_read16(&hw->common_cfg->queue_size);
+}
+
+static int
+modern_setup_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq)
+{
+ uint64_t desc_addr, avail_addr, used_addr;
+ uint16_t notify_off;
+
+ if (!check_vq_phys_addr_ok(vq))
+ return -1;
+
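+	/* The desc table, avail ring and used ring live in one contiguous
+	 * region reserved at queue setup; derive their addresses here.
+	 */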
+ desc_addr = vq->vq_ring_mem;
+ avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
+ used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
+ ring[vq->vq_nentries]),
+ VIRTIO_PCI_VRING_ALIGN);
+
+ rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+
+ io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
+ &hw->common_cfg->queue_desc_hi);
+ io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,
+ &hw->common_cfg->queue_avail_hi);
+ io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
+ &hw->common_cfg->queue_used_hi);
+
+ notify_off = rte_read16(&hw->common_cfg->queue_notify_off);
+ vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
+ notify_off * hw->notify_off_multiplier);
+
+ rte_write16(1, &hw->common_cfg->queue_enable);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("queue %u addresses:", vq->vq_queue_index);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("\t desc_addr: %" PRIx64, desc_addr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("\t aval_addr: %" PRIx64, avail_addr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("\t used_addr: %" PRIx64, used_addr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("\t notify addr: %p (notify offset: %u)",
+ vq->notify_addr, notify_off);
+
+ return 0;
+}
+
+static void
+modern_del_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq)
+{
+ rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+
+ io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
+ &hw->common_cfg->queue_desc_hi);
+ io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,
+ &hw->common_cfg->queue_avail_hi);
+ io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
+ &hw->common_cfg->queue_used_hi);
+
+ rte_write16(0, &hw->common_cfg->queue_enable);
+}
+
+static void
+modern_notify_queue(struct virtio_crypto_hw *hw __rte_unused,
+ struct virtqueue *vq)
+{
+ rte_write16(vq->vq_queue_index, vq->notify_addr);
+}
+
+const struct virtio_pci_ops virtio_crypto_modern_ops = {
+ .read_dev_cfg = modern_read_dev_config,
+ .write_dev_cfg = modern_write_dev_config,
+ .reset = modern_reset,
+ .get_status = modern_get_status,
+ .set_status = modern_set_status,
+ .get_features = modern_get_features,
+ .set_features = modern_set_features,
+ .get_isr = modern_get_isr,
+ .set_config_irq = modern_set_config_irq,
+ .set_queue_irq = modern_set_queue_irq,
+ .get_queue_num = modern_get_queue_num,
+ .setup_queue = modern_setup_queue,
+ .del_queue = modern_del_queue,
+ .notify_queue = modern_notify_queue,
+};
+
+void
+vtpci_read_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
+ void *dst, int length)
+{
+ VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
+}
+
+void
+vtpci_write_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
+ const void *src, int length)
+{
+ VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
+}
+
+uint64_t
+vtpci_cryptodev_negotiate_features(struct virtio_crypto_hw *hw,
+ uint64_t host_features)
+{
+ uint64_t features;
+
+ /*
+ * Limit negotiated features to what the driver, virtqueue, and
+ * host all support.
+ */
+ features = host_features & hw->guest_features;
+ VTPCI_OPS(hw)->set_features(hw, features);
+
+ return features;
+}
+
+void
+vtpci_cryptodev_reset(struct virtio_crypto_hw *hw)
+{
+ VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
+ /* flush status write */
+ VTPCI_OPS(hw)->get_status(hw);
+}
+
+void
+vtpci_cryptodev_reinit_complete(struct virtio_crypto_hw *hw)
+{
+ vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
+}
+
+void
+vtpci_cryptodev_set_status(struct virtio_crypto_hw *hw, uint8_t status)
+{
+ if (status != VIRTIO_CONFIG_STATUS_RESET)
+ status |= VTPCI_OPS(hw)->get_status(hw);
+
+ VTPCI_OPS(hw)->set_status(hw, status);
+}
+
+uint8_t
+vtpci_cryptodev_get_status(struct virtio_crypto_hw *hw)
+{
+ return VTPCI_OPS(hw)->get_status(hw);
+}
+
+uint8_t
+vtpci_cryptodev_isr(struct virtio_crypto_hw *hw)
+{
+ return VTPCI_OPS(hw)->get_isr(hw);
+}
+
+static void *
+get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
+{
+ uint8_t bar = cap->bar;
+ uint32_t length = cap->length;
+ uint32_t offset = cap->offset;
+ uint8_t *base;
+
+ if (bar >= PCI_MAX_RESOURCE) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("invalid bar: %u", bar);
+ return NULL;
+ }
+
+ if (offset + length < offset) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("offset(%u) + length(%u) overflows",
+ offset, length);
+ return NULL;
+ }
+
+ if (offset + length > dev->mem_resource[bar].len) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR(
+ "invalid cap: overflows bar space: %u > %" PRIu64,
+ offset + length, dev->mem_resource[bar].len);
+ return NULL;
+ }
+
+ base = dev->mem_resource[bar].addr;
+ if (base == NULL) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("bar %u base addr is NULL", bar);
+ return NULL;
+ }
+
+ return base + offset;
+}
+
+#define PCI_MSIX_ENABLE 0x8000
+
+static int
+virtio_read_caps(struct rte_pci_device *dev, struct virtio_crypto_hw *hw)
+{
+ uint8_t pos;
+ struct virtio_pci_cap cap;
+ int ret;
+
+ if (rte_pci_map_device(dev)) {
+ VIRTIO_CRYPTO_INIT_LOG_DBG("failed to map pci device!");
+ return -1;
+ }
+
+ ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_DBG("failed to read pci capability list");
+ return -1;
+ }
+
+ while (pos) {
+ ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR(
+ "failed to read pci cap at pos: %x", pos);
+ break;
+ }
+
+ if (cap.cap_vndr == PCI_CAP_ID_MSIX) {
+			/* Transitional devices also expose this capability,
+			 * so we additionally check whether MSI-X is enabled.
+ * 1st byte is cap ID; 2nd byte is the position of next
+ * cap; next two bytes are the flags.
+ */
+ uint16_t flags = ((uint16_t *)&cap)[1];
+
+ if (flags & PCI_MSIX_ENABLE)
+ hw->use_msix = VIRTIO_MSIX_ENABLED;
+ else
+ hw->use_msix = VIRTIO_MSIX_DISABLED;
+ }
+
+ if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
+ VIRTIO_CRYPTO_INIT_LOG_DBG(
+ "[%2x] skipping non VNDR cap id: %02x",
+ pos, cap.cap_vndr);
+ goto next;
+ }
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG(
+ "[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
+ pos, cap.cfg_type, cap.bar, cap.offset, cap.length);
+
+ switch (cap.cfg_type) {
+ case VIRTIO_PCI_CAP_COMMON_CFG:
+ hw->common_cfg = get_cfg_addr(dev, &cap);
+ break;
+ case VIRTIO_PCI_CAP_NOTIFY_CFG:
+ rte_pci_read_config(dev, &hw->notify_off_multiplier,
+ 4, pos + sizeof(cap));
+ hw->notify_base = get_cfg_addr(dev, &cap);
+ break;
+ case VIRTIO_PCI_CAP_DEVICE_CFG:
+ hw->dev_cfg = get_cfg_addr(dev, &cap);
+ break;
+ case VIRTIO_PCI_CAP_ISR_CFG:
+ hw->isr = get_cfg_addr(dev, &cap);
+ break;
+ }
+
+next:
+ pos = cap.cap_next;
+ }
+
+ if (hw->common_cfg == NULL || hw->notify_base == NULL ||
+ hw->dev_cfg == NULL || hw->isr == NULL) {
+ VIRTIO_CRYPTO_INIT_LOG_INFO("no modern virtio pci device found.");
+ return -1;
+ }
+
+ VIRTIO_CRYPTO_INIT_LOG_INFO("found modern virtio pci device.");
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("common cfg mapped at: %p", hw->common_cfg);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("device cfg mapped at: %p", hw->dev_cfg);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("isr cfg mapped at: %p", hw->isr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("notify base: %p, notify off multiplier: %u",
+ hw->notify_base, hw->notify_off_multiplier);
+
+ return 0;
+}
+
+/*
+ * Return -1:
+ *   if there is an error mapping the device with VFIO/UIO.
+ *   if the port map fails when the driver type is KDRV_NONE.
+ *   if the device is whitelisted but the driver type is KDRV_UNKNOWN.
+ * Return 1 if a kernel driver is managing the device.
+ * Return 0 on success.
+ */
+int
+vtpci_cryptodev_init(struct rte_pci_device *dev, struct virtio_crypto_hw *hw)
+{
+ /*
+	 * Check whether we can read the virtio PCI caps, which exist
+	 * only on modern PCI devices. If that fails, fall back to
+	 * legacy virtio handling.
+ */
+ if (virtio_read_caps(dev, hw) == 0) {
+ VIRTIO_CRYPTO_INIT_LOG_INFO("modern virtio pci detected.");
+ virtio_hw_internal[hw->dev_id].vtpci_ops =
+ &virtio_crypto_modern_ops;
+ hw->modern = 1;
+ return 0;
+ }
+
+ /*
+ * virtio crypto conforms to virtio 1.0 and doesn't support
+ * legacy mode
+ */
+ return -1;
+}
diff --git a/drivers/crypto/virtio/virtio_pci.h b/drivers/crypto/virtio/virtio_pci.h
new file mode 100644
index 00000000..604ec366
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_pci.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_PCI_H_
+#define _VIRTIO_PCI_H_
+
+#include <stdint.h>
+
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_cryptodev.h>
+
+#include "virtio_crypto.h"
+
+struct virtqueue;
+
+/* VirtIO PCI vendor/device ID. */
+#define VIRTIO_CRYPTO_PCI_VENDORID 0x1AF4
+#define VIRTIO_CRYPTO_PCI_DEVICEID 0x1054
+
+/* VirtIO ABI version, this must match exactly. */
+#define VIRTIO_PCI_ABI_VERSION 0
+
+/*
+ * VirtIO Header, located in BAR 0.
+ */
+#define VIRTIO_PCI_HOST_FEATURES 0 /* host's supported features (32bit, RO)*/
+#define VIRTIO_PCI_GUEST_FEATURES 4 /* guest's supported features (32, RW) */
+#define VIRTIO_PCI_QUEUE_PFN 8 /* physical address of VQ (32, RW) */
+#define VIRTIO_PCI_QUEUE_NUM 12 /* number of ring entries (16, RO) */
+#define VIRTIO_PCI_QUEUE_SEL 14 /* current VQ selection (16, RW) */
+#define VIRTIO_PCI_QUEUE_NOTIFY 16 /* notify host regarding VQ (16, RW) */
+#define VIRTIO_PCI_STATUS 18 /* device status register (8, RW) */
+#define VIRTIO_PCI_ISR 19 /* interrupt status register, reading
+ * also clears the register (8, RO)
+ */
+/* Only if MSIX is enabled: */
+
+/* configuration change vector (16, RW) */
+#define VIRTIO_MSI_CONFIG_VECTOR 20
+/* vector for selected VQ notifications */
+#define VIRTIO_MSI_QUEUE_VECTOR 22
+
+/* The bit of the ISR which indicates a device has an interrupt. */
+#define VIRTIO_PCI_ISR_INTR 0x1
+/* The bit of the ISR which indicates a device configuration change. */
+#define VIRTIO_PCI_ISR_CONFIG 0x2
+/* Vector value used to disable MSI for queue. */
+#define VIRTIO_MSI_NO_VECTOR 0xFFFF
+
+/* Status byte for guest to report progress. */
+#define VIRTIO_CONFIG_STATUS_RESET 0x00
+#define VIRTIO_CONFIG_STATUS_ACK 0x01
+#define VIRTIO_CONFIG_STATUS_DRIVER 0x02
+#define VIRTIO_CONFIG_STATUS_DRIVER_OK 0x04
+#define VIRTIO_CONFIG_STATUS_FEATURES_OK 0x08
+#define VIRTIO_CONFIG_STATUS_FAILED 0x80
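+
+/*
+ * A typical driver initialization, sketched here only for illustration,
+ * ORs these bits into the status register step by step, e.g.:
+ *
+ *	vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
+ *	vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
+ *	... negotiate features ...
+ *	vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
+ *	vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
+ *
+ * with VIRTIO_CONFIG_STATUS_FAILED written instead if any step fails.
+ */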
+
+/*
+ * Each virtqueue indirect descriptor list must be physically contiguous.
+ * To allow us to malloc(9) each list individually, limit the number
+ * supported to what will fit in one page. With 4KB pages, this is a limit
+ * of 256 descriptors. If there is ever a need for more, we can switch to
+ * contigmalloc(9) for the larger allocations, similar to what
+ * bus_dmamem_alloc(9) does.
+ *
+ * Note the sizeof(struct vring_desc) is 16 bytes.
+ */
+#define VIRTIO_MAX_INDIRECT ((int) (PAGE_SIZE / 16))
+
+/* Do we get callbacks when the ring is completely used, even if we've
+ * suppressed them?
+ */
+#define VIRTIO_F_NOTIFY_ON_EMPTY 24
+
+/* Can the device handle any descriptor layout? */
+#define VIRTIO_F_ANY_LAYOUT 27
+
+/* We support indirect buffer descriptors */
+#define VIRTIO_RING_F_INDIRECT_DESC 28
+
+#define VIRTIO_F_VERSION_1 32
+#define VIRTIO_F_IOMMU_PLATFORM 33
+
+/* The Guest publishes the used index for which it expects an interrupt
+ * at the end of the avail ring. Host should ignore the avail->flags field.
+ */
+/* The Host publishes the avail index for which it expects a kick
+ * at the end of the used ring. Guest should ignore the used->flags field.
+ */
+#define VIRTIO_RING_F_EVENT_IDX 29
+
+/* Common configuration */
+#define VIRTIO_PCI_CAP_COMMON_CFG 1
+/* Notifications */
+#define VIRTIO_PCI_CAP_NOTIFY_CFG 2
+/* ISR Status */
+#define VIRTIO_PCI_CAP_ISR_CFG 3
+/* Device specific configuration */
+#define VIRTIO_PCI_CAP_DEVICE_CFG 4
+/* PCI configuration access */
+#define VIRTIO_PCI_CAP_PCI_CFG 5
+
+/* This is the PCI capability header: */
+struct virtio_pci_cap {
+ uint8_t cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */
+ uint8_t cap_next; /* Generic PCI field: next ptr. */
+ uint8_t cap_len; /* Generic PCI field: capability length */
+ uint8_t cfg_type; /* Identifies the structure. */
+ uint8_t bar; /* Where to find it. */
+ uint8_t padding[3]; /* Pad to full dword. */
+ uint32_t offset; /* Offset within bar. */
+ uint32_t length; /* Length of the structure, in bytes. */
+};
+
+struct virtio_pci_notify_cap {
+ struct virtio_pci_cap cap;
+ uint32_t notify_off_multiplier; /* Multiplier for queue_notify_off. */
+};
+
+/* Fields in VIRTIO_PCI_CAP_COMMON_CFG: */
+struct virtio_pci_common_cfg {
+ /* About the whole device. */
+ uint32_t device_feature_select; /* read-write */
+ uint32_t device_feature; /* read-only */
+ uint32_t guest_feature_select; /* read-write */
+ uint32_t guest_feature; /* read-write */
+ uint16_t msix_config; /* read-write */
+ uint16_t num_queues; /* read-only */
+ uint8_t device_status; /* read-write */
+ uint8_t config_generation; /* read-only */
+
+ /* About a specific virtqueue. */
+ uint16_t queue_select; /* read-write */
+ uint16_t queue_size; /* read-write, power of 2. */
+ uint16_t queue_msix_vector; /* read-write */
+ uint16_t queue_enable; /* read-write */
+ uint16_t queue_notify_off; /* read-only */
+ uint32_t queue_desc_lo; /* read-write */
+ uint32_t queue_desc_hi; /* read-write */
+ uint32_t queue_avail_lo; /* read-write */
+ uint32_t queue_avail_hi; /* read-write */
+ uint32_t queue_used_lo; /* read-write */
+ uint32_t queue_used_hi; /* read-write */
+};
+
+struct virtio_crypto_hw;
+
+struct virtio_pci_ops {
+ void (*read_dev_cfg)(struct virtio_crypto_hw *hw, size_t offset,
+ void *dst, int len);
+ void (*write_dev_cfg)(struct virtio_crypto_hw *hw, size_t offset,
+ const void *src, int len);
+ void (*reset)(struct virtio_crypto_hw *hw);
+
+ uint8_t (*get_status)(struct virtio_crypto_hw *hw);
+ void (*set_status)(struct virtio_crypto_hw *hw, uint8_t status);
+
+ uint64_t (*get_features)(struct virtio_crypto_hw *hw);
+ void (*set_features)(struct virtio_crypto_hw *hw, uint64_t features);
+
+ uint8_t (*get_isr)(struct virtio_crypto_hw *hw);
+
+ uint16_t (*set_config_irq)(struct virtio_crypto_hw *hw, uint16_t vec);
+
+ uint16_t (*set_queue_irq)(struct virtio_crypto_hw *hw,
+ struct virtqueue *vq, uint16_t vec);
+
+ uint16_t (*get_queue_num)(struct virtio_crypto_hw *hw,
+ uint16_t queue_id);
+ int (*setup_queue)(struct virtio_crypto_hw *hw, struct virtqueue *vq);
+ void (*del_queue)(struct virtio_crypto_hw *hw, struct virtqueue *vq);
+ void (*notify_queue)(struct virtio_crypto_hw *hw, struct virtqueue *vq);
+};
+
+struct virtio_crypto_hw {
+ /* control queue */
+ struct virtqueue *cvq;
+ uint16_t dev_id;
+ uint16_t max_dataqueues;
+ uint64_t req_guest_features;
+ uint64_t guest_features;
+ uint8_t use_msix;
+ uint8_t modern;
+ uint32_t notify_off_multiplier;
+ uint8_t *isr;
+ uint16_t *notify_base;
+ struct virtio_pci_common_cfg *common_cfg;
+ struct virtio_crypto_config *dev_cfg;
+ const struct rte_cryptodev_capabilities *virtio_dev_capabilities;
+};
+
+/*
+ * While virtio_crypto_hw is stored in shared memory, this structure keeps
+ * some information that may differ between processes in the multi-process
+ * model, for example the vtpci_ops pointer.
+ */
+struct virtio_hw_internal {
+ const struct virtio_pci_ops *vtpci_ops;
+ struct rte_pci_ioport io;
+};
+
+#define VTPCI_OPS(hw) (virtio_hw_internal[(hw)->dev_id].vtpci_ops)
+#define VTPCI_IO(hw) (&virtio_hw_internal[(hw)->dev_id].io)
+
+extern struct virtio_hw_internal virtio_hw_internal[RTE_MAX_VIRTIO_CRYPTO];
+
+/*
+ * How many bits to shift physical queue address written to QUEUE_PFN.
+ * 12 is historical, and due to x86 page size.
+ */
+#define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12
+
+/* The alignment to use between consumer and producer parts of vring. */
+#define VIRTIO_PCI_VRING_ALIGN 4096
+
+enum virtio_msix_status {
+ VIRTIO_MSIX_NONE = 0,
+ VIRTIO_MSIX_DISABLED = 1,
+ VIRTIO_MSIX_ENABLED = 2
+};
+
+static inline int
+vtpci_with_feature(struct virtio_crypto_hw *hw, uint64_t bit)
+{
+ return (hw->guest_features & (1ULL << bit)) != 0;
+}
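+
+/*
+ * Hypothetical usage sketch: once features have been negotiated, callers
+ * can gate modern-device behaviour on the negotiated bits, e.g.
+ *
+ *	if (vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
+ *		... take the virtio 1.0 code path ...
+ */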
+
+/*
+ * Function declaration from virtio_pci.c
+ */
+int vtpci_cryptodev_init(struct rte_pci_device *dev,
+ struct virtio_crypto_hw *hw);
+void vtpci_cryptodev_reset(struct virtio_crypto_hw *hw);
+
+void vtpci_cryptodev_reinit_complete(struct virtio_crypto_hw *hw);
+
+uint8_t vtpci_cryptodev_get_status(struct virtio_crypto_hw *hw);
+void vtpci_cryptodev_set_status(struct virtio_crypto_hw *hw, uint8_t status);
+
+uint64_t vtpci_cryptodev_negotiate_features(struct virtio_crypto_hw *hw,
+ uint64_t host_features);
+
+void vtpci_write_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
+ const void *src, int length);
+
+void vtpci_read_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
+ void *dst, int length);
+
+uint8_t vtpci_cryptodev_isr(struct virtio_crypto_hw *hw);
+
+#endif /* _VIRTIO_PCI_H_ */
diff --git a/drivers/crypto/virtio/virtio_ring.h b/drivers/crypto/virtio/virtio_ring.h
new file mode 100644
index 00000000..ee306745
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_ring.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_RING_H_
+#define _VIRTIO_RING_H_
+
+#include <stdint.h>
+
+#include <rte_common.h>
+
+/* This marks a buffer as continuing via the next field. */
+#define VRING_DESC_F_NEXT 1
+/* This marks a buffer as write-only (otherwise read-only). */
+#define VRING_DESC_F_WRITE 2
+/* This means the buffer contains a list of buffer descriptors. */
+#define VRING_DESC_F_INDIRECT 4
+
+/* The Host uses this in used->flags to advise the Guest: don't kick me
+ * when you add a buffer. It's unreliable, so it's simply an
+ * optimization. Guest will still kick if it's out of buffers.
+ */
+#define VRING_USED_F_NO_NOTIFY 1
+/* The Guest uses this in avail->flags to advise the Host: don't
+ * interrupt me when you consume a buffer. It's unreliable, so it's
+ * simply an optimization.
+ */
+#define VRING_AVAIL_F_NO_INTERRUPT 1
+
+/* VirtIO ring descriptors: 16 bytes.
+ * These can chain together via "next".
+ */
+struct vring_desc {
+ uint64_t addr; /* Address (guest-physical). */
+ uint32_t len; /* Length. */
+ uint16_t flags; /* The flags as indicated above. */
+ uint16_t next; /* We chain unused descriptors via this. */
+};
+
+struct vring_avail {
+ uint16_t flags;
+ uint16_t idx;
+ uint16_t ring[0];
+};
+
+/* id is a 16bit index. uint32_t is used here for ids for padding reasons. */
+struct vring_used_elem {
+ /* Index of start of used descriptor chain. */
+ uint32_t id;
+ /* Total length of the descriptor chain which was written to. */
+ uint32_t len;
+};
+
+struct vring_used {
+ uint16_t flags;
+ volatile uint16_t idx;
+ struct vring_used_elem ring[0];
+};
+
+struct vring {
+ unsigned int num;
+ struct vring_desc *desc;
+ struct vring_avail *avail;
+ struct vring_used *used;
+};
+
+/* The standard layout for the ring is a contiguous chunk of memory which
+ * looks like this. We assume num is a power of 2.
+ *
+ * struct vring {
+ * // The actual descriptors (16 bytes each)
+ * struct vring_desc desc[num];
+ *
+ * // A ring of available descriptor heads with free-running index.
+ * __u16 avail_flags;
+ * __u16 avail_idx;
+ * __u16 available[num];
+ * __u16 used_event_idx;
+ *
+ * // Padding to the next align boundary.
+ * char pad[];
+ *
+ * // A ring of used descriptor heads with free-running index.
+ * __u16 used_flags;
+ * __u16 used_idx;
+ * struct vring_used_elem used[num];
+ * __u16 avail_event_idx;
+ * };
+ *
+ * NOTE: for VirtIO PCI, align is 4096.
+ */
+
+/*
+ * We publish the used event index at the end of the available ring, and vice
+ * versa. They are at the end for backwards compatibility.
+ */
+#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
+#define vring_avail_event(vr) (*(uint16_t *)&(vr)->used->ring[(vr)->num])
+
+static inline size_t
+vring_size(unsigned int num, unsigned long align)
+{
+ size_t size;
+
+ size = num * sizeof(struct vring_desc);
+ size += sizeof(struct vring_avail) + (num * sizeof(uint16_t));
+ size = RTE_ALIGN_CEIL(size, align);
+ size += sizeof(struct vring_used) +
+ (num * sizeof(struct vring_used_elem));
+ return size;
+}
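+
+/*
+ * Worked example, assuming 16-byte descriptors and a 4096-byte alignment:
+ * for num = 1024, the descriptor table takes 1024 * 16 = 16384 bytes and
+ * the avail ring adds 4 + 1024 * 2 = 2052 bytes; rounding the subtotal of
+ * 18436 up to the alignment gives 20480 bytes, and the used ring adds
+ * another 4 + 1024 * 8 = 8196 bytes, so vring_size(1024, 4096) = 28676.
+ */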
+
+static inline void
+vring_init(struct vring *vr, unsigned int num, uint8_t *p,
+ unsigned long align)
+{
+ vr->num = num;
+ vr->desc = (struct vring_desc *) p;
+ vr->avail = (struct vring_avail *) (p +
+ num * sizeof(struct vring_desc));
+ vr->used = (void *)
+ RTE_ALIGN_CEIL((uintptr_t)(&vr->avail->ring[num]), align);
+}
+
+/*
+ * The following is used with VIRTIO_RING_F_EVENT_IDX.
+ * Assuming a given event_idx value from the other side, if we have
+ * just incremented the index from old to new_idx, should we trigger
+ * an event?
+ */
+static inline int
+vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
+{
+ return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
+}
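+
+/*
+ * Worked example: with old = 10 and new_idx = 12, an event_idx of 10 or 11
+ * has just been crossed, so (new_idx - event_idx - 1) is 1 or 0, which is
+ * less than (new_idx - old) = 2, and an event is needed; an event_idx of 12
+ * yields 65535 in 16-bit arithmetic and no event. The unsigned subtraction
+ * keeps the comparison correct across index wrap-around.
+ */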
+
+#endif /* _VIRTIO_RING_H_ */
diff --git a/drivers/crypto/virtio/virtio_rxtx.c b/drivers/crypto/virtio/virtio_rxtx.c
new file mode 100644
index 00000000..45039284
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_rxtx.c
@@ -0,0 +1,515 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+#include <rte_cryptodev_pmd.h>
+
+#include "virtqueue.h"
+#include "virtio_cryptodev.h"
+#include "virtio_crypto_algs.h"
+
+static void
+vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
+{
+ struct vring_desc *dp, *dp_tail;
+ struct vq_desc_extra *dxp;
+ uint16_t desc_idx_last = desc_idx;
+
+ dp = &vq->vq_ring.desc[desc_idx];
+ dxp = &vq->vq_descx[desc_idx];
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
+ if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
+ while (dp->flags & VRING_DESC_F_NEXT) {
+ desc_idx_last = dp->next;
+ dp = &vq->vq_ring.desc[dp->next];
+ }
+ }
+ dxp->ndescs = 0;
+
+ /*
+ * We must append the existing free chain, if any, to the end of
+ * newly freed chain. If the virtqueue was completely used, then
+ * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
+ */
+ if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
+ vq->vq_desc_head_idx = desc_idx;
+ } else {
+ dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
+ dp_tail->next = desc_idx;
+ }
+
+ vq->vq_desc_tail_idx = desc_idx_last;
+ dp->next = VQ_RING_DESC_CHAIN_END;
+}
+
+static uint16_t
+virtqueue_dequeue_burst_rx(struct virtqueue *vq,
+ struct rte_crypto_op **rx_pkts, uint16_t num)
+{
+ struct vring_used_elem *uep;
+ struct rte_crypto_op *cop;
+ uint16_t used_idx, desc_idx;
+ uint16_t i;
+ struct virtio_crypto_inhdr *inhdr;
+ struct virtio_crypto_op_cookie *op_cookie;
+
+ /* Caller does the check */
+ for (i = 0; i < num ; i++) {
+ used_idx = (uint16_t)(vq->vq_used_cons_idx
+ & (vq->vq_nentries - 1));
+ uep = &vq->vq_ring.used->ring[used_idx];
+ desc_idx = (uint16_t)uep->id;
+ cop = (struct rte_crypto_op *)
+ vq->vq_descx[desc_idx].crypto_op;
+ if (unlikely(cop == NULL)) {
+ VIRTIO_CRYPTO_RX_LOG_DBG("vring descriptor with no "
+ "mbuf cookie at %u",
+ vq->vq_used_cons_idx);
+ break;
+ }
+
+ op_cookie = (struct virtio_crypto_op_cookie *)
+ vq->vq_descx[desc_idx].cookie;
+ inhdr = &(op_cookie->inhdr);
+ switch (inhdr->status) {
+ case VIRTIO_CRYPTO_OK:
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ break;
+ case VIRTIO_CRYPTO_ERR:
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ vq->packets_received_failed++;
+ break;
+ case VIRTIO_CRYPTO_BADMSG:
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ vq->packets_received_failed++;
+ break;
+ case VIRTIO_CRYPTO_NOTSUPP:
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ vq->packets_received_failed++;
+ break;
+ case VIRTIO_CRYPTO_INVSESS:
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ vq->packets_received_failed++;
+ break;
+ default:
+ break;
+ }
+
+ vq->packets_received_total++;
+
+ rx_pkts[i] = cop;
+ rte_mempool_put(vq->mpool, op_cookie);
+
+ vq->vq_used_cons_idx++;
+ vq_ring_free_chain(vq, desc_idx);
+ vq->vq_descx[desc_idx].crypto_op = NULL;
+ }
+
+ return i;
+}
+
+static int
+virtqueue_crypto_sym_pkt_header_arrange(
+ struct rte_crypto_op *cop,
+ struct virtio_crypto_op_data_req *data,
+ struct virtio_crypto_session *session)
+{
+ struct rte_crypto_sym_op *sym_op = cop->sym;
+ struct virtio_crypto_op_data_req *req_data = data;
+ struct virtio_crypto_op_ctrl_req *ctrl = &session->ctrl;
+ struct virtio_crypto_sym_create_session_req *sym_sess_req =
+ &ctrl->u.sym_create_session;
+ struct virtio_crypto_alg_chain_session_para *chain_para =
+ &sym_sess_req->u.chain.para;
+ struct virtio_crypto_cipher_session_para *cipher_para;
+
+ req_data->header.session_id = session->session_id;
+
+ switch (sym_sess_req->op_type) {
+ case VIRTIO_CRYPTO_SYM_OP_CIPHER:
+ req_data->u.sym_req.op_type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
+
+ cipher_para = &sym_sess_req->u.cipher.para;
+ if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT)
+ req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT;
+ else
+ req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT;
+
+ req_data->u.sym_req.u.cipher.para.iv_len
+ = session->iv.length;
+
+ req_data->u.sym_req.u.cipher.para.src_data_len =
+ (sym_op->cipher.data.length +
+ sym_op->cipher.data.offset);
+ req_data->u.sym_req.u.cipher.para.dst_data_len =
+ req_data->u.sym_req.u.cipher.para.src_data_len;
+ break;
+ case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
+ req_data->u.sym_req.op_type =
+ VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;
+
+ cipher_para = &chain_para->cipher_param;
+ if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT)
+ req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT;
+ else
+ req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT;
+
+ req_data->u.sym_req.u.chain.para.iv_len = session->iv.length;
+ req_data->u.sym_req.u.chain.para.aad_len = session->aad.length;
+
+ req_data->u.sym_req.u.chain.para.src_data_len =
+ (sym_op->cipher.data.length +
+ sym_op->cipher.data.offset);
+ req_data->u.sym_req.u.chain.para.dst_data_len =
+ req_data->u.sym_req.u.chain.para.src_data_len;
+ req_data->u.sym_req.u.chain.para.cipher_start_src_offset =
+ sym_op->cipher.data.offset;
+ req_data->u.sym_req.u.chain.para.len_to_cipher =
+ sym_op->cipher.data.length;
+ req_data->u.sym_req.u.chain.para.hash_start_src_offset =
+ sym_op->auth.data.offset;
+ req_data->u.sym_req.u.chain.para.len_to_hash =
+ sym_op->auth.data.length;
+ req_data->u.sym_req.u.chain.para.aad_len =
+ chain_para->aad_len;
+
+ if (chain_para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN)
+ req_data->u.sym_req.u.chain.para.hash_result_len =
+ chain_para->u.hash_param.hash_result_len;
+ if (chain_para->hash_mode ==
+ VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)
+ req_data->u.sym_req.u.chain.para.hash_result_len =
+ chain_para->u.mac_param.hash_result_len;
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+virtqueue_crypto_sym_enqueue_xmit(
+ struct virtqueue *txvq,
+ struct rte_crypto_op *cop)
+{
+ uint16_t idx = 0;
+ uint16_t num_entry;
+ uint16_t needed = 1;
+ uint16_t head_idx;
+ struct vq_desc_extra *dxp;
+ struct vring_desc *start_dp;
+ struct vring_desc *desc;
+ uint64_t indirect_op_data_req_phys_addr;
+ uint16_t req_data_len = sizeof(struct virtio_crypto_op_data_req);
+ uint32_t indirect_vring_addr_offset = req_data_len +
+ sizeof(struct virtio_crypto_inhdr);
+ struct rte_crypto_sym_op *sym_op = cop->sym;
+ struct virtio_crypto_session *session =
+ (struct virtio_crypto_session *)get_session_private_data(
+ cop->sym->session, cryptodev_virtio_driver_id);
+ struct virtio_crypto_op_data_req *op_data_req;
+ uint32_t hash_result_len = 0;
+ struct virtio_crypto_op_cookie *crypto_op_cookie;
+ struct virtio_crypto_alg_chain_session_para *para;
+
+ if (unlikely(sym_op->m_src->nb_segs != 1))
+ return -EMSGSIZE;
+ if (unlikely(txvq->vq_free_cnt == 0))
+ return -ENOSPC;
+ if (unlikely(txvq->vq_free_cnt < needed))
+ return -EMSGSIZE;
+ head_idx = txvq->vq_desc_head_idx;
+ if (unlikely(head_idx >= txvq->vq_nentries))
+ return -EFAULT;
+ if (unlikely(session == NULL))
+ return -EFAULT;
+
+ dxp = &txvq->vq_descx[head_idx];
+
+ if (rte_mempool_get(txvq->mpool, &dxp->cookie)) {
+ VIRTIO_CRYPTO_TX_LOG_ERR("can not get cookie");
+ return -EFAULT;
+ }
+ crypto_op_cookie = dxp->cookie;
+ indirect_op_data_req_phys_addr =
+ rte_mempool_virt2iova(crypto_op_cookie);
+ op_data_req = (struct virtio_crypto_op_data_req *)crypto_op_cookie;
+
+ if (virtqueue_crypto_sym_pkt_header_arrange(cop, op_data_req, session))
+ return -EFAULT;
+
+ /* status is initialized to VIRTIO_CRYPTO_ERR */
+ ((struct virtio_crypto_inhdr *)
+ ((uint8_t *)op_data_req + req_data_len))->status =
+ VIRTIO_CRYPTO_ERR;
+
+ /* point to indirect vring entry */
+ desc = (struct vring_desc *)
+ ((uint8_t *)op_data_req + indirect_vring_addr_offset);
+ for (idx = 0; idx < (NUM_ENTRY_VIRTIO_CRYPTO_OP - 1); idx++)
+ desc[idx].next = idx + 1;
+ desc[NUM_ENTRY_VIRTIO_CRYPTO_OP - 1].next = VQ_RING_DESC_CHAIN_END;
+
+ idx = 0;
+
+ /* indirect vring: first part, virtio_crypto_op_data_req */
+ desc[idx].addr = indirect_op_data_req_phys_addr;
+ desc[idx].len = req_data_len;
+ desc[idx++].flags = VRING_DESC_F_NEXT;
+
+ /* indirect vring: iv of cipher */
+ if (session->iv.length) {
+ desc[idx].addr = cop->phys_addr + session->iv.offset;
+ desc[idx].len = session->iv.length;
+ desc[idx++].flags = VRING_DESC_F_NEXT;
+ }
+
+ /* indirect vring: additional auth data */
+ if (session->aad.length) {
+ desc[idx].addr = session->aad.phys_addr;
+ desc[idx].len = session->aad.length;
+ desc[idx++].flags = VRING_DESC_F_NEXT;
+ }
+
+ /* indirect vring: src data */
+ desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_src, 0);
+ desc[idx].len = (sym_op->cipher.data.offset
+ + sym_op->cipher.data.length);
+ desc[idx++].flags = VRING_DESC_F_NEXT;
+
+ /* indirect vring: dst data */
+ if (sym_op->m_dst) {
+ desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_dst, 0);
+ desc[idx].len = (sym_op->cipher.data.offset
+ + sym_op->cipher.data.length);
+ } else {
+ desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_src, 0);
+ desc[idx].len = (sym_op->cipher.data.offset
+ + sym_op->cipher.data.length);
+ }
+ desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT;
+
+ /* indirect vring: digest result */
+ para = &(session->ctrl.u.sym_create_session.u.chain.para);
+ if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN)
+ hash_result_len = para->u.hash_param.hash_result_len;
+ if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)
+ hash_result_len = para->u.mac_param.hash_result_len;
+ if (hash_result_len > 0) {
+ desc[idx].addr = sym_op->auth.digest.phys_addr;
+ desc[idx].len = hash_result_len;
+ desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT;
+ }
+
+ /* indirect vring: last part, status returned */
+ desc[idx].addr = indirect_op_data_req_phys_addr + req_data_len;
+ desc[idx].len = sizeof(struct virtio_crypto_inhdr);
+ desc[idx++].flags = VRING_DESC_F_WRITE;
+
+ num_entry = idx;
+
+ /* save the infos to use when receiving packets */
+ dxp->crypto_op = (void *)cop;
+ dxp->ndescs = needed;
+
+ /* use a single buffer */
+ start_dp = txvq->vq_ring.desc;
+ start_dp[head_idx].addr = indirect_op_data_req_phys_addr +
+ indirect_vring_addr_offset;
+ start_dp[head_idx].len = num_entry * sizeof(struct vring_desc);
+ start_dp[head_idx].flags = VRING_DESC_F_INDIRECT;
+
+ idx = start_dp[head_idx].next;
+ txvq->vq_desc_head_idx = idx;
+ if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+ txvq->vq_desc_tail_idx = idx;
+ txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
+ vq_update_avail_ring(txvq, head_idx);
+
+ return 0;
+}
+
+static int
+virtqueue_crypto_enqueue_xmit(struct virtqueue *txvq,
+ struct rte_crypto_op *cop)
+{
+ int ret;
+
+ switch (cop->type) {
+ case RTE_CRYPTO_OP_TYPE_SYMMETRIC:
+ ret = virtqueue_crypto_sym_enqueue_xmit(txvq, cop);
+ break;
+ default:
+ VIRTIO_CRYPTO_TX_LOG_ERR("invalid crypto op type %u",
+ cop->type);
+ ret = -EFAULT;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+virtio_crypto_vring_start(struct virtqueue *vq)
+{
+ struct virtio_crypto_hw *hw = vq->hw;
+ int i, size = vq->vq_nentries;
+ struct vring *vr = &vq->vq_ring;
+ uint8_t *ring_mem = vq->vq_ring_virt_mem;
+
+ PMD_INIT_FUNC_TRACE();
+
+ vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
+ vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+ vq->vq_free_cnt = vq->vq_nentries;
+
+ /* Chain all the descriptors in the ring with an END */
+ for (i = 0; i < size - 1; i++)
+ vr->desc[i].next = (uint16_t)(i + 1);
+ vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
+
+ /*
+	 * Disable the device (host) from interrupting the guest.
+ */
+ virtqueue_disable_intr(vq);
+
+ /*
+	 * Set the guest physical address of the virtqueue in the
+	 * VIRTIO_PCI_QUEUE_PFN config register of the device so that
+	 * it is shared with the backend.
+ */
+ if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("setup_queue failed");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void
+virtio_crypto_ctrlq_start(struct rte_cryptodev *dev)
+{
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ if (hw->cvq) {
+ virtio_crypto_vring_start(hw->cvq);
+ VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq);
+ }
+}
+
+void
+virtio_crypto_dataq_start(struct rte_cryptodev *dev)
+{
+ /*
+ * Start data vrings
+ * - Setup vring structure for data queues
+ */
+ uint16_t i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Start data vring. */
+ for (i = 0; i < hw->max_dataqueues; i++) {
+ virtio_crypto_vring_start(dev->data->queue_pairs[i]);
+ VIRTQUEUE_DUMP((struct virtqueue *)dev->data->queue_pairs[i]);
+ }
+}
+
+/* vring size of data queue is 1024 */
+#define VIRTIO_MBUF_BURST_SZ 1024
+
+uint16_t
+virtio_crypto_pkt_rx_burst(void *tx_queue, struct rte_crypto_op **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtqueue *txvq = tx_queue;
+ uint16_t nb_used, num, nb_rx;
+
+ nb_used = VIRTQUEUE_NUSED(txvq);
+
+ virtio_rmb();
+
+ num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
+ num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ)
+ ? num : VIRTIO_MBUF_BURST_SZ);
+
+ if (num == 0)
+ return 0;
+
+ nb_rx = virtqueue_dequeue_burst_rx(txvq, rx_pkts, num);
+ VIRTIO_CRYPTO_RX_LOG_DBG("used:%d dequeue:%d", nb_used, num);
+
+ return nb_rx;
+}
+
+uint16_t
+virtio_crypto_pkt_tx_burst(void *tx_queue, struct rte_crypto_op **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtqueue *txvq;
+ uint16_t nb_tx;
+ int error;
+
+ if (unlikely(nb_pkts < 1))
+ return nb_pkts;
+ if (unlikely(tx_queue == NULL)) {
+ VIRTIO_CRYPTO_TX_LOG_ERR("tx_queue is NULL");
+ return 0;
+ }
+ txvq = tx_queue;
+
+ VIRTIO_CRYPTO_TX_LOG_DBG("%d packets to xmit", nb_pkts);
+
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ struct rte_mbuf *txm = tx_pkts[nb_tx]->sym->m_src;
+		/* nb_segs is always 1 in the virtio crypto case */
+ int need = txm->nb_segs - txvq->vq_free_cnt;
+
+ /*
+		 * A positive value indicates there is not enough space in
+		 * the vring descriptors.
+ */
+ if (unlikely(need > 0)) {
+ /*
+			 * Try again, because the receive path may have
+			 * freed some space in the meantime.
+ */
+ need = txm->nb_segs - txvq->vq_free_cnt;
+ if (unlikely(need > 0)) {
+ VIRTIO_CRYPTO_TX_LOG_DBG("No free tx "
+ "descriptors to transmit");
+ break;
+ }
+ }
+
+ txvq->packets_sent_total++;
+
+ /* Enqueue Packet buffers */
+ error = virtqueue_crypto_enqueue_xmit(txvq, tx_pkts[nb_tx]);
+ if (unlikely(error)) {
+ if (error == ENOSPC)
+ VIRTIO_CRYPTO_TX_LOG_ERR(
+ "virtqueue_enqueue Free count = 0");
+ else if (error == EMSGSIZE)
+ VIRTIO_CRYPTO_TX_LOG_ERR(
+ "virtqueue_enqueue Free count < 1");
+ else
+ VIRTIO_CRYPTO_TX_LOG_ERR(
+ "virtqueue_enqueue error: %d", error);
+ txvq->packets_sent_failed++;
+ break;
+ }
+ }
+
+ if (likely(nb_tx)) {
+ vq_update_avail_idx(txvq);
+
+ if (unlikely(virtqueue_kick_prepare(txvq))) {
+ virtqueue_notify(txvq);
+ VIRTIO_CRYPTO_TX_LOG_DBG("Notified backend after xmit");
+ }
+ }
+
+ return nb_tx;
+}
diff --git a/drivers/crypto/virtio/virtqueue.c b/drivers/crypto/virtio/virtqueue.c
new file mode 100644
index 00000000..fd8be581
--- /dev/null
+++ b/drivers/crypto/virtio/virtqueue.c
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#include <stdint.h>
+
+#include <rte_mbuf.h>
+#include <rte_crypto.h>
+#include <rte_malloc.h>
+
+#include "virtqueue.h"
+
+void
+virtqueue_disable_intr(struct virtqueue *vq)
+{
+ /*
+	 * Set VRING_AVAIL_F_NO_INTERRUPT to hint the host
+	 * not to interrupt when it consumes packets.
+	 * Note: this is only considered a hint by the host.
+ */
+ vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+}
+
+void
+virtqueue_detatch_unused(struct virtqueue *vq)
+{
+ struct rte_crypto_op *cop = NULL;
+
+ int idx;
+
+ if (vq != NULL)
+ for (idx = 0; idx < vq->vq_nentries; idx++) {
+ cop = vq->vq_descx[idx].crypto_op;
+ if (cop) {
+ if (cop->sym->m_src)
+ rte_pktmbuf_free(cop->sym->m_src);
+ if (cop->sym->m_dst)
+ rte_pktmbuf_free(cop->sym->m_dst);
+ rte_crypto_op_free(cop);
+ vq->vq_descx[idx].crypto_op = NULL;
+ }
+ }
+}
diff --git a/drivers/crypto/virtio/virtqueue.h b/drivers/crypto/virtio/virtqueue.h
new file mode 100644
index 00000000..bf10c657
--- /dev/null
+++ b/drivers/crypto/virtio/virtqueue.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTQUEUE_H_
+#define _VIRTQUEUE_H_
+
+#include <stdint.h>
+
+#include <rte_atomic.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_mempool.h>
+
+#include "virtio_pci.h"
+#include "virtio_ring.h"
+#include "virtio_logs.h"
+#include "virtio_crypto.h"
+
+struct rte_mbuf;
+
+/*
+ * Per virtio_config.h in Linux.
+ * For virtio_pci on SMP, we don't need to order with respect to MMIO
+ * accesses through relaxed memory I/O windows, so smp_mb() et al are
+ * sufficient.
+ *
+ */
+#define virtio_mb() rte_smp_mb()
+#define virtio_rmb() rte_smp_rmb()
+#define virtio_wmb() rte_smp_wmb()
+
+#define VIRTQUEUE_MAX_NAME_SZ 32
+
+enum { VTCRYPTO_DATAQ = 0, VTCRYPTO_CTRLQ = 1 };
+
+/**
+ * The maximum virtqueue size is 2^15. Use that value as the end of
+ * descriptor chain terminator since it will never be a valid index
+ * in the descriptor table. This is used to verify we are correctly
+ * handling vq_free_cnt.
+ */
+#define VQ_RING_DESC_CHAIN_END 32768
+
+struct vq_desc_extra {
+ void *crypto_op;
+ void *cookie;
+ uint16_t ndescs;
+};
+
+struct virtqueue {
+ /**< virtio_crypto_hw structure pointer. */
+ struct virtio_crypto_hw *hw;
+ /**< mem zone to populate RX ring. */
+ const struct rte_memzone *mz;
+ /**< memzone to populate hdr and request. */
+ struct rte_mempool *mpool;
+ uint8_t dev_id; /**< Device identifier. */
+ uint16_t vq_queue_index; /**< PCI queue index */
+
+ void *vq_ring_virt_mem; /**< linear address of vring*/
+ unsigned int vq_ring_size;
+ phys_addr_t vq_ring_mem; /**< physical address of vring */
+
+ struct vring vq_ring; /**< vring keeping desc, used and avail */
+ uint16_t vq_free_cnt; /**< num of desc available */
+ uint16_t vq_nentries; /**< vring desc numbers */
+
+ /**
+ * Head of the free chain in the descriptor table. If
+ * there are no free descriptors, this will be set to
+ * VQ_RING_DESC_CHAIN_END.
+ */
+ uint16_t vq_desc_head_idx;
+ uint16_t vq_desc_tail_idx;
+ /**
+ * Last consumed descriptor in the used table,
+ * trails vq_ring.used->idx.
+ */
+ uint16_t vq_used_cons_idx;
+ uint16_t vq_avail_idx;
+
+ /* Statistics */
+ uint64_t packets_sent_total;
+ uint64_t packets_sent_failed;
+ uint64_t packets_received_total;
+ uint64_t packets_received_failed;
+
+ uint16_t *notify_addr;
+
+ struct vq_desc_extra vq_descx[0];
+};
+
+/**
+ * Tell the backend not to interrupt us.
+ */
+void virtqueue_disable_intr(struct virtqueue *vq);
+
+/**
+ * Free the crypto ops and mbufs still attached to unused descriptors.
+ */
+void virtqueue_detatch_unused(struct virtqueue *vq);
+
+static inline int
+virtqueue_full(const struct virtqueue *vq)
+{
+ return vq->vq_free_cnt == 0;
+}
+
+#define VIRTQUEUE_NUSED(vq) \
+ ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
+
+static inline void
+vq_update_avail_idx(struct virtqueue *vq)
+{
+ virtio_wmb();
+ vq->vq_ring.avail->idx = vq->vq_avail_idx;
+}
+
+static inline void
+vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
+{
+ uint16_t avail_idx;
+ /*
+ * Place the head of the descriptor chain into the next slot and make
+ * it usable to the host. The chain is made available now rather than
+ * deferring to virtqueue_notify() in the hopes that if the host is
+ * currently running on another CPU, we can keep it processing the new
+ * descriptor.
+ */
+ avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
+ if (unlikely(vq->vq_ring.avail->ring[avail_idx] != desc_idx))
+ vq->vq_ring.avail->ring[avail_idx] = desc_idx;
+ vq->vq_avail_idx++;
+}
+
+static inline int
+virtqueue_kick_prepare(struct virtqueue *vq)
+{
+ return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
+}
+
+static inline void
+virtqueue_notify(struct virtqueue *vq)
+{
+ /*
+	 * Ensure the updated avail->idx is visible to the host.
+	 * For virtio on IA, the notification is done through an I/O port
+	 * operation, which is itself a serializing instruction.
+ */
+ VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
+}
+
+/**
+ * Dump virtqueue internal structures, for debugging purposes only.
+ */
+#define VIRTQUEUE_DUMP(vq) do { \
+ uint16_t used_idx, nused; \
+ used_idx = (vq)->vq_ring.used->idx; \
+ nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
+ VIRTIO_CRYPTO_INIT_LOG_DBG(\
+ "VQ: - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
+ " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
+ " avail.flags=0x%x; used.flags=0x%x", \
+ (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \
+ (vq)->vq_desc_head_idx, (vq)->vq_ring.avail->idx, \
+ (vq)->vq_used_cons_idx, (vq)->vq_ring.used->idx, \
+ (vq)->vq_ring.avail->flags, (vq)->vq_ring.used->flags); \
+} while (0)
+
+#endif /* _VIRTQUEUE_H_ */
diff --git a/drivers/crypto/zuc/rte_zuc_pmd.c b/drivers/crypto/zuc/rte_zuc_pmd.c
index ad65b80c..a805b227 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd.c
+++ b/drivers/crypto/zuc/rte_zuc_pmd.c
@@ -12,7 +12,7 @@
#include "rte_zuc_pmd_private.h"
-#define ZUC_MAX_BURST 8
+#define ZUC_MAX_BURST 4
#define BYTE_LEN 8
static uint8_t cryptodev_driver_id;
@@ -168,10 +168,10 @@ zuc_get_session(struct zuc_qp *qp, struct rte_crypto_op *op)
return sess;
}
-/** Encrypt/decrypt mbufs with same cipher key. */
+/** Encrypt/decrypt mbufs. */
static uint8_t
process_zuc_cipher_op(struct rte_crypto_op **ops,
- struct zuc_session *session,
+ struct zuc_session **sessions,
uint8_t num_ops)
{
unsigned i;
@@ -180,6 +180,7 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
uint8_t *iv[ZUC_MAX_BURST];
uint32_t num_bytes[ZUC_MAX_BURST];
uint8_t *cipher_keys[ZUC_MAX_BURST];
+ struct zuc_session *sess;
for (i = 0; i < num_ops; i++) {
if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0)
@@ -190,6 +191,8 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
break;
}
+ sess = sessions[i];
+
#ifdef RTE_LIBRTE_PMD_ZUC_DEBUG
if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
(ops[i]->sym->m_dst != NULL &&
@@ -211,10 +214,10 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
(ops[i]->sym->cipher.data.offset >> 3);
iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- session->cipher_iv_offset);
+ sess->cipher_iv_offset);
num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
- cipher_keys[i] = session->pKey_cipher;
+ cipher_keys[i] = sess->pKey_cipher;
processed_ops++;
}
@@ -225,10 +228,10 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
return processed_ops;
}
-/** Generate/verify hash from mbufs with same hash key. */
+/** Generate/verify hash from mbufs. */
static int
process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
- struct zuc_session *session,
+ struct zuc_session **sessions,
uint8_t num_ops)
{
unsigned i;
@@ -237,6 +240,7 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
uint32_t *dst;
uint32_t length_in_bits;
uint8_t *iv;
+ struct zuc_session *sess;
for (i = 0; i < num_ops; i++) {
/* Data must be byte aligned */
@@ -246,17 +250,19 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
break;
}
+ sess = sessions[i];
+
length_in_bits = ops[i]->sym->auth.data.length;
src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
(ops[i]->sym->auth.data.offset >> 3);
iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- session->auth_iv_offset);
+ sess->auth_iv_offset);
- if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ if (sess->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
dst = (uint32_t *)qp->temp_digest;
- sso_zuc_eia3_1_buffer(session->pKey_hash,
+ sso_zuc_eia3_1_buffer(sess->pKey_hash,
iv, src,
length_in_bits, dst);
/* Verify digest. */
@@ -266,7 +272,7 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
} else {
dst = (uint32_t *)ops[i]->sym->auth.digest.data;
- sso_zuc_eia3_1_buffer(session->pKey_hash,
+ sso_zuc_eia3_1_buffer(sess->pKey_hash,
iv, src,
length_in_bits, dst);
}
@@ -276,33 +282,34 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
return processed_ops;
}
-/** Process a batch of crypto ops which shares the same session. */
+/** Process a batch of crypto ops which shares the same operation type. */
static int
-process_ops(struct rte_crypto_op **ops, struct zuc_session *session,
+process_ops(struct rte_crypto_op **ops, enum zuc_operation op_type,
+ struct zuc_session **sessions,
struct zuc_qp *qp, uint8_t num_ops,
uint16_t *accumulated_enqueued_ops)
{
unsigned i;
unsigned enqueued_ops, processed_ops;
- switch (session->op) {
+ switch (op_type) {
case ZUC_OP_ONLY_CIPHER:
processed_ops = process_zuc_cipher_op(ops,
- session, num_ops);
+ sessions, num_ops);
break;
case ZUC_OP_ONLY_AUTH:
- processed_ops = process_zuc_hash_op(qp, ops, session,
+ processed_ops = process_zuc_hash_op(qp, ops, sessions,
num_ops);
break;
case ZUC_OP_CIPHER_AUTH:
- processed_ops = process_zuc_cipher_op(ops, session,
+ processed_ops = process_zuc_cipher_op(ops, sessions,
num_ops);
- process_zuc_hash_op(qp, ops, session, processed_ops);
+ process_zuc_hash_op(qp, ops, sessions, processed_ops);
break;
case ZUC_OP_AUTH_CIPHER:
- processed_ops = process_zuc_hash_op(qp, ops, session,
+ processed_ops = process_zuc_hash_op(qp, ops, sessions,
num_ops);
- process_zuc_cipher_op(ops, session, processed_ops);
+ process_zuc_cipher_op(ops, sessions, processed_ops);
break;
default:
/* Operation not supported. */
@@ -318,10 +325,10 @@ process_ops(struct rte_crypto_op **ops, struct zuc_session *session,
ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* Free session if a session-less crypto op. */
if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(session, 0, sizeof(struct zuc_session));
+ memset(sessions[i], 0, sizeof(struct zuc_session));
memset(ops[i]->sym->session, 0,
- rte_cryptodev_get_header_session_size());
- rte_mempool_put(qp->sess_mp, session);
+ rte_cryptodev_sym_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, sessions[i]);
rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
ops[i]->sym->session = NULL;
}
@@ -342,7 +349,10 @@ zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
struct rte_crypto_op *c_ops[ZUC_MAX_BURST];
struct rte_crypto_op *curr_c_op;
- struct zuc_session *prev_sess = NULL, *curr_sess = NULL;
+ struct zuc_session *curr_sess;
+ struct zuc_session *sessions[ZUC_MAX_BURST];
+ enum zuc_operation prev_zuc_op = ZUC_OP_NOT_SUPPORTED;
+ enum zuc_operation curr_zuc_op;
struct zuc_qp *qp = queue_pair;
unsigned i;
uint8_t burst_size = 0;
@@ -352,61 +362,70 @@ zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
for (i = 0; i < nb_ops; i++) {
curr_c_op = ops[i];
- /* Set status as enqueued (not processed yet) by default. */
- curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-
curr_sess = zuc_get_session(qp, curr_c_op);
- if (unlikely(curr_sess == NULL ||
- curr_sess->op == ZUC_OP_NOT_SUPPORTED)) {
+ if (unlikely(curr_sess == NULL)) {
curr_c_op->status =
RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
break;
}
- /* Batch ops that share the same session. */
- if (prev_sess == NULL) {
- prev_sess = curr_sess;
- c_ops[burst_size++] = curr_c_op;
- } else if (curr_sess == prev_sess) {
- c_ops[burst_size++] = curr_c_op;
+ curr_zuc_op = curr_sess->op;
+
+ /*
+ * Batch ops that share the same operation type
+ * (cipher only, auth only...).
+ */
+ if (burst_size == 0) {
+ prev_zuc_op = curr_zuc_op;
+ c_ops[0] = curr_c_op;
+ sessions[0] = curr_sess;
+ burst_size++;
+ } else if (curr_zuc_op == prev_zuc_op) {
+ c_ops[burst_size] = curr_c_op;
+ sessions[burst_size] = curr_sess;
+ burst_size++;
/*
* When there are enough ops to process in a batch,
* process them, and start a new batch.
*/
if (burst_size == ZUC_MAX_BURST) {
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size, &enqueued_ops);
+ processed_ops = process_ops(c_ops, curr_zuc_op,
+ sessions, qp, burst_size,
+ &enqueued_ops);
if (processed_ops < burst_size) {
burst_size = 0;
break;
}
burst_size = 0;
- prev_sess = NULL;
}
} else {
/*
- * Different session, process the ops
- * of the previous session.
+ * Different operation type, process the ops
+ * of the previous type.
*/
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size, &enqueued_ops);
+ processed_ops = process_ops(c_ops, prev_zuc_op,
+ sessions, qp, burst_size,
+ &enqueued_ops);
if (processed_ops < burst_size) {
burst_size = 0;
break;
}
burst_size = 0;
- prev_sess = curr_sess;
+ prev_zuc_op = curr_zuc_op;
- c_ops[burst_size++] = curr_c_op;
+ c_ops[0] = curr_c_op;
+ sessions[0] = curr_sess;
+ burst_size++;
}
}
if (burst_size != 0) {
- /* Process the crypto ops of the last session. */
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size, &enqueued_ops);
+ /* Process the crypto ops of the last operation type. */
+ processed_ops = process_ops(c_ops, prev_zuc_op,
+ sessions, qp, burst_size,
+ &enqueued_ops);
}
qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
@@ -524,5 +543,5 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ZUC_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(zuc_crypto_drv, cryptodev_zuc_pmd_drv,
+RTE_PMD_REGISTER_CRYPTO_DRIVER(zuc_crypto_drv, cryptodev_zuc_pmd_drv.driver,
cryptodev_driver_id);
diff --git a/drivers/event/Makefile b/drivers/event/Makefile
index c3d89a15..f301d8dc 100644
--- a/drivers/event/Makefile
+++ b/drivers/event/Makefile
@@ -7,8 +7,12 @@ include $(RTE_SDK)/mk/rte.vars.mk
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV) += skeleton
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw
DIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += octeontx
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_EVENTDEV) += dpaa
+endif
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2
+endif
DIRS-$(CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV) += opdl
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
index 00068015..5443ef56 100644
--- a/drivers/event/dpaa/dpaa_eventdev.c
+++ b/drivers/event/dpaa/dpaa_eventdev.c
@@ -29,7 +29,7 @@
#include <rte_event_eth_rx_adapter.h>
#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>
-#include <rte_cycles_64.h>
+#include <rte_cycles.h>
#include <dpaa_ethdev.h>
#include "dpaa_eventdev.h"
@@ -571,7 +571,7 @@ dpaa_event_eth_rx_adapter_stop(const struct rte_eventdev *dev,
return 0;
}
-static const struct rte_eventdev_ops dpaa_eventdev_ops = {
+static struct rte_eventdev_ops dpaa_eventdev_ops = {
.dev_infos_get = dpaa_event_dev_info_get,
.dev_configure = dpaa_event_dev_configure,
.dev_start = dpaa_event_dev_start,
diff --git a/drivers/event/dpaa/dpaa_eventdev.h b/drivers/event/dpaa/dpaa_eventdev.h
index 918fe35c..583e46ca 100644
--- a/drivers/event/dpaa/dpaa_eventdev.h
+++ b/drivers/event/dpaa/dpaa_eventdev.h
@@ -26,7 +26,7 @@
#define DPAA_EVENT_MAX_QUEUE_FLOWS 2048
#define DPAA_EVENT_MAX_QUEUE_PRIORITY_LEVELS 8
#define DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS 0
-#define DPAA_EVENT_MAX_EVENT_PORT RTE_MAX_LCORE
+#define DPAA_EVENT_MAX_EVENT_PORT RTE_MIN(RTE_MAX_LCORE, INT8_MAX)
#define DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH 8
#define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS 100UL
#define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID ((uint64_t)-1)
diff --git a/drivers/event/dpaa/meson.build b/drivers/event/dpaa/meson.build
new file mode 100644
index 00000000..0914f858
--- /dev/null
+++ b/drivers/event/dpaa/meson.build
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+deps += ['pmd_dpaa']
+sources = files('dpaa_eventdev.c')
+
+allow_experimental_apis = true
diff --git a/drivers/event/dpaa2/Makefile b/drivers/event/dpaa2/Makefile
index b26862cd..5e1a6320 100644
--- a/drivers/event/dpaa2/Makefile
+++ b/drivers/event/dpaa2/Makefile
@@ -18,7 +18,8 @@ CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/portal
CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa2
CFLAGS += -I$(RTE_SDK)/drivers/event/dpaa2
CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
-LDLIBS += -lrte_eal -lrte_eventdev -lrte_bus_fslmc -lrte_pmd_dpaa2
+LDLIBS += -lrte_eal -lrte_eventdev
+LDLIBS += -lrte_bus_fslmc -lrte_mempool_dpaa2 -lrte_pmd_dpaa2
LDLIBS += -lrte_bus_vdev
CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2
CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc
@@ -28,6 +29,9 @@ EXPORT_MAP := rte_pmd_dpaa2_event_version.map
LIBABIVER := 1
+# depends on fslmc bus which uses experimental API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
#
# all source are stored in SRCS-y
#
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index c3e6fbff..cd801bfb 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -72,7 +72,7 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret) {
- DPAA2_EVENTDEV_ERR("Failure in affining portal\n");
+ DPAA2_EVENTDEV_ERR("Failure in affining portal");
return 0;
}
}
@@ -87,10 +87,10 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
const struct rte_event *event = &ev[num_tx + loop];
if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
- fqid = evq_info->dpci->queue[
+ fqid = evq_info->dpci->rx_queue[
DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
else
- fqid = evq_info->dpci->queue[
+ fqid = evq_info->dpci->rx_queue[
DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;
/* Prepare enqueue descriptor */
@@ -122,11 +122,12 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
if (!loop)
return num_tx;
frames_to_send = loop;
- DPAA2_EVENTDEV_ERR("Unable to allocate memory");
+ DPAA2_EVENTDEV_ERR(
+ "Unable to allocate event object");
goto send_partial;
}
rte_memcpy(ev_temp, event, sizeof(struct rte_event));
- DPAA2_SET_FD_ADDR((&fd_arr[loop]), ev_temp);
+ DPAA2_SET_FD_ADDR((&fd_arr[loop]), (size_t)ev_temp);
DPAA2_SET_FD_LEN((&fd_arr[loop]),
sizeof(struct rte_event));
}
@@ -153,26 +154,12 @@ dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev)
static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks)
{
struct epoll_event epoll_ev;
- int ret, i = 0;
qbman_swp_interrupt_clear_status(DPAA2_PER_LCORE_PORTAL,
QBMAN_SWP_INTERRUPT_DQRI);
-RETRY:
- ret = epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
+ epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
&epoll_ev, 1, timeout_ticks);
- if (ret < 1) {
- /* sometimes due to some spurious interrupts epoll_wait fails
- * with errno EINTR. so here we are retrying epoll_wait in such
- * case to avoid the problem.
- */
- if (errno == EINTR) {
- DPAA2_EVENTDEV_DEBUG("epoll_wait fails\n");
- if (i++ > 10)
- DPAA2_EVENTDEV_DEBUG("Dequeue burst Failed\n");
- goto RETRY;
- }
- }
}
static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
@@ -182,7 +169,7 @@ static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
struct rte_event *ev)
{
struct rte_event *ev_temp =
- (struct rte_event *)DPAA2_GET_FD_ADDR(fd);
+ (struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);
RTE_SET_USED(rxq);
@@ -199,7 +186,7 @@ static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
struct rte_event *ev)
{
struct rte_event *ev_temp =
- (struct rte_event *)DPAA2_GET_FD_ADDR(fd);
+ (struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);
uint8_t dqrr_index = qbman_get_dqrr_idx(dq);
RTE_SET_USED(swp);
@@ -227,7 +214,7 @@ dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret) {
- DPAA2_EVENTDEV_ERR("Failure in affining portal\n");
+ DPAA2_EVENTDEV_ERR("Failure in affining portal");
return 0;
}
}
@@ -258,12 +245,12 @@ dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
qbman_swp_prefetch_dqrr_next(swp);
fd = qbman_result_DQ_fd(dq);
- rxq = (struct dpaa2_queue *)qbman_result_DQ_fqd_ctx(dq);
+ rxq = (struct dpaa2_queue *)(size_t)qbman_result_DQ_fqd_ctx(dq);
if (rxq) {
rxq->cb(swp, fd, dq, rxq, &ev[num_pkts]);
} else {
qbman_swp_dqrr_consume(swp, dq);
- DPAA2_EVENTDEV_ERR("Null Return VQ received\n");
+ DPAA2_EVENTDEV_ERR("Null Return VQ received");
return 0;
}
@@ -335,7 +322,7 @@ dpaa2_eventdev_configure(const struct rte_eventdev *dev)
priv->event_dev_cfg = conf->event_dev_cfg;
DPAA2_EVENTDEV_DEBUG("Configured eventdev devid=%d",
- dev->data->dev_id);
+ dev->data->dev_id);
return 0;
}
@@ -473,7 +460,6 @@ dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
0, dpaa2_portal->dpio_dev->token,
evq_info->dpcon->dpcon_id);
- evq_info->link = 0;
}
return (int)nb_unlinks;
@@ -494,23 +480,20 @@ dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
for (i = 0; i < nb_links; i++) {
evq_info = &priv->evq_info[queues[i]];
- if (evq_info->link)
- continue;
ret = dpio_add_static_dequeue_channel(
dpaa2_portal->dpio_dev->dpio,
CMD_PRI_LOW, dpaa2_portal->dpio_dev->token,
evq_info->dpcon->dpcon_id, &channel_index);
if (ret < 0) {
- DPAA2_EVENTDEV_ERR("Static dequeue cfg failed with ret: %d\n",
- ret);
+ DPAA2_EVENTDEV_ERR(
+ "Static dequeue config failed: err(%d)", ret);
goto err;
}
qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
channel_index, 1);
evq_info->dpcon->channel_index = channel_index;
- evq_info->link = 1;
}
RTE_SET_USED(priorities);
@@ -524,7 +507,6 @@ err:
dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
0, dpaa2_portal->dpio_dev->token,
evq_info->dpcon->dpcon_id);
- evq_info->link = 0;
}
return ret;
}
@@ -587,8 +569,8 @@ dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev,
ret = dpaa2_eth_eventq_attach(eth_dev, i,
dpcon_id, queue_conf);
if (ret) {
- DPAA2_EVENTDEV_ERR("dpaa2_eth_eventq_attach failed: ret %d\n",
- ret);
+ DPAA2_EVENTDEV_ERR(
+ "Event queue attach failed: err(%d)", ret);
goto fail;
}
}
@@ -620,7 +602,8 @@ dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
dpcon_id, queue_conf);
if (ret) {
- DPAA2_EVENTDEV_ERR("dpaa2_eth_eventq_attach failed: ret: %d\n", ret);
+ DPAA2_EVENTDEV_ERR(
+ "Event queue attach failed: err(%d)", ret);
return ret;
}
return 0;
@@ -639,8 +622,8 @@ dpaa2_eventdev_eth_queue_del_all(const struct rte_eventdev *dev,
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
ret = dpaa2_eth_eventq_detach(eth_dev, i);
if (ret) {
- DPAA2_EVENTDEV_ERR("dpaa2_eth_eventq_detach failed: ret %d\n",
- ret);
+ DPAA2_EVENTDEV_ERR(
+ "Event queue detach failed: err(%d)", ret);
return ret;
}
}
@@ -662,7 +645,8 @@ dpaa2_eventdev_eth_queue_del(const struct rte_eventdev *dev,
ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id);
if (ret) {
- DPAA2_EVENTDEV_ERR("dpaa2_eth_eventq_detach failed: ret: %d\n", ret);
+ DPAA2_EVENTDEV_ERR(
+ "Event queue detach failed: err(%d)", ret);
return ret;
}
@@ -693,7 +677,7 @@ dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
return 0;
}
-static const struct rte_eventdev_ops dpaa2_eventdev_ops = {
+static struct rte_eventdev_ops dpaa2_eventdev_ops = {
.dev_infos_get = dpaa2_eventdev_info_get,
.dev_configure = dpaa2_eventdev_configure,
.dev_start = dpaa2_eventdev_start,
@@ -730,20 +714,21 @@ dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;
- dpci_dev->queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
+ dpci_dev->rx_queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
dpaa2_eventdev_process_parallel;
- dpci_dev->queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
+ dpci_dev->rx_queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
dpaa2_eventdev_process_atomic;
for (i = 0 ; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
- rx_queue_cfg.user_ctx = (uint64_t)(&dpci_dev->queue[i]);
+ rx_queue_cfg.user_ctx = (size_t)(&dpci_dev->rx_queue[i]);
ret = dpci_set_rx_queue(&dpci_dev->dpci,
CMD_PRI_LOW,
dpci_dev->token, i,
&rx_queue_cfg);
if (ret) {
DPAA2_EVENTDEV_ERR(
- "set_rx_q failed with err code: %d", ret);
+ "DPCI Rx queue setup failed: err(%d)",
+ ret);
return ret;
}
}
@@ -763,7 +748,7 @@ dpaa2_eventdev_create(const char *name)
sizeof(struct dpaa2_eventdev),
rte_socket_id());
if (eventdev == NULL) {
- DPAA2_EVENTDEV_ERR("Failed to create eventdev vdev %s", name);
+ DPAA2_EVENTDEV_ERR("Failed to create Event device %s", name);
goto fail;
}
@@ -798,7 +783,7 @@ dpaa2_eventdev_create(const char *name)
ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
if (ret) {
DPAA2_EVENTDEV_ERR(
- "dpci setup failed with err code: %d", ret);
+ "DPCI setup failed: err(%d)", ret);
return ret;
}
priv->max_event_queues++;
@@ -836,3 +821,12 @@ static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
};
RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);
+
+RTE_INIT(dpaa2_eventdev_init_log);
+static void
+dpaa2_eventdev_init_log(void)
+{
+ dpaa2_logtype_event = rte_log_register("pmd.event.dpaa2");
+ if (dpaa2_logtype_event >= 0)
+ rte_log_set_level(dpaa2_logtype_event, RTE_LOG_NOTICE);
+}
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.h b/drivers/event/dpaa2/dpaa2_eventdev.h
index 91c8f2a3..229f66af 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.h
+++ b/drivers/event/dpaa2/dpaa2_eventdev.h
@@ -63,7 +63,6 @@ struct evq_info_t {
struct dpaa2_dpci_dev *dpci;
/* Configuration provided by the user */
uint32_t event_queue_cfg;
- uint8_t link;
};
struct dpaa2_eventdev {
diff --git a/drivers/event/dpaa2/dpaa2_eventdev_logs.h b/drivers/event/dpaa2/dpaa2_eventdev_logs.h
index 7d250c3f..48f1abd1 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev_logs.h
+++ b/drivers/event/dpaa2/dpaa2_eventdev_logs.h
@@ -9,13 +9,15 @@
extern int dpaa2_logtype_event;
#define DPAA2_EVENTDEV_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, dpaa2_logtype_event, "%s(): " fmt "\n", \
- __func__, ##args)
+ rte_log(RTE_LOG_ ## level, dpaa2_logtype_event, "dpaa2_event: " \
+ fmt "\n", ##args)
+
+#define DPAA2_EVENTDEV_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_logtype_event, "dpaa2_event: %s(): " \
+ fmt "\n", __func__, ##args)
#define EVENTDEV_INIT_FUNC_TRACE() DPAA2_EVENTDEV_LOG(DEBUG, " >>")
-#define DPAA2_EVENTDEV_DEBUG(fmt, args...) \
- DPAA2_EVENTDEV_LOG(DEBUG, fmt, ## args)
#define DPAA2_EVENTDEV_INFO(fmt, args...) \
DPAA2_EVENTDEV_LOG(INFO, fmt, ## args)
#define DPAA2_EVENTDEV_ERR(fmt, args...) \
diff --git a/drivers/event/dpaa2/dpaa2_hw_dpcon.c b/drivers/event/dpaa2/dpaa2_hw_dpcon.c
index f2377b98..d64e588a 100644
--- a/drivers/event/dpaa2/dpaa2_hw_dpcon.c
+++ b/drivers/event/dpaa2/dpaa2_hw_dpcon.c
@@ -20,11 +20,11 @@
#include <rte_dev.h>
#include <rte_ethdev_driver.h>
-#include <fslmc_logs.h>
#include <rte_fslmc.h>
#include <mc/fsl_dpcon.h>
#include <portal/dpaa2_hw_pvt.h>
#include "dpaa2_eventdev.h"
+#include "dpaa2_eventdev_logs.h"
TAILQ_HEAD(dpcon_dev_list, dpaa2_dpcon_dev);
static struct dpcon_dev_list dpcon_dev_list
@@ -42,7 +42,8 @@ rte_dpaa2_create_dpcon_device(int dev_fd __rte_unused,
/* Allocate DPAA2 dpcon handle */
dpcon_node = rte_malloc(NULL, sizeof(struct dpaa2_dpcon_dev), 0);
if (!dpcon_node) {
- PMD_DRV_LOG(ERR, "Memory allocation failed for DPCON Device");
+ DPAA2_EVENTDEV_ERR(
+ "Memory allocation failed for dpcon device");
return -1;
}
@@ -51,8 +52,8 @@ rte_dpaa2_create_dpcon_device(int dev_fd __rte_unused,
ret = dpcon_open(&dpcon_node->dpcon,
CMD_PRI_LOW, dpcon_id, &dpcon_node->token);
if (ret) {
- PMD_DRV_LOG(ERR, "Resource alloc failure with err code: %d",
- ret);
+ DPAA2_EVENTDEV_ERR("Unable to open dpcon device: err(%d)",
+ ret);
rte_free(dpcon_node);
return -1;
}
@@ -61,8 +62,8 @@ rte_dpaa2_create_dpcon_device(int dev_fd __rte_unused,
ret = dpcon_get_attributes(&dpcon_node->dpcon,
CMD_PRI_LOW, dpcon_node->token, &attr);
if (ret != 0) {
- PMD_DRV_LOG(ERR, "Reading device failed with err code: %d",
- ret);
+ DPAA2_EVENTDEV_ERR("dpcon attribute fetch failed: err(%d)",
+ ret);
rte_free(dpcon_node);
return -1;
}
@@ -75,8 +76,6 @@ rte_dpaa2_create_dpcon_device(int dev_fd __rte_unused,
TAILQ_INSERT_TAIL(&dpcon_dev_list, dpcon_node, next);
- RTE_LOG(DEBUG, PMD, "DPAA2: Added [dpcon.%d]\n", dpcon_id);
-
return 0;
}
diff --git a/drivers/event/dpaa2/meson.build b/drivers/event/dpaa2/meson.build
new file mode 100644
index 00000000..de7a4615
--- /dev/null
+++ b/drivers/event/dpaa2/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+deps += ['bus_vdev', 'pmd_dpaa2']
+sources = files('dpaa2_hw_dpcon.c',
+ 'dpaa2_eventdev.c')
+
+allow_experimental_apis = true
diff --git a/drivers/event/meson.build b/drivers/event/meson.build
index d7bc4854..e9511993 100644
--- a/drivers/event/meson.build
+++ b/drivers/event/meson.build
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-drivers = ['skeleton', 'sw', 'octeontx']
+drivers = ['dpaa', 'dpaa2', 'octeontx', 'skeleton', 'sw']
std_deps = ['eventdev', 'kvargs']
config_flag_fmt = 'RTE_LIBRTE_@0@_EVENTDEV_PMD'
driver_name_fmt = 'rte_pmd_@0@_event'
diff --git a/drivers/event/octeontx/Makefile b/drivers/event/octeontx/Makefile
index 0e49efd8..90ad2217 100644
--- a/drivers/event/octeontx/Makefile
+++ b/drivers/event/octeontx/Makefile
@@ -10,10 +10,12 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_pmd_octeontx_ssovf.a
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx/
CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx/
CFLAGS += -I$(RTE_SDK)/drivers/net/octeontx/
+CFLAGS += -DALLOW_EXPERIMENTAL_API
-LDLIBS += -lrte_eal -lrte_eventdev -lrte_mempool_octeontx -lrte_pmd_octeontx
+LDLIBS += -lrte_eal -lrte_eventdev -lrte_common_octeontx -lrte_pmd_octeontx
LDLIBS += -lrte_bus_pci -lrte_mempool -lrte_mbuf -lrte_kvargs
LDLIBS += -lrte_bus_vdev
@@ -27,18 +29,26 @@ LIBABIVER := 1
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_worker.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_evdev.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_evdev_selftest.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_probe.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += timvf_worker.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += timvf_evdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += timvf_probe.c
ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
CFLAGS_ssovf_worker.o += -fno-prefetch-loop-arrays
+CFLAGS_timvf_worker.o += -fno-prefetch-loop-arrays
ifeq ($(shell test $(GCC_VERSION) -ge 46 && echo 1), 1)
CFLAGS_ssovf_worker.o += -Ofast
+CFLAGS_timvf_worker.o += -Ofast
else
CFLAGS_ssovf_worker.o += -O3 -ffast-math
+CFLAGS_timvf_worker.o += -O3 -ffast-math
endif
else
CFLAGS_ssovf_worker.o += -Ofast
+CFLAGS_timvf_worker.o += -Ofast
endif
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/event/octeontx/meson.build b/drivers/event/octeontx/meson.build
index 358fc9fc..04185533 100644
--- a/drivers/event/octeontx/meson.build
+++ b/drivers/event/octeontx/meson.build
@@ -3,7 +3,12 @@
sources = files('ssovf_worker.c',
'ssovf_evdev.c',
- 'ssovf_evdev_selftest.c'
+ 'ssovf_evdev_selftest.c',
+ 'ssovf_probe.c',
+ 'timvf_worker.c',
+ 'timvf_evdev.c',
+ 'timvf_probe.c'
)
-deps += ['mempool_octeontx', 'bus_vdev', 'pmd_octeontx']
+allow_experimental_apis = true
+deps += ['common_octeontx', 'mempool_octeontx', 'bus_vdev', 'pmd_octeontx']
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index a1086077..2df70b52 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -18,8 +18,10 @@
#include <rte_bus_vdev.h>
#include "ssovf_evdev.h"
+#include "timvf_evdev.h"
int otx_logtype_ssovf;
+static uint8_t timvf_enable_stats;
RTE_INIT(otx_ssovf_init_log);
static void
@@ -49,7 +51,7 @@ ssovf_mbox_dev_info(struct ssovf_mbox_dev_info *info)
hdr.vfid = 0;
memset(info, 0, len);
- return octeontx_ssovf_mbox_send(&hdr, NULL, 0, info, len);
+ return octeontx_mbox_send(&hdr, NULL, 0, info, len);
}
struct ssovf_mbox_getwork_wait {
@@ -69,7 +71,7 @@ ssovf_mbox_getwork_tmo_set(uint32_t timeout_ns)
hdr.vfid = 0;
tmo_set.wait_ns = timeout_ns;
- ret = octeontx_ssovf_mbox_send(&hdr, &tmo_set, len, NULL, 0);
+ ret = octeontx_mbox_send(&hdr, &tmo_set, len, NULL, 0);
if (ret)
ssovf_log_err("Failed to set getwork timeout(%d)", ret);
@@ -99,7 +101,7 @@ ssovf_mbox_priority_set(uint8_t queue, uint8_t prio)
grp.affinity = 0xff;
grp.priority = prio / 32; /* Normalize to 0 to 7 */
- ret = octeontx_ssovf_mbox_send(&hdr, &grp, len, NULL, 0);
+ ret = octeontx_mbox_send(&hdr, &grp, len, NULL, 0);
if (ret)
ssovf_log_err("Failed to set grp=%d prio=%d", queue, prio);
@@ -125,7 +127,7 @@ ssovf_mbox_timeout_ticks(uint64_t ns, uint64_t *tmo_ticks)
memset(&ns2iter, 0, len);
ns2iter.wait_ns = ns;
- ret = octeontx_ssovf_mbox_send(&hdr, &ns2iter, len, &ns2iter, len);
+ ret = octeontx_mbox_send(&hdr, &ns2iter, len, &ns2iter, len);
if (ret < 0 || (ret != len)) {
ssovf_log_err("Failed to get tmo ticks ns=%"PRId64"", ns);
return -EIO;
@@ -276,7 +278,7 @@ ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id,
return -ENOMEM;
}
- ws->base = octeontx_ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0);
+ ws->base = ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0);
if (ws->base == NULL) {
rte_free(ws);
ssovf_log_err("Failed to get hws base addr port=%d", port_id);
@@ -290,7 +292,7 @@ ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id,
ws->port = port_id;
for (q = 0; q < edev->nb_event_queues; q++) {
- ws->grps[q] = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, q, 2);
+ ws->grps[q] = ssovf_bar(OCTEONTX_SSO_GROUP, q, 2);
if (ws->grps[q] == NULL) {
rte_free(ws);
ssovf_log_err("Failed to get grp%d base addr", q);
@@ -529,9 +531,9 @@ ssovf_start(struct rte_eventdev *dev)
for (i = 0; i < edev->nb_event_queues; i++) {
/* Consume all the events through HWS0 */
- ssows_flush_events(dev->data->ports[0], i);
+ ssows_flush_events(dev->data->ports[0], i, NULL, NULL);
- base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
+ base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
base += SSO_VHGRP_QCTL;
ssovf_write64(1, base); /* Enable SSO group */
}
@@ -541,6 +543,16 @@ ssovf_start(struct rte_eventdev *dev)
}
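+/* Flush callback used while stopping the device: forwards each drained event to the application's dev_stop_flush handler, if one is registered. */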
static void
+ssows_handle_event(void *arg, struct rte_event event)
+{
+ struct rte_eventdev *dev = arg;
+
+ if (dev->dev_ops->dev_stop_flush != NULL)
+ dev->dev_ops->dev_stop_flush(dev->data->dev_id, event,
+ dev->data->dev_stop_flush_arg);
+}
+
+static void
ssovf_stop(struct rte_eventdev *dev)
{
struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
@@ -557,9 +569,10 @@ ssovf_stop(struct rte_eventdev *dev)
for (i = 0; i < edev->nb_event_queues; i++) {
/* Consume all the events through HWS0 */
- ssows_flush_events(dev->data->ports[0], i);
+ ssows_flush_events(dev->data->ports[0], i,
+ ssows_handle_event, dev);
- base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
+ base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
base += SSO_VHGRP_QCTL;
ssovf_write64(0, base); /* Disable SSO group */
}
@@ -590,8 +603,16 @@ ssovf_selftest(const char *key __rte_unused, const char *value,
return 0;
}
+static int
+ssovf_timvf_caps_get(const struct rte_eventdev *dev, uint64_t flags,
+ uint32_t *caps, const struct rte_event_timer_adapter_ops **ops)
+{
+ return timvf_timer_adapter_caps_get(dev, flags, caps, ops,
+ timvf_enable_stats);
+}
+
/* Initialize and register event driver with DPDK Application */
-static const struct rte_eventdev_ops ssovf_ops = {
+static struct rte_eventdev_ops ssovf_ops = {
.dev_infos_get = ssovf_info_get,
.dev_configure = ssovf_configure,
.queue_def_conf = ssovf_queue_def_conf,
@@ -610,6 +631,8 @@ static const struct rte_eventdev_ops ssovf_ops = {
.eth_rx_adapter_start = ssovf_eth_rx_adapter_start,
.eth_rx_adapter_stop = ssovf_eth_rx_adapter_stop,
+ .timer_adapter_caps_get = ssovf_timvf_caps_get,
+
.dev_selftest = test_eventdev_octeontx,
.dump = ssovf_dump,
@@ -621,7 +644,7 @@ static const struct rte_eventdev_ops ssovf_ops = {
static int
ssovf_vdev_probe(struct rte_vdev_device *vdev)
{
- struct octeontx_ssovf_info oinfo;
+ struct ssovf_info oinfo;
struct ssovf_mbox_dev_info info;
struct ssovf_evdev *edev;
struct rte_eventdev *eventdev;
@@ -633,6 +656,7 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev)
static const char *const args[] = {
SSOVF_SELFTEST_ARG,
+ TIMVF_ENABLE_STATS_ARG,
NULL
};
@@ -660,6 +684,15 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev)
rte_kvargs_free(kvlist);
return ret;
}
+
+ ret = rte_kvargs_process(kvlist,
+ TIMVF_ENABLE_STATS_ARG,
+ ssovf_selftest, &timvf_enable_stats);
+ if (ret != 0) {
+ ssovf_log_err("%s: Error in timvf stats", name);
+ rte_kvargs_free(kvlist);
+ return ret;
+ }
}
rte_kvargs_free(kvlist);
@@ -679,7 +712,7 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev)
return 0;
}
- ret = octeontx_ssovf_info(&oinfo);
+ ret = ssovf_info(&oinfo);
if (ret) {
ssovf_log_err("Failed to probe and validate ssovfs %d", ret);
goto error;
diff --git a/drivers/event/octeontx/ssovf_evdev.h b/drivers/event/octeontx/ssovf_evdev.h
index d1825b4f..18293e96 100644
--- a/drivers/event/octeontx/ssovf_evdev.h
+++ b/drivers/event/octeontx/ssovf_evdev.h
@@ -119,6 +119,16 @@ do { \
} while (0)
#endif
+struct ssovf_info {
+ uint16_t domain; /* Domain id */
+ uint8_t total_ssovfs; /* Total sso groups available in domain */
+ uint8_t total_ssowvfs;/* Total sso hws available in domain */
+};
+
+enum ssovf_type {
+ OCTEONTX_SSO_GROUP, /* SSO group vf */
+ OCTEONTX_SSO_HWS, /* SSO hardware workslot vf */
+};
struct ssovf_evdev {
uint8_t max_event_queues;
@@ -164,8 +174,13 @@ uint16_t ssows_deq_timeout(void *port, struct rte_event *ev,
uint64_t timeout_ticks);
uint16_t ssows_deq_timeout_burst(void *port, struct rte_event ev[],
uint16_t nb_events, uint64_t timeout_ticks);
-void ssows_flush_events(struct ssows *ws, uint8_t queue_id);
+
+typedef void (*ssows_handle_event_t)(void *arg, struct rte_event ev);
+void ssows_flush_events(struct ssows *ws, uint8_t queue_id,
+ ssows_handle_event_t fn, void *arg);
void ssows_reset(struct ssows *ws);
+int ssovf_info(struct ssovf_info *info);
+void *ssovf_bar(enum ssovf_type, uint8_t id, uint8_t bar);
int test_eventdev_octeontx(void);
#endif /* __SSOVF_EVDEV_H__ */
diff --git a/drivers/event/octeontx/ssovf_evdev_selftest.c b/drivers/event/octeontx/ssovf_evdev_selftest.c
index 5e012a95..239362fc 100644
--- a/drivers/event/octeontx/ssovf_evdev_selftest.c
+++ b/drivers/event/octeontx/ssovf_evdev_selftest.c
@@ -688,6 +688,40 @@ test_multi_queue_enq_multi_port_deq(void)
nr_ports, 0xff /* invalid */);
}
+static void
+flush(uint8_t dev_id, struct rte_event event, void *arg)
+{
+ unsigned int *count = arg;
+
+ RTE_SET_USED(dev_id);
+ if (event.event_type == RTE_EVENT_TYPE_CPU)
+ *count = *count + 1;
+
+}
+
+static int
+test_dev_stop_flush(void)
+{
+ unsigned int total_events = MAX_EVENTS, count = 0;
+ int ret;
+
+ ret = generate_random_events(total_events);
+ if (ret)
+ return -1;
+
+ ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
+ if (ret)
+ return -2;
+ rte_event_dev_stop(evdev);
+ ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
+ if (ret)
+ return -3;
+ RTE_TEST_ASSERT_EQUAL(total_events, count,
+ "count mismatch total_events=%d count=%d",
+ total_events, count);
+ return 0;
+}
+
static int
validate_queue_to_port_single_link(uint32_t index, uint8_t port,
struct rte_event *ev)
@@ -1415,6 +1449,8 @@ test_eventdev_octeontx(void)
OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
test_multi_queue_enq_single_port_deq);
OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_dev_stop_flush);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
test_multi_queue_enq_multi_port_deq);
OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
test_queue_to_port_single_link);
diff --git a/drivers/mempool/octeontx/octeontx_ssovf.c b/drivers/event/octeontx/ssovf_probe.c
index 97b24066..b3db596d 100644
--- a/drivers/mempool/octeontx/octeontx_ssovf.c
+++ b/drivers/event/octeontx/ssovf_probe.c
@@ -10,7 +10,7 @@
#include <rte_bus_pci.h>
#include "octeontx_mbox.h"
-#include "octeontx_pool_logs.h"
+#include "ssovf_evdev.h"
#define PCI_VENDOR_ID_CAVIUM 0x177D
#define PCI_DEVICE_ID_OCTEONTX_SSOGRP_VF 0xA04B
@@ -52,7 +52,7 @@ static struct ssodev sdev;
/* Interface functions */
int
-octeontx_ssovf_info(struct octeontx_ssovf_info *info)
+ssovf_info(struct ssovf_info *info)
{
uint8_t i;
uint16_t domain;
@@ -97,7 +97,7 @@ octeontx_ssovf_info(struct octeontx_ssovf_info *info)
}
void*
-octeontx_ssovf_bar(enum octeontx_ssovf_type type, uint8_t id, uint8_t bar)
+ssovf_bar(enum ssovf_type type, uint8_t id, uint8_t bar)
{
if (rte_eal_process_type() != RTE_PROC_PRIMARY ||
type > OCTEONTX_SSO_HWS)
@@ -142,6 +142,7 @@ ssowvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
uint16_t vfid;
struct ssowvf_res *res;
struct ssowvf_identify *id;
+ uint8_t *ram_mbox_base;
RTE_SET_USED(pci_drv);
@@ -180,6 +181,14 @@ ssowvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
res->domain = id->domain;
sdev.total_ssowvfs++;
+ if (vfid == 0) {
+ ram_mbox_base = ssovf_bar(OCTEONTX_SSO_HWS, 0, 4);
+ if (octeontx_mbox_set_ram_mbox_base(ram_mbox_base)) {
+ mbox_log_err("Invalid Failed to set ram mbox base");
+ return -EINVAL;
+ }
+ }
+
rte_wmb();
mbox_log_dbg("Domain=%d hws=%d total_ssowvfs=%d", res->domain,
res->vfid, sdev.total_ssowvfs);
@@ -213,6 +222,7 @@ ssovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
uint16_t vfid;
uint8_t *idreg;
struct ssovf_res *res;
+ uint8_t *reg;
RTE_SET_USED(pci_drv);
@@ -246,6 +256,15 @@ ssovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
res->domain = val & 0xffff;
sdev.total_ssovfs++;
+ if (vfid == 0) {
+ reg = ssovf_bar(OCTEONTX_SSO_GROUP, 0, 0);
+ reg += SSO_VHGRP_PF_MBOX(1);
+ if (octeontx_mbox_set_reg(reg)) {
+ mbox_log_err("Invalid Failed to set mbox_reg");
+ return -EINVAL;
+ }
+ }
+
rte_wmb();
mbox_log_dbg("Domain=%d group=%d total_ssovfs=%d", res->domain,
res->vfid, sdev.total_ssovfs);
diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c
index 753c1e9f..d8bbc714 100644
--- a/drivers/event/octeontx/ssovf_worker.c
+++ b/drivers/event/octeontx/ssovf_worker.c
@@ -198,13 +198,13 @@ ssows_enq_fwd_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
}
void
-ssows_flush_events(struct ssows *ws, uint8_t queue_id)
+ssows_flush_events(struct ssows *ws, uint8_t queue_id,
+ ssows_handle_event_t fn, void *arg)
{
uint32_t reg_off;
- uint64_t aq_cnt = 1;
- uint64_t cq_ds_cnt = 1;
- uint64_t enable, get_work0, get_work1;
- uint8_t *base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, queue_id, 0);
+ struct rte_event ev;
+ uint64_t enable, aq_cnt = 1, cq_ds_cnt = 1;
+ uint8_t *base = ssovf_bar(OCTEONTX_SSO_GROUP, queue_id, 0);
enable = ssovf_read64(base + SSO_VHGRP_QCTL);
if (!enable)
@@ -219,11 +219,10 @@ ssows_flush_events(struct ssows *ws, uint8_t queue_id)
cq_ds_cnt = ssovf_read64(base + SSO_VHGRP_INT_CNT);
/* Extract cq and ds count */
cq_ds_cnt &= 0x1FFF1FFF0000;
- ssovf_load_pair(get_work0, get_work1, ws->base + reg_off);
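+ /* Dequeue one event from the group and hand it to the caller's flush handler, if any. */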
+ ssows_get_work(ws, &ev);
+ if (fn != NULL && ev.u64 != 0)
+ fn(arg, ev);
}
-
- RTE_SET_USED(get_work0);
- RTE_SET_USED(get_work1);
}
void
diff --git a/drivers/event/octeontx/timvf_evdev.c b/drivers/event/octeontx/timvf_evdev.c
new file mode 100644
index 00000000..c4fbd2d8
--- /dev/null
+++ b/drivers/event/octeontx/timvf_evdev.c
@@ -0,0 +1,407 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include "timvf_evdev.h"
+
+int otx_logtype_timvf;
+
+RTE_INIT(otx_timvf_init_log);
+static void
+otx_timvf_init_log(void)
+{
+ otx_logtype_timvf = rte_log_register("pmd.event.octeontx.timer");
+ if (otx_logtype_timvf >= 0)
+ rte_log_set_level(otx_logtype_timvf, RTE_LOG_NOTICE);
+}
+
+struct __rte_packed timvf_mbox_dev_info {
+ uint64_t ring_active[4];
+ uint64_t clk_freq;
+};
+
+/* Response messages */
+enum {
+ MBOX_RET_SUCCESS,
+ MBOX_RET_INVALID,
+ MBOX_RET_INTERNAL_ERR,
+};
+
+static int
+timvf_mbox_dev_info_get(struct timvf_mbox_dev_info *info)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+ uint16_t len = sizeof(struct timvf_mbox_dev_info);
+
+ hdr.coproc = TIM_COPROC;
+ hdr.msg = TIM_GET_DEV_INFO;
+ hdr.vfid = 0; /* TIM DEV is always 0. TIM RING ID changes. */
+
+ memset(info, 0, len);
+ return octeontx_mbox_send(&hdr, NULL, 0, info, len);
+}
+
+static void
+timvf_ring_info_get(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer_adapter_info *adptr_info)
+{
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+ adptr_info->max_tmo_ns = timr->max_tout;
+ adptr_info->min_resolution_ns = timr->tck_nsec;
+ rte_memcpy(&adptr_info->conf, &adptr->data->conf,
+ sizeof(struct rte_event_timer_adapter_conf));
+}
+
+static int
+timvf_ring_conf_set(struct timvf_ctrl_reg *rctl, uint8_t ring_id)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+ uint16_t len = sizeof(struct timvf_ctrl_reg);
+ int ret;
+
+ hdr.coproc = TIM_COPROC;
+ hdr.msg = TIM_SET_RING_INFO;
+ hdr.vfid = ring_id;
+
+ ret = octeontx_mbox_send(&hdr, rctl, len, NULL, 0);
+ if (ret < 0 || hdr.res_code != MBOX_RET_SUCCESS)
+ return -EACCES;
+ return 0;
+}
+
+static int
+timvf_get_start_cyc(uint64_t *now, uint8_t ring_id)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+
+ hdr.coproc = TIM_COPROC;
+ hdr.msg = TIM_RING_START_CYC_GET;
+ hdr.vfid = ring_id;
+ *now = 0;
+ return octeontx_mbox_send(&hdr, NULL, 0, now, sizeof(uint64_t));
+}
+
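+/*
+ * Round nb_bkts to a nearby power of two so bkt_and() can replace the modulo
+ * in bkt_mod(), while keeping the recomputed tick interval >= 1000 ns and the
+ * bucket count within TIM_MAX_BUCKETS. Returns 1 on success, 0 when neither
+ * the next higher nor the next lower power of two fits.
+ */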
+static int
+optimize_bucket_parameters(struct timvf_ring *timr)
+{
+ uint32_t hbkts;
+ uint32_t lbkts;
+ uint64_t tck_nsec;
+
+ hbkts = rte_align32pow2(timr->nb_bkts);
+ tck_nsec = RTE_ALIGN_MUL_CEIL(timr->max_tout / (hbkts - 1), 10);
+
+ if ((tck_nsec < 1000 || hbkts > TIM_MAX_BUCKETS))
+ hbkts = 0;
+
+ lbkts = rte_align32prevpow2(timr->nb_bkts);
+ tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout / (lbkts - 1)), 10);
+
+ if ((tck_nsec < 1000 || lbkts > TIM_MAX_BUCKETS))
+ lbkts = 0;
+
+ if (!hbkts && !lbkts)
+ return 0;
+
+ if (!hbkts) {
+ timr->nb_bkts = lbkts;
+ goto end;
+ } else if (!lbkts) {
+ timr->nb_bkts = hbkts;
+ goto end;
+ }
+
+ timr->nb_bkts = (hbkts - timr->nb_bkts) <
+ (timr->nb_bkts - lbkts) ? hbkts : lbkts;
+end:
+ timr->get_target_bkt = bkt_and;
+ timr->tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout /
+ (timr->nb_bkts - 1)), 10);
+ return 1;
+}
+
+static int
+timvf_ring_start(const struct rte_event_timer_adapter *adptr)
+{
+ int ret;
+ uint8_t use_fpa = 0;
+ uint64_t interval;
+ uintptr_t pool;
+ struct timvf_ctrl_reg rctrl;
+ struct timvf_mbox_dev_info dinfo;
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+
+ ret = timvf_mbox_dev_info_get(&dinfo);
+ if (ret < 0 || ret != sizeof(struct timvf_mbox_dev_info))
+ return -EINVAL;
+
+ /* Calculate the interval cycles according to clock source. */
+ switch (timr->clk_src) {
+ case TIM_CLK_SRC_SCLK:
+ interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
+ break;
+ case TIM_CLK_SRC_GPIO:
+ /* GPIO doesn't work on tck_nsec. */
+ interval = 0;
+ break;
+ case TIM_CLK_SRC_GTI:
+ interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
+ break;
+ case TIM_CLK_SRC_PTP:
+ interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
+ break;
+ default:
+ timvf_log_err("Unsupported clock source configured %d",
+ timr->clk_src);
+ return -EINVAL;
+ }
+
+ if (!strcmp(rte_mbuf_best_mempool_ops(), "octeontx_fpavf"))
+ use_fpa = 1;
+
+ /*CTRL0 register.*/
+ rctrl.rctrl0 = interval;
+
+ /*CTRL1 register.*/
+ rctrl.rctrl1 = (uint64_t)(timr->clk_src) << 51 |
+ 1ull << 48 /* LOCK_EN (Enable hw bucket lock mechanism) */ |
+ 1ull << 47 /* ENA */ |
+ 1ull << 44 /* ENA_LDWB */ |
+ (timr->nb_bkts - 1);
+
+ rctrl.rctrl2 = (uint64_t)(TIM_CHUNK_SIZE / 16) << 40;
+
+ if (use_fpa) {
+ pool = (uintptr_t)((struct rte_mempool *)
+ timr->chunk_pool)->pool_id;
+ ret = octeontx_fpa_bufpool_gpool(pool);
+ if (ret < 0) {
+ timvf_log_dbg("Unable to get gaura id");
+ ret = -ENOMEM;
+ goto error;
+ }
+ timvf_write64((uint64_t)ret,
+ (uint8_t *)timr->vbar0 + TIM_VRING_AURA);
+ } else {
+ rctrl.rctrl1 |= 1ull << 43 /* ENA_DFB (Enable don't free) */;
+ }
+
+ timvf_write64((uintptr_t)timr->bkt,
+ (uint8_t *)timr->vbar0 + TIM_VRING_BASE);
+ timvf_set_chunk_refill(timr, use_fpa);
+ if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id)) {
+ ret = -EACCES;
+ goto error;
+ }
+
+ if (timvf_get_start_cyc(&timr->ring_start_cyc,
+ timr->tim_ring_id) < 0) {
+ ret = -EACCES;
+ goto error;
+ }
+ timr->tck_int = NSEC2CLK(timr->tck_nsec, rte_get_timer_hz());
+ timr->fast_div = rte_reciprocal_value_u64(timr->tck_int);
+ timvf_log_info("nb_bkts %d min_ns %"PRIu64" min_cyc %"PRIu64""
+ " maxtmo %"PRIu64"\n",
+ timr->nb_bkts, timr->tck_nsec, interval,
+ timr->max_tout);
+
+ return 0;
+error:
+ rte_free(timr->bkt);
+ rte_mempool_free(timr->chunk_pool);
+ return ret;
+}
+
+static int
+timvf_ring_stop(const struct rte_event_timer_adapter *adptr)
+{
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+ struct timvf_ctrl_reg rctrl = {0};
+ rctrl.rctrl0 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL0);
+ rctrl.rctrl1 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL1);
+ rctrl.rctrl1 &= ~(1ull << 47); /* Disable */
+ rctrl.rctrl2 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL2);
+
+ if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id))
+ return -EACCES;
+ return 0;
+}
+
+static int
+timvf_ring_create(struct rte_event_timer_adapter *adptr)
+{
+ char pool_name[25];
+ int ret;
+ uint64_t nb_timers;
+ struct rte_event_timer_adapter_conf *rcfg = &adptr->data->conf;
+ struct timvf_ring *timr;
+ struct timvf_info tinfo;
+ const char *mempool_ops;
+ unsigned int mp_flags = 0;
+
+ if (timvf_info(&tinfo) < 0)
+ return -ENODEV;
+
+ if (adptr->data->id >= tinfo.total_timvfs)
+ return -ENODEV;
+
+ timr = rte_zmalloc("octeontx_timvf_priv",
+ sizeof(struct timvf_ring), 0);
+ if (timr == NULL)
+ return -ENOMEM;
+
+ adptr->data->adapter_priv = timr;
+ /* Check config parameters. */
+ if ((rcfg->clk_src != RTE_EVENT_TIMER_ADAPTER_CPU_CLK) &&
+ (!rcfg->timer_tick_ns ||
+ rcfg->timer_tick_ns < TIM_MIN_INTERVAL)) {
+ timvf_log_err("Too low timer ticks");
+ goto cfg_err;
+ }
+
+ timr->clk_src = (int) rcfg->clk_src;
+ timr->tim_ring_id = adptr->data->id;
+ timr->tck_nsec = RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10);
+ timr->max_tout = rcfg->max_tmo_ns;
+ timr->nb_bkts = (timr->max_tout / timr->tck_nsec);
+ timr->vbar0 = timvf_bar(timr->tim_ring_id, 0);
+ timr->bkt_pos = (uint8_t *)timr->vbar0 + TIM_VRING_REL;
+ nb_timers = rcfg->nb_timers;
+ timr->get_target_bkt = bkt_mod;
+
+ timr->nb_chunks = nb_timers / nb_chunk_slots;
+
+ /* Try to optimize the bucket parameters. */
+ if ((rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES)
+ && !rte_is_power_of_2(timr->nb_bkts)) {
+ if (optimize_bucket_parameters(timr)) {
+ timvf_log_info("Optimized configured values");
+ timvf_log_dbg("nb_bkts : %"PRIu32"", timr->nb_bkts);
+ timvf_log_dbg("tck_nsec : %"PRIu64"", timr->tck_nsec);
+ } else
+ timvf_log_info("Failed to Optimize configured values");
+ }
+
+ if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) {
+ mp_flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
+ timvf_log_info("Using single producer mode");
+ }
+
+ timr->bkt = rte_zmalloc("octeontx_timvf_bucket",
+ (timr->nb_bkts) * sizeof(struct tim_mem_bucket),
+ 0);
+ if (timr->bkt == NULL)
+ goto mem_err;
+
+ snprintf(pool_name, sizeof(pool_name), "timvf_chunk_pool%d",
+ timr->tim_ring_id);
+ timr->chunk_pool = (void *)rte_mempool_create_empty(pool_name,
+ timr->nb_chunks, TIM_CHUNK_SIZE, 0, 0, rte_socket_id(),
+ mp_flags);
+
+ if (!timr->chunk_pool) {
+ rte_free(timr->bkt);
+ timvf_log_err("Unable to create chunkpool.");
+ return -ENOMEM;
+ }
+
+ mempool_ops = rte_mbuf_best_mempool_ops();
+ ret = rte_mempool_set_ops_byname(timr->chunk_pool,
+ mempool_ops, NULL);
+
+ if (ret != 0) {
+ timvf_log_err("Unable to set chunkpool ops.");
+ goto mem_err;
+ }
+
+ ret = rte_mempool_populate_default(timr->chunk_pool);
+ if (ret < 0) {
+ timvf_log_err("Unable to set populate chunkpool.");
+ goto mem_err;
+ }
+ timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VRING_BASE);
+ timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT);
+ timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT_W1S);
+ timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1C);
+ timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1S);
+
+ return 0;
+mem_err:
+ rte_free(timr);
+ return -ENOMEM;
+cfg_err:
+ rte_free(timr);
+ return -EINVAL;
+}
+
+static int
+timvf_ring_free(struct rte_event_timer_adapter *adptr)
+{
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+ rte_mempool_free(timr->chunk_pool);
+ rte_free(timr->bkt);
+ rte_free(adptr->data->adapter_priv);
+ return 0;
+}
+
+static int
+timvf_stats_get(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer_adapter_stats *stats)
+{
+ struct timvf_ring *timr = adapter->data->adapter_priv;
+ uint64_t bkt_cyc = rte_rdtsc() - timr->ring_start_cyc;
+
+ stats->evtim_exp_count = timr->tim_arm_cnt;
+ stats->ev_enq_count = timr->tim_arm_cnt;
+ stats->adapter_tick_count = rte_reciprocal_divide_u64(bkt_cyc,
+ &timr->fast_div);
+ return 0;
+}
+
+static int
+timvf_stats_reset(const struct rte_event_timer_adapter *adapter)
+{
+ struct timvf_ring *timr = adapter->data->adapter_priv;
+
+ timr->tim_arm_cnt = 0;
+ return 0;
+}
+
+static struct rte_event_timer_adapter_ops timvf_ops = {
+ .init = timvf_ring_create,
+ .uninit = timvf_ring_free,
+ .start = timvf_ring_start,
+ .stop = timvf_ring_stop,
+ .get_info = timvf_ring_info_get,
+};
+
+int
+timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
+ uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
+ uint8_t enable_stats)
+{
+ RTE_SET_USED(dev);
+
+ if (enable_stats) {
+ timvf_ops.stats_get = timvf_stats_get;
+ timvf_ops.stats_reset = timvf_stats_reset;
+ }
+
+ if (flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT)
+ timvf_ops.arm_burst = enable_stats ?
+ timvf_timer_arm_burst_sp_stats :
+ timvf_timer_arm_burst_sp;
+ else
+ timvf_ops.arm_burst = enable_stats ?
+ timvf_timer_arm_burst_mp_stats :
+ timvf_timer_arm_burst_mp;
+
+ timvf_ops.arm_tmo_tick_burst = enable_stats ?
+ timvf_timer_arm_tmo_brst_stats :
+ timvf_timer_arm_tmo_brst;
+ timvf_ops.cancel_burst = timvf_timer_cancel_burst;
+ *caps = RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT;
+ *ops = &timvf_ops;
+ return 0;
+}
diff --git a/drivers/event/octeontx/timvf_evdev.h b/drivers/event/octeontx/timvf_evdev.h
new file mode 100644
index 00000000..0185593f
--- /dev/null
+++ b/drivers/event/octeontx/timvf_evdev.h
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef __TIMVF_EVDEV_H__
+#define __TIMVF_EVDEV_H__
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_eventdev.h>
+#include <rte_event_timer_adapter.h>
+#include <rte_event_timer_adapter_pmd.h>
+#include <rte_io.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_mbuf_pool_ops.h>
+#include <rte_mempool.h>
+#include <rte_memzone.h>
+#include <rte_pci.h>
+#include <rte_prefetch.h>
+#include <rte_reciprocal.h>
+
+#include <octeontx_mbox.h>
+#include <octeontx_fpavf.h>
+
+#define timvf_log(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, otx_logtype_timvf, \
+ "[%s] %s() " fmt "\n", \
+ RTE_STR(event_timer_octeontx), __func__, ## args)
+
+#define timvf_log_info(fmt, ...) timvf_log(INFO, fmt, ##__VA_ARGS__)
+#define timvf_log_dbg(fmt, ...) timvf_log(DEBUG, fmt, ##__VA_ARGS__)
+#define timvf_log_err(fmt, ...) timvf_log(ERR, fmt, ##__VA_ARGS__)
+#define timvf_func_trace timvf_log_dbg
+
+#define TIM_COPROC (8)
+#define TIM_GET_DEV_INFO (1)
+#define TIM_GET_RING_INFO (2)
+#define TIM_SET_RING_INFO (3)
+#define TIM_RING_START_CYC_GET (4)
+
+#define TIM_MAX_RINGS (64)
+#define TIM_DEV_PER_NODE (1)
+#define TIM_VF_PER_DEV (64)
+#define TIM_RING_PER_DEV (TIM_VF_PER_DEV)
+#define TIM_RING_NODE_SHIFT (6)
+#define TIM_RING_MASK ((TIM_RING_PER_DEV) - 1)
+#define TIM_RING_INVALID (-1)
+
+#define TIM_MIN_INTERVAL (1E3)
+#define TIM_MAX_INTERVAL ((1ull << 32) - 1)
+#define TIM_MAX_BUCKETS (1ull << 20)
+#define TIM_CHUNK_SIZE (4096)
+#define TIM_MAX_CHUNKS_PER_BUCKET (1ull << 32)
+
+#define TIMVF_MAX_BURST (8)
+
+/* TIM VF Control/Status registers (CSRs): */
+/* VF_BAR0: */
+#define TIM_VF_NRSPERR_INT (0x0)
+#define TIM_VF_NRSPERR_INT_W1S (0x8)
+#define TIM_VF_NRSPERR_ENA_W1C (0x10)
+#define TIM_VF_NRSPERR_ENA_W1S (0x18)
+#define TIM_VRING_FR_RN_CYCLES (0x20)
+#define TIM_VRING_FR_RN_GPIOS (0x28)
+#define TIM_VRING_FR_RN_GTI (0x30)
+#define TIM_VRING_FR_RN_PTP (0x38)
+#define TIM_VRING_CTL0 (0x40)
+#define TIM_VRING_CTL1 (0x50)
+#define TIM_VRING_CTL2 (0x60)
+#define TIM_VRING_BASE (0x100)
+#define TIM_VRING_AURA (0x108)
+#define TIM_VRING_REL (0x110)
+
+#define TIM_CTL1_W0_S_BUCKET 20
+#define TIM_CTL1_W0_M_BUCKET ((1ull << (40 - 20)) - 1)
+
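+/*
+ * Layout of word 1 of a TIM_MEM_BUCKET as described by the shifts/masks
+ * below: [63:48] chunk remainder, [47:40] lock, [34] BSK, [33] HBT,
+ * [32] SBT, [31:0] number of entries.
+ */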
+#define TIM_BUCKET_W1_S_NUM_ENTRIES (0) /*Shift*/
+#define TIM_BUCKET_W1_M_NUM_ENTRIES ((1ull << (32 - 0)) - 1)
+#define TIM_BUCKET_W1_S_SBT (32)
+#define TIM_BUCKET_W1_M_SBT ((1ull << (33 - 32)) - 1)
+#define TIM_BUCKET_W1_S_HBT (33)
+#define TIM_BUCKET_W1_M_HBT ((1ull << (34 - 33)) - 1)
+#define TIM_BUCKET_W1_S_BSK (34)
+#define TIM_BUCKET_W1_M_BSK ((1ull << (35 - 34)) - 1)
+#define TIM_BUCKET_W1_S_LOCK (40)
+#define TIM_BUCKET_W1_M_LOCK ((1ull << (48 - 40)) - 1)
+#define TIM_BUCKET_W1_S_CHUNK_REMAINDER (48)
+#define TIM_BUCKET_W1_M_CHUNK_REMAINDER ((1ull << (64 - 48)) - 1)
+
+#define TIM_BUCKET_SEMA \
+ (TIM_BUCKET_CHUNK_REMAIN)
+
+#define TIM_BUCKET_CHUNK_REMAIN \
+ (TIM_BUCKET_W1_M_CHUNK_REMAINDER << TIM_BUCKET_W1_S_CHUNK_REMAINDER)
+
+#define TIM_BUCKET_LOCK \
+ (TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK)
+
+#define TIM_BUCKET_SEMA_WLOCK \
+ (TIM_BUCKET_CHUNK_REMAIN | (1ull << TIM_BUCKET_W1_S_LOCK))
+
+#define NSEC_PER_SEC 1E9
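+/* Convert between nanoseconds and coprocessor clock ticks for a clock running at __freq Hz. */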
+#define NSEC2CLK(__ns, __freq) (((__ns) * (__freq)) / NSEC_PER_SEC)
+#define CLK2NSEC(__clk, __freq) (((__clk) * NSEC_PER_SEC) / (__freq))
+
+#define timvf_read64 rte_read64_relaxed
+#define timvf_write64 rte_write64_relaxed
+
+#define TIMVF_ENABLE_STATS_ARG ("timvf_stats")
+
+extern int otx_logtype_timvf;
+static const uint16_t nb_chunk_slots = (TIM_CHUNK_SIZE / 16) - 1;
+
+struct timvf_info {
+ uint16_t domain; /* Domain id */
+ uint8_t total_timvfs; /* Total timvf available in domain */
+};
+
+enum timvf_clk_src {
+ TIM_CLK_SRC_SCLK = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
+ TIM_CLK_SRC_GPIO = RTE_EVENT_TIMER_ADAPTER_EXT_CLK0,
+ TIM_CLK_SRC_GTI = RTE_EVENT_TIMER_ADAPTER_EXT_CLK1,
+ TIM_CLK_SRC_PTP = RTE_EVENT_TIMER_ADAPTER_EXT_CLK2,
+};
+
+/* TIM_MEM_BUCKET */
+struct tim_mem_bucket {
+ uint64_t first_chunk;
+ union {
+ uint64_t w1;
+ struct {
+ uint32_t nb_entry;
+ uint8_t sbt:1;
+ uint8_t hbt:1;
+ uint8_t bsk:1;
+ uint8_t rsvd:5;
+ uint8_t lock;
+ int16_t chunk_remainder;
+ };
+ };
+ uint64_t current_chunk;
+ uint64_t pad;
+} __rte_packed __rte_aligned(8);
+
+struct tim_mem_entry {
+ uint64_t w0;
+ uint64_t wqe;
+} __rte_packed;
+
+struct timvf_ctrl_reg {
+ uint64_t rctrl0;
+ uint64_t rctrl1;
+ uint64_t rctrl2;
+ uint8_t use_pmu;
+} __rte_packed;
+
+struct timvf_ring;
+
+typedef uint32_t (*bkt_id)(const uint32_t bkt_tcks, const uint32_t nb_bkts);
+typedef struct tim_mem_entry * (*refill_chunk)(
+ struct tim_mem_bucket * const bkt,
+ struct timvf_ring * const timr);
+
+struct timvf_ring {
+ bkt_id get_target_bkt;
+ refill_chunk refill_chunk;
+ struct rte_reciprocal_u64 fast_div;
+ uint64_t ring_start_cyc;
+ uint32_t nb_bkts;
+ struct tim_mem_bucket *bkt;
+ void *chunk_pool;
+ uint64_t tck_int;
+ volatile uint64_t tim_arm_cnt;
+ uint64_t tck_nsec;
+ void *vbar0;
+ void *bkt_pos;
+ uint64_t max_tout;
+ uint64_t nb_chunks;
+ enum timvf_clk_src clk_src;
+ uint16_t tim_ring_id;
+} __rte_cache_aligned;
+
+static __rte_always_inline uint32_t
+bkt_mod(const uint32_t rel_bkt, const uint32_t nb_bkts)
+{
+ return rel_bkt % nb_bkts;
+}
+
+static __rte_always_inline uint32_t
+bkt_and(uint32_t rel_bkt, uint32_t nb_bkts)
+{
+ return rel_bkt & (nb_bkts - 1);
+}
+
+int timvf_info(struct timvf_info *tinfo);
+void *timvf_bar(uint8_t id, uint8_t bar);
+int timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
+ uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
+ uint8_t enable_stats);
+uint16_t timvf_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers);
+uint16_t timvf_timer_arm_burst_sp(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers);
+uint16_t timvf_timer_arm_burst_sp_stats(
+ const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers);
+uint16_t timvf_timer_arm_burst_mp(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers);
+uint16_t timvf_timer_arm_burst_mp_stats(
+ const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers);
+uint16_t timvf_timer_arm_tmo_brst(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint64_t timeout_tick,
+ const uint16_t nb_timers);
+uint16_t timvf_timer_arm_tmo_brst_stats(
+ const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint64_t timeout_tick,
+ const uint16_t nb_timers);
+void timvf_set_chunk_refill(struct timvf_ring * const timr, uint8_t use_fpa);
+
+#endif /* __TIMVF_EVDEV_H__ */
diff --git a/drivers/event/octeontx/timvf_probe.c b/drivers/event/octeontx/timvf_probe.c
new file mode 100644
index 00000000..08dbd2be
--- /dev/null
+++ b/drivers/event/octeontx/timvf_probe.c
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <rte_eal.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+
+#include <octeontx_mbox.h>
+
+#include "ssovf_evdev.h"
+#include "timvf_evdev.h"
+
+#ifndef PCI_VENDOR_ID_CAVIUM
+#define PCI_VENDOR_ID_CAVIUM (0x177D)
+#endif
+
+#define PCI_DEVICE_ID_OCTEONTX_TIM_VF (0xA051)
+#define TIM_MAX_RINGS (64)
+
+struct timvf_res {
+ uint16_t domain;
+ uint16_t vfid;
+ void *bar0;
+ void *bar2;
+ void *bar4;
+};
+
+struct timdev {
+ uint8_t total_timvfs;
+ struct timvf_res rings[TIM_MAX_RINGS];
+};
+
+static struct timdev tdev;
+
+int
+timvf_info(struct timvf_info *tinfo)
+{
+ int i;
+ struct ssovf_info info;
+
+ if (tinfo == NULL)
+ return -EINVAL;
+
+ if (!tdev.total_timvfs)
+ return -ENODEV;
+
+ if (ssovf_info(&info) < 0)
+ return -EINVAL;
+
+ for (i = 0; i < tdev.total_timvfs; i++) {
+ if (info.domain != tdev.rings[i].domain) {
+ timvf_log_err("GRP error, vfid=%d/%d domain=%d/%d %p",
+ i, tdev.rings[i].vfid,
+ info.domain, tdev.rings[i].domain,
+ tdev.rings[i].bar0);
+ return -EINVAL;
+ }
+ }
+
+ tinfo->total_timvfs = tdev.total_timvfs;
+ tinfo->domain = info.domain;
+ return 0;
+}
+
+void*
+timvf_bar(uint8_t id, uint8_t bar)
+{
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return NULL;
+
+ if (id > tdev.total_timvfs)
+ return NULL;
+
+ switch (bar) {
+ case 0:
+ return tdev.rings[id].bar0;
+ case 4:
+ return tdev.rings[id].bar4;
+ default:
+ return NULL;
+ }
+}
+
+static int
+timvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ uint64_t val;
+ uint16_t vfid;
+ struct timvf_res *res;
+
+ RTE_SET_USED(pci_drv);
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (pci_dev->mem_resource[0].addr == NULL ||
+ pci_dev->mem_resource[4].addr == NULL) {
+ timvf_log_err("Empty bars %p %p",
+ pci_dev->mem_resource[0].addr,
+ pci_dev->mem_resource[4].addr);
+ return -ENODEV;
+ }
+
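+ /* The VF id and domain are assumed to be encoded in TIM_VRINGX_BASE at bits [30:23] and [22:7] respectively; extract them to identify this ring. */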
+ val = rte_read64((uint8_t *)pci_dev->mem_resource[0].addr +
+ 0x100 /* TIM_VRINGX_BASE */);
+ vfid = (val >> 23) & 0xff;
+ if (vfid >= TIM_MAX_RINGS) {
+ timvf_log_err("Invalid vfid(%d/%d)", vfid, TIM_MAX_RINGS);
+ return -EINVAL;
+ }
+
+ res = &tdev.rings[tdev.total_timvfs];
+ res->vfid = vfid;
+ res->bar0 = pci_dev->mem_resource[0].addr;
+ res->bar2 = pci_dev->mem_resource[2].addr;
+ res->bar4 = pci_dev->mem_resource[4].addr;
+ res->domain = (val >> 7) & 0xffff;
+ tdev.total_timvfs++;
+ rte_wmb();
+
+ timvf_log_dbg("Domain=%d VFid=%d bar0 %p total_timvfs=%d", res->domain,
+ res->vfid, pci_dev->mem_resource[0].addr,
+ tdev.total_timvfs);
+ return 0;
+}
+
+
+static const struct rte_pci_id pci_timvf_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_TIM_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_timvf = {
+ .id_table = pci_timvf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
+ .probe = timvf_probe,
+ .remove = NULL,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_timvf, pci_timvf);
diff --git a/drivers/event/octeontx/timvf_worker.c b/drivers/event/octeontx/timvf_worker.c
new file mode 100644
index 00000000..50790e19
--- /dev/null
+++ b/drivers/event/octeontx/timvf_worker.c
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include "timvf_worker.h"
+
+static inline int
+timvf_timer_reg_checks(const struct timvf_ring * const timr,
+ struct rte_event_timer * const tim)
+{
+ if (unlikely(tim->state)) {
+ tim->state = RTE_EVENT_TIMER_ERROR;
+ rte_errno = EALREADY;
+ goto fail;
+ }
+
+ if (unlikely(!tim->timeout_ticks ||
+ tim->timeout_ticks >= timr->nb_bkts)) {
+ tim->state = tim->timeout_ticks ? RTE_EVENT_TIMER_ERROR_TOOLATE
+ : RTE_EVENT_TIMER_ERROR_TOOEARLY;
+ rte_errno = EINVAL;
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return -EINVAL;
+}
+
+static inline void
+timvf_format_event(const struct rte_event_timer * const tim,
+ struct tim_mem_entry * const entry)
+{
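+ /* Pack the rte_event into the 16 B chunk entry: w0 carries the event metadata rearranged (by the mask/shift below) into the format assumed for the TIM coprocessor, wqe carries the user word. */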
+ entry->w0 = (tim->ev.event & 0xFFC000000000) >> 6 |
+ (tim->ev.event & 0xFFFFFFFFF);
+ entry->wqe = tim->ev.u64;
+}
+
+uint16_t
+timvf_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers)
+{
+ RTE_SET_USED(adptr);
+ int ret;
+ uint16_t index;
+
+ for (index = 0; index < nb_timers; index++) {
+ if (tim[index]->state == RTE_EVENT_TIMER_CANCELED) {
+ rte_errno = EALREADY;
+ break;
+ }
+
+ if (tim[index]->state != RTE_EVENT_TIMER_ARMED) {
+ rte_errno = EINVAL;
+ break;
+ }
+ ret = timvf_rem_entry(tim[index]);
+ if (ret) {
+ rte_errno = -ret;
+ break;
+ }
+ }
+ return index;
+}
+
+uint16_t
+timvf_timer_arm_burst_sp(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers)
+{
+ int ret;
+ uint16_t index;
+ struct tim_mem_entry entry;
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+ for (index = 0; index < nb_timers; index++) {
+ if (timvf_timer_reg_checks(timr, tim[index]))
+ break;
+
+ timvf_format_event(tim[index], &entry);
+ ret = timvf_add_entry_sp(timr, tim[index]->timeout_ticks,
+ tim[index], &entry);
+ if (unlikely(ret)) {
+ rte_errno = -ret;
+ break;
+ }
+ }
+
+ return index;
+}
+
+uint16_t
+timvf_timer_arm_burst_sp_stats(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers)
+{
+ uint16_t ret;
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+
+ ret = timvf_timer_arm_burst_sp(adptr, tim, nb_timers);
+ timr->tim_arm_cnt += ret;
+
+ return ret;
+}
+
+uint16_t
+timvf_timer_arm_burst_mp(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers)
+{
+ int ret;
+ uint16_t index;
+ struct tim_mem_entry entry;
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+ for (index = 0; index < nb_timers; index++) {
+ if (timvf_timer_reg_checks(timr, tim[index]))
+ break;
+ timvf_format_event(tim[index], &entry);
+ ret = timvf_add_entry_mp(timr, tim[index]->timeout_ticks,
+ tim[index], &entry);
+ if (unlikely(ret)) {
+ rte_errno = -ret;
+ break;
+ }
+ }
+
+ return index;
+}
+
+uint16_t
+timvf_timer_arm_burst_mp_stats(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers)
+{
+ uint16_t ret;
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+
+ ret = timvf_timer_arm_burst_mp(adptr, tim, nb_timers);
+ timr->tim_arm_cnt += ret;
+
+ return ret;
+}
+
+uint16_t
+timvf_timer_arm_tmo_brst(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint64_t timeout_tick,
+ const uint16_t nb_timers)
+{
+ int ret;
+ uint16_t set_timers = 0;
+ uint16_t idx;
+ uint16_t arr_idx = 0;
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+ struct tim_mem_entry entry[TIMVF_MAX_BURST] __rte_cache_aligned;
+
+ if (unlikely(!timeout_tick || timeout_tick >= timr->nb_bkts)) {
+ const enum rte_event_timer_state state = timeout_tick ?
+ RTE_EVENT_TIMER_ERROR_TOOLATE :
+ RTE_EVENT_TIMER_ERROR_TOOEARLY;
+ for (idx = 0; idx < nb_timers; idx++)
+ tim[idx]->state = state;
+ rte_errno = EINVAL;
+ return 0;
+ }
+
+ while (arr_idx < nb_timers) {
+ for (idx = 0; idx < TIMVF_MAX_BURST && (arr_idx < nb_timers);
+ idx++, arr_idx++) {
+ timvf_format_event(tim[arr_idx], &entry[idx]);
+ }
+ ret = timvf_add_entry_brst(timr, timeout_tick, &tim[set_timers],
+ entry, idx);
+ set_timers += ret;
+ if (ret != idx)
+ break;
+ }
+
+ return set_timers;
+}
+
+
+uint16_t
+timvf_timer_arm_tmo_brst_stats(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint64_t timeout_tick,
+ const uint16_t nb_timers)
+{
+ uint16_t set_timers;
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+
+ set_timers = timvf_timer_arm_tmo_brst(adptr, tim, timeout_tick,
+ nb_timers);
+ timr->tim_arm_cnt += set_timers;
+
+ return set_timers;
+}
+
+void
+timvf_set_chunk_refill(struct timvf_ring * const timr, uint8_t use_fpa)
+{
+ if (use_fpa)
+ timr->refill_chunk = timvf_refill_chunk_fpa;
+ else
+ timr->refill_chunk = timvf_refill_chunk_generic;
+}
diff --git a/drivers/event/octeontx/timvf_worker.h b/drivers/event/octeontx/timvf_worker.h
new file mode 100644
index 00000000..dede1a4a
--- /dev/null
+++ b/drivers/event/octeontx/timvf_worker.h
@@ -0,0 +1,443 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <rte_common.h>
+#include <rte_branch_prediction.h>
+
+#include "timvf_evdev.h"
+
+static inline int16_t
+timr_bkt_fetch_rem(uint64_t w1)
+{
+ return (w1 >> TIM_BUCKET_W1_S_CHUNK_REMAINDER) &
+ TIM_BUCKET_W1_M_CHUNK_REMAINDER;
+}
+
+static inline int16_t
+timr_bkt_get_rem(struct tim_mem_bucket *bktp)
+{
+ return __atomic_load_n(&bktp->chunk_remainder,
+ __ATOMIC_ACQUIRE);
+}
+
+static inline void
+timr_bkt_set_rem(struct tim_mem_bucket *bktp, uint16_t v)
+{
+ __atomic_store_n(&bktp->chunk_remainder, v,
+ __ATOMIC_RELEASE);
+}
+
+static inline void
+timr_bkt_sub_rem(struct tim_mem_bucket *bktp, uint16_t v)
+{
+ __atomic_fetch_sub(&bktp->chunk_remainder, v,
+ __ATOMIC_RELEASE);
+}
+
+static inline uint8_t
+timr_bkt_get_sbt(uint64_t w1)
+{
+ return (w1 >> TIM_BUCKET_W1_S_SBT) & TIM_BUCKET_W1_M_SBT;
+}
+
+static inline uint64_t
+timr_bkt_set_sbt(struct tim_mem_bucket *bktp)
+{
+ const uint64_t v = TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT;
+ return __atomic_fetch_or(&bktp->w1, v, __ATOMIC_ACQ_REL);
+}
+
+static inline uint64_t
+timr_bkt_clr_sbt(struct tim_mem_bucket *bktp)
+{
+ const uint64_t v = ~(TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT);
+ return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
+}
+
+static inline uint8_t
+timr_bkt_get_shbt(uint64_t w1)
+{
+ return ((w1 >> TIM_BUCKET_W1_S_HBT) & TIM_BUCKET_W1_M_HBT) |
+ ((w1 >> TIM_BUCKET_W1_S_SBT) & TIM_BUCKET_W1_M_SBT);
+}
+
+static inline uint8_t
+timr_bkt_get_hbt(uint64_t w1)
+{
+ return (w1 >> TIM_BUCKET_W1_S_HBT) & TIM_BUCKET_W1_M_HBT;
+}
+
+static inline uint8_t
+timr_bkt_get_bsk(uint64_t w1)
+{
+ return (w1 >> TIM_BUCKET_W1_S_BSK) & TIM_BUCKET_W1_M_BSK;
+}
+
+static inline uint64_t
+timr_bkt_clr_bsk(struct tim_mem_bucket *bktp)
+{
+ /*Clear everything except lock. */
+ const uint64_t v = TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK;
+ return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
+}
+
+static inline uint64_t
+timr_bkt_fetch_sema_lock(struct tim_mem_bucket *bktp)
+{
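+ /* One fetch-add of TIM_BUCKET_SEMA_WLOCK increments the lock field and decrements chunk_remainder (the all-ones remainder value acts as -1), returning the previous word. */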
+ return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA_WLOCK,
+ __ATOMIC_ACQ_REL);
+}
+
+static inline uint64_t
+timr_bkt_fetch_sema(struct tim_mem_bucket *bktp)
+{
+ return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA,
+ __ATOMIC_RELAXED);
+}
+
+static inline uint64_t
+timr_bkt_inc_lock(struct tim_mem_bucket *bktp)
+{
+ const uint64_t v = 1ull << TIM_BUCKET_W1_S_LOCK;
+ return __atomic_fetch_add(&bktp->w1, v, __ATOMIC_ACQ_REL);
+}
+
+static inline void
+timr_bkt_dec_lock(struct tim_mem_bucket *bktp)
+{
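+ /* Adding 0xff to the 8-bit lock field wraps around, i.e. decrements the lock count by one. */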
+ __atomic_add_fetch(&bktp->lock, 0xff, __ATOMIC_ACQ_REL);
+}
+
+static inline uint32_t
+timr_bkt_get_nent(uint64_t w1)
+{
+ return (w1 >> TIM_BUCKET_W1_S_NUM_ENTRIES) &
+ TIM_BUCKET_W1_M_NUM_ENTRIES;
+}
+
+static inline void
+timr_bkt_inc_nent(struct tim_mem_bucket *bktp)
+{
+ __atomic_add_fetch(&bktp->nb_entry, 1, __ATOMIC_RELAXED);
+}
+
+static inline void
+timr_bkt_add_nent(struct tim_mem_bucket *bktp, uint32_t v)
+{
+ __atomic_add_fetch(&bktp->nb_entry, v, __ATOMIC_RELAXED);
+}
+
+static inline uint64_t
+timr_bkt_clr_nent(struct tim_mem_bucket *bktp)
+{
+ const uint64_t v = ~(TIM_BUCKET_W1_M_NUM_ENTRIES <<
+ TIM_BUCKET_W1_S_NUM_ENTRIES);
+ return __atomic_and_fetch(&bktp->w1, v, __ATOMIC_ACQ_REL);
+}
+
+static inline struct tim_mem_entry *
+timr_clr_bkt(struct timvf_ring * const timr, struct tim_mem_bucket * const bkt)
+{
+ struct tim_mem_entry *chunk;
+ struct tim_mem_entry *pnext;
+ chunk = ((struct tim_mem_entry *)(uintptr_t)bkt->first_chunk);
+ chunk = (struct tim_mem_entry *)(uintptr_t)(chunk + nb_chunk_slots)->w0;
+
+ while (chunk) {
+ pnext = (struct tim_mem_entry *)(uintptr_t)
+ ((chunk + nb_chunk_slots)->w0);
+ rte_mempool_put(timr->chunk_pool, chunk);
+ chunk = pnext;
+ }
+ return (struct tim_mem_entry *)(uintptr_t)bkt->first_chunk;
+}
+
+static inline int
+timvf_rem_entry(struct rte_event_timer *tim)
+{
+ uint64_t lock_sema;
+ struct tim_mem_entry *entry;
+ struct tim_mem_bucket *bkt;
+ if (tim->impl_opaque[1] == 0 ||
+ tim->impl_opaque[0] == 0)
+ return -ENOENT;
+
+ entry = (struct tim_mem_entry *)(uintptr_t)tim->impl_opaque[0];
+ if (entry->wqe != tim->ev.u64) {
+ tim->impl_opaque[1] = tim->impl_opaque[0] = 0;
+ return -ENOENT;
+ }
+ bkt = (struct tim_mem_bucket *)(uintptr_t)tim->impl_opaque[1];
+ lock_sema = timr_bkt_inc_lock(bkt);
+ if (timr_bkt_get_shbt(lock_sema)
+ || !timr_bkt_get_nent(lock_sema)) {
+ timr_bkt_dec_lock(bkt);
+ tim->impl_opaque[1] = tim->impl_opaque[0] = 0;
+ return -ENOENT;
+ }
+
+ entry->w0 = entry->wqe = 0;
+ timr_bkt_dec_lock(bkt);
+
+ tim->state = RTE_EVENT_TIMER_CANCELED;
+ tim->impl_opaque[1] = tim->impl_opaque[0] = 0;
+ return 0;
+}
+
+static inline struct tim_mem_entry *
+timvf_refill_chunk_generic(struct tim_mem_bucket * const bkt,
+ struct timvf_ring * const timr)
+{
+ struct tim_mem_entry *chunk;
+
+ if (bkt->nb_entry || !bkt->first_chunk) {
+ if (unlikely(rte_mempool_get(timr->chunk_pool,
+ (void **)&chunk))) {
+ return NULL;
+ }
+ if (bkt->nb_entry) {
+ *(uint64_t *)(((struct tim_mem_entry *)(uintptr_t)
+ bkt->current_chunk) +
+ nb_chunk_slots) =
+ (uintptr_t) chunk;
+ } else {
+ bkt->first_chunk = (uintptr_t) chunk;
+ }
+ } else {
+ chunk = timr_clr_bkt(timr, bkt);
+ bkt->first_chunk = (uintptr_t)chunk;
+ }
+ *(uint64_t *)(chunk + nb_chunk_slots) = 0;
+
+ return chunk;
+}
+
+static inline struct tim_mem_entry *
+timvf_refill_chunk_fpa(struct tim_mem_bucket * const bkt,
+ struct timvf_ring * const timr)
+{
+ struct tim_mem_entry *chunk;
+
+ if (unlikely(rte_mempool_get(timr->chunk_pool, (void **)&chunk)))
+ return NULL;
+
+ *(uint64_t *)(chunk + nb_chunk_slots) = 0;
+ if (bkt->nb_entry) {
+ *(uint64_t *)(((struct tim_mem_entry *)(uintptr_t)
+ bkt->current_chunk) +
+ nb_chunk_slots) =
+ (uintptr_t) chunk;
+ } else {
+ bkt->first_chunk = (uintptr_t) chunk;
+ }
+
+ return chunk;
+}
+
+static inline struct tim_mem_bucket *
+timvf_get_target_bucket(struct timvf_ring * const timr, const uint32_t rel_bkt)
+{
+ const uint64_t bkt_cyc = rte_rdtsc() - timr->ring_start_cyc;
+ const uint32_t bucket = rte_reciprocal_divide_u64(bkt_cyc,
+ &timr->fast_div) + rel_bkt;
+ const uint32_t tbkt_id = timr->get_target_bkt(bucket,
+ timr->nb_bkts);
+ return &timr->bkt[tbkt_id];
+}
+
+/* Single producer functions. */
+static inline int
+timvf_add_entry_sp(struct timvf_ring * const timr, const uint32_t rel_bkt,
+ struct rte_event_timer * const tim,
+ const struct tim_mem_entry * const pent)
+{
+ int16_t rem;
+ uint64_t lock_sema;
+ struct tim_mem_bucket *bkt;
+ struct tim_mem_entry *chunk;
+
+
+ bkt = timvf_get_target_bucket(timr, rel_bkt);
+__retry:
+ /*Get Bucket sema*/
+ lock_sema = timr_bkt_fetch_sema(bkt);
+ /* Bucket related checks. */
+ if (unlikely(timr_bkt_get_hbt(lock_sema)))
+ goto __retry;
+
+ /* Insert the work. */
+ rem = timr_bkt_fetch_rem(lock_sema);
+
+ if (!rem) {
+ chunk = timr->refill_chunk(bkt, timr);
+ if (unlikely(chunk == NULL)) {
+ timr_bkt_set_rem(bkt, 0);
+ tim->impl_opaque[0] = tim->impl_opaque[1] = 0;
+ tim->state = RTE_EVENT_TIMER_ERROR;
+ return -ENOMEM;
+ }
+ bkt->current_chunk = (uintptr_t) chunk;
+ timr_bkt_set_rem(bkt, nb_chunk_slots - 1);
+ } else {
+ chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk;
+ chunk += nb_chunk_slots - rem;
+ }
+ /* Copy work entry. */
+ *chunk = *pent;
+ timr_bkt_inc_nent(bkt);
+
+ tim->impl_opaque[0] = (uintptr_t)chunk;
+ tim->impl_opaque[1] = (uintptr_t)bkt;
+ tim->state = RTE_EVENT_TIMER_ARMED;
+ return 0;
+}
+
+/* Multi producer functions. */
+static inline int
+timvf_add_entry_mp(struct timvf_ring * const timr, const uint32_t rel_bkt,
+ struct rte_event_timer * const tim,
+ const struct tim_mem_entry * const pent)
+{
+ int16_t rem;
+ uint64_t lock_sema;
+ struct tim_mem_bucket *bkt;
+ struct tim_mem_entry *chunk;
+
+__retry:
+ bkt = timvf_get_target_bucket(timr, rel_bkt);
+ /* Bucket related checks. */
+ /*Get Bucket sema*/
+ lock_sema = timr_bkt_fetch_sema_lock(bkt);
+ if (unlikely(timr_bkt_get_shbt(lock_sema))) {
+ timr_bkt_dec_lock(bkt);
+ goto __retry;
+ }
+
+ rem = timr_bkt_fetch_rem(lock_sema);
+
+ if (rem < 0) {
+ /* goto diff bucket. */
+ timr_bkt_dec_lock(bkt);
+ goto __retry;
+ } else if (!rem) {
+ /*Only one thread can be here*/
+ chunk = timr->refill_chunk(bkt, timr);
+ if (unlikely(chunk == NULL)) {
+ timr_bkt_set_rem(bkt, 0);
+ timr_bkt_dec_lock(bkt);
+ tim->impl_opaque[0] = tim->impl_opaque[1] = 0;
+ tim->state = RTE_EVENT_TIMER_ERROR;
+ return -ENOMEM;
+ }
+ bkt->current_chunk = (uintptr_t) chunk;
+ timr_bkt_set_rem(bkt, nb_chunk_slots - 1);
+ } else {
+ chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk;
+ chunk += nb_chunk_slots - rem;
+ }
+ /* Copy work entry. */
+ *chunk = *pent;
+ timr_bkt_inc_nent(bkt);
+ timr_bkt_dec_lock(bkt);
+
+ tim->impl_opaque[0] = (uintptr_t)chunk;
+ tim->impl_opaque[1] = (uintptr_t)bkt;
+ tim->state = RTE_EVENT_TIMER_ARMED;
+ return 0;
+}
+
+static inline uint16_t
+timvf_cpy_wrk(uint16_t index, uint16_t cpy_lmt,
+ struct tim_mem_entry *chunk,
+ struct rte_event_timer ** const tim,
+ const struct tim_mem_entry * const ents,
+ const struct tim_mem_bucket * const bkt)
+{
+ for (; index < cpy_lmt; index++) {
+ *chunk = *(ents + index);
+ tim[index]->impl_opaque[0] = (uintptr_t)chunk++;
+ tim[index]->impl_opaque[1] = (uintptr_t)bkt;
+ tim[index]->state = RTE_EVENT_TIMER_ARMED;
+ }
+
+ return index;
+}
+
+/* Burst mode functions */
+static inline int
+timvf_add_entry_brst(struct timvf_ring * const timr, const uint16_t rel_bkt,
+ struct rte_event_timer ** const tim,
+ const struct tim_mem_entry *ents,
+ const uint16_t nb_timers)
+{
+ int16_t rem;
+ int16_t crem;
+ uint8_t lock_cnt;
+ uint16_t index = 0;
+ uint16_t chunk_remainder;
+ uint64_t lock_sema;
+ struct tim_mem_bucket *bkt;
+ struct tim_mem_entry *chunk;
+
+__retry:
+ bkt = timvf_get_target_bucket(timr, rel_bkt);
+
+ /* Only one thread beyond this. */
+ lock_sema = timr_bkt_inc_lock(bkt);
+ lock_cnt = (uint8_t)
+ ((lock_sema >> TIM_BUCKET_W1_S_LOCK) & TIM_BUCKET_W1_M_LOCK);
+
+ if (lock_cnt) {
+ timr_bkt_dec_lock(bkt);
+ goto __retry;
+ }
+
+ /* Bucket related checks. */
+ if (unlikely(timr_bkt_get_hbt(lock_sema))) {
+ timr_bkt_dec_lock(bkt);
+ goto __retry;
+ }
+
+ chunk_remainder = timr_bkt_fetch_rem(lock_sema);
+ rem = chunk_remainder - nb_timers;
+ if (rem < 0) {
+ crem = nb_chunk_slots - chunk_remainder;
+ if (chunk_remainder && crem) {
+ chunk = ((struct tim_mem_entry *)
+ (uintptr_t)bkt->current_chunk) + crem;
+
+ index = timvf_cpy_wrk(index, chunk_remainder,
+ chunk, tim, ents, bkt);
+ timr_bkt_sub_rem(bkt, chunk_remainder);
+ timr_bkt_add_nent(bkt, chunk_remainder);
+ }
+ rem = nb_timers - chunk_remainder;
+ ents = ents + chunk_remainder;
+
+ chunk = timr->refill_chunk(bkt, timr);
+ if (unlikely(chunk == NULL)) {
+ timr_bkt_dec_lock(bkt);
+ rte_errno = ENOMEM;
+ tim[index]->state = RTE_EVENT_TIMER_ERROR;
+ return crem;
+ }
+ *(uint64_t *)(chunk + nb_chunk_slots) = 0;
+ bkt->current_chunk = (uintptr_t) chunk;
+
+ index = timvf_cpy_wrk(index, nb_timers, chunk, tim, ents, bkt);
+ timr_bkt_set_rem(bkt, nb_chunk_slots - rem);
+ timr_bkt_add_nent(bkt, rem);
+ } else {
+ chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk;
+ chunk += (nb_chunk_slots - chunk_remainder);
+
+ index = timvf_cpy_wrk(index, nb_timers,
+ chunk, tim, ents, bkt);
+ timr_bkt_sub_rem(bkt, nb_timers);
+ timr_bkt_add_nent(bkt, nb_timers);
+ }
+
+ timr_bkt_dec_lock(bkt);
+ return nb_timers;
+}
diff --git a/drivers/event/opdl/opdl_evdev.c b/drivers/event/opdl/opdl_evdev.c
index 77083691..ef9fb30c 100644
--- a/drivers/event/opdl/opdl_evdev.c
+++ b/drivers/event/opdl/opdl_evdev.c
@@ -607,7 +607,7 @@ set_do_test(const char *key __rte_unused, const char *value, void *opaque)
static int
opdl_probe(struct rte_vdev_device *vdev)
{
- static const struct rte_eventdev_ops evdev_opdl_ops = {
+ static struct rte_eventdev_ops evdev_opdl_ops = {
.dev_configure = opdl_dev_configure,
.dev_infos_get = opdl_info_get,
.dev_close = opdl_close,
diff --git a/drivers/event/opdl/opdl_evdev_init.c b/drivers/event/opdl/opdl_evdev_init.c
index 1454de53..582ad698 100644
--- a/drivers/event/opdl/opdl_evdev_init.c
+++ b/drivers/event/opdl/opdl_evdev_init.c
@@ -733,6 +733,9 @@ initialise_all_other_ports(struct rte_eventdev *dev)
queue->ports[queue->nb_ports] = port;
port->instance_id = queue->nb_ports;
queue->nb_ports++;
+ opdl_stage_set_queue_id(stage_inst,
+ port->queue_id);
+
} else if (queue->q_pos == OPDL_Q_POS_END) {
/* tx port */
diff --git a/drivers/event/opdl/opdl_ring.c b/drivers/event/opdl/opdl_ring.c
index eca7712b..8aca481c 100644
--- a/drivers/event/opdl/opdl_ring.c
+++ b/drivers/event/opdl/opdl_ring.c
@@ -25,7 +25,10 @@
#define OPDL_NAME_SIZE 64
-#define OPDL_EVENT_MASK (0xFFFF0000000FFFFFULL)
+#define OPDL_EVENT_MASK (0x00000000000FFFFFULL)
+#define OPDL_FLOWID_MASK (0xFFFFF)
+#define OPDL_OPA_MASK (0xFF)
+#define OPDL_OPA_OFFSET (0x38)
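+/* As used below, the low 20 bits of ev->event hold the flow id and the top byte (offset 0x38) records the id of the queue that last updated the slot. */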
int opdl_logtype_driver;
@@ -86,7 +89,6 @@ struct opdl_stage {
*/
uint32_t available_seq;
uint32_t head; /* Current head for single-thread operation */
- uint32_t shadow_head; /* Shadow head for single-thread operation */
uint32_t nb_instance; /* Number of instances */
uint32_t instance_id; /* ID of this stage instance */
uint16_t num_claimed; /* Number of slots claimed */
@@ -102,6 +104,9 @@ struct opdl_stage {
/* For managing disclaims in multi-threaded processing stages */
struct claim_manager pending_disclaims[RTE_MAX_LCORE]
__rte_cache_aligned;
+ uint32_t shadow_head; /* Shadow head for single-thread operation */
+ uint32_t queue_id; /* ID of Queue which is assigned to this stage */
+ uint32_t pos; /* Atomic scan position */
} __rte_cache_aligned;
/* Context for opdl_ring */
@@ -494,6 +499,9 @@ opdl_stage_claim_singlethread(struct opdl_stage *s, void *entries,
uint32_t num_entries, uint32_t *seq, bool block, bool atomic)
{
uint32_t i = 0, j = 0, offset;
+ uint32_t opa_id = 0;
+ uint32_t flow_id = 0;
+ uint64_t event = 0;
void *get_slots;
struct rte_event *ev;
RTE_SET_USED(seq);
@@ -520,7 +528,17 @@ opdl_stage_claim_singlethread(struct opdl_stage *s, void *entries,
for (j = 0; j < num_entries; j++) {
ev = (struct rte_event *)get_slot(t, s->head+j);
- if ((ev->flow_id%s->nb_instance) == s->instance_id) {
+
+ event = __atomic_load_n(&(ev->event),
+ __ATOMIC_ACQUIRE);
+
+ opa_id = OPDL_OPA_MASK & (event >> OPDL_OPA_OFFSET);
+ flow_id = OPDL_FLOWID_MASK & event;
+
+ if (opa_id >= s->queue_id)
+ continue;
+
+ if ((flow_id % s->nb_instance) == s->instance_id) {
memcpy(entries_offset, ev, t->slot_size);
entries_offset += t->slot_size;
i++;
@@ -531,6 +549,7 @@ opdl_stage_claim_singlethread(struct opdl_stage *s, void *entries,
s->head += num_entries;
s->num_claimed = num_entries;
s->num_event = i;
+ s->pos = 0;
/* automatically disclaim entries if number of rte_events is zero */
if (unlikely(i == 0))
@@ -953,21 +972,26 @@ opdl_ring_get_slot(const struct opdl_ring *t, uint32_t index)
}
bool
-opdl_ring_cas_slot(const struct opdl_stage *s, const struct rte_event *ev,
+opdl_ring_cas_slot(struct opdl_stage *s, const struct rte_event *ev,
uint32_t index, bool atomic)
{
- uint32_t i = 0, j = 0, offset;
+ uint32_t i = 0, offset;
struct opdl_ring *t = s->t;
struct rte_event *ev_orig = NULL;
bool ev_updated = false;
- uint64_t ev_temp = 0;
+ uint64_t ev_temp = 0;
+ uint64_t ev_update = 0;
+
+ uint32_t opa_id = 0;
+ uint32_t flow_id = 0;
+ uint64_t event = 0;
if (index > s->num_event) {
PMD_DRV_LOG(ERR, "index is overflow");
return ev_updated;
}
- ev_temp = ev->event&OPDL_EVENT_MASK;
+ ev_temp = ev->event & OPDL_EVENT_MASK;
if (!atomic) {
offset = opdl_first_entry_id(s->seq, s->nb_instance,
@@ -984,27 +1008,39 @@ opdl_ring_cas_slot(const struct opdl_stage *s, const struct rte_event *ev,
}
} else {
- for (i = 0; i < s->num_claimed; i++) {
+ for (i = s->pos; i < s->num_claimed; i++) {
ev_orig = (struct rte_event *)
get_slot(t, s->shadow_head+i);
- if ((ev_orig->flow_id%s->nb_instance) ==
- s->instance_id) {
-
- if (j == index) {
- if ((ev_orig->event&OPDL_EVENT_MASK) !=
- ev_temp) {
- ev_orig->event = ev->event;
- ev_updated = true;
- }
- if (ev_orig->u64 != ev->u64) {
- ev_orig->u64 = ev->u64;
- ev_updated = true;
- }
-
- break;
+ event = __atomic_load_n(&(ev_orig->event),
+ __ATOMIC_ACQUIRE);
+
+ opa_id = OPDL_OPA_MASK & (event >> OPDL_OPA_OFFSET);
+ flow_id = OPDL_FLOWID_MASK & event;
+
+ if (opa_id >= s->queue_id)
+ continue;
+
+ if ((flow_id % s->nb_instance) == s->instance_id) {
+ ev_update = s->queue_id;
+ ev_update = (ev_update << OPDL_OPA_OFFSET)
+ | ev->event;
+
+ s->pos = i + 1;
+
+ if ((event & OPDL_EVENT_MASK) !=
+ ev_temp) {
+ __atomic_store_n(&(ev_orig->event),
+ ev_update,
+ __ATOMIC_RELEASE);
+ ev_updated = true;
}
- j++;
+ if (ev_orig->u64 != ev->u64) {
+ ev_orig->u64 = ev->u64;
+ ev_updated = true;
+ }
+
+ break;
}
}
@@ -1049,11 +1085,7 @@ check_deps(struct opdl_ring *t, struct opdl_stage *deps[],
return -EINVAL;
}
}
- if (num_deps > t->num_stages) {
- PMD_DRV_LOG(ERR, "num_deps (%u) > number stages (%u)",
- num_deps, t->num_stages);
- return -EINVAL;
- }
+
return 0;
}
@@ -1154,6 +1186,13 @@ opdl_stage_get_opdl_ring(const struct opdl_stage *s)
}
void
+opdl_stage_set_queue_id(struct opdl_stage *s,
+ uint32_t queue_id)
+{
+ s->queue_id = queue_id;
+}
+
+void
opdl_ring_dump(const struct opdl_ring *t, FILE *f)
{
uint32_t i;
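
As a reference for the new masks introduced above, the 64-bit event word is treated as carrying the flow id in its low 20 bits and, in the top byte (0x38 bits up), the queue id of the stage that last wrote the event (OPA). A small standalone sketch of that field extraction using the same constants (the sample event value is made up):

#include <stdint.h>
#include <stdio.h>

#define OPDL_EVENT_MASK		(0x00000000000FFFFFULL)
#define OPDL_FLOWID_MASK	(0xFFFFF)
#define OPDL_OPA_MASK		(0xFF)
#define OPDL_OPA_OFFSET		(0x38)

int main(void)
{
	/* Example only: queue id 3 stamped in the top byte, flow id 0x12345. */
	uint64_t event = ((uint64_t)3 << OPDL_OPA_OFFSET) | 0x12345;

	uint32_t opa_id = OPDL_OPA_MASK & (event >> OPDL_OPA_OFFSET);
	uint32_t flow_id = OPDL_FLOWID_MASK & event;

	printf("opa_id=%u flow_id=0x%x masked=0x%llx\n",
	       opa_id, flow_id, (unsigned long long)(event & OPDL_EVENT_MASK));
	return 0;
}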
diff --git a/drivers/event/opdl/opdl_ring.h b/drivers/event/opdl/opdl_ring.h
index 9e8c33e6..751a59db 100644
--- a/drivers/event/opdl/opdl_ring.h
+++ b/drivers/event/opdl/opdl_ring.h
@@ -518,6 +518,20 @@ opdl_stage_find_num_available(struct opdl_stage *s, uint32_t num_entries);
struct opdl_stage *
opdl_stage_create(struct opdl_ring *t, bool threadsafe);
+
+/**
+ * Set the internal queue id for each stage instance.
+ *
+ * @param s
+ * The pointer of stage instance.
+ *
+ * @param queue_id
+ * The value of internal queue id.
+ */
+void
+opdl_stage_set_queue_id(struct opdl_stage *s,
+ uint32_t queue_id);
+
/**
* Prints information on opdl_ring instance and all its stages
*
@@ -590,7 +604,7 @@ opdl_ring_set_stage_threadsafe(struct opdl_stage *s, bool threadsafe);
*/
bool
-opdl_ring_cas_slot(const struct opdl_stage *s, const struct rte_event *ev,
+opdl_ring_cas_slot(struct opdl_stage *s, const struct rte_event *ev,
uint32_t index, bool atomic);
#ifdef __cplusplus
diff --git a/drivers/event/skeleton/skeleton_eventdev.c b/drivers/event/skeleton/skeleton_eventdev.c
index 7f467568..c889220e 100644
--- a/drivers/event/skeleton/skeleton_eventdev.c
+++ b/drivers/event/skeleton/skeleton_eventdev.c
@@ -319,7 +319,7 @@ skeleton_eventdev_dump(struct rte_eventdev *dev, FILE *f)
/* Initialize and register event driver with DPDK Application */
-static const struct rte_eventdev_ops skeleton_eventdev_ops = {
+static struct rte_eventdev_ops skeleton_eventdev_ops = {
.dev_infos_get = skeleton_eventdev_info_get,
.dev_configure = skeleton_eventdev_configure,
.dev_start = skeleton_eventdev_start,
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index 6672fd8e..10f0e1ad 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -464,6 +464,33 @@ sw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
return 0;
}
+static int
+sw_timer_adapter_caps_get(const struct rte_eventdev *dev,
+ uint64_t flags,
+ uint32_t *caps,
+ const struct rte_event_timer_adapter_ops **ops)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(flags);
+ *caps = 0;
+
+ /* Use default SW ops */
+ *ops = NULL;
+
+ return 0;
+}
+
+static int
+sw_crypto_adapter_caps_get(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev,
+ uint32_t *caps)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(cdev);
+ *caps = RTE_EVENT_CRYPTO_ADAPTER_SW_CAP;
+ return 0;
+}
+
static void
sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
{
@@ -772,7 +799,7 @@ static int32_t sw_sched_service_func(void *args)
static int
sw_probe(struct rte_vdev_device *vdev)
{
- static const struct rte_eventdev_ops evdev_sw_ops = {
+ static struct rte_eventdev_ops evdev_sw_ops = {
.dev_configure = sw_dev_configure,
.dev_infos_get = sw_info_get,
.dev_close = sw_close,
@@ -791,6 +818,10 @@ sw_probe(struct rte_vdev_device *vdev)
.eth_rx_adapter_caps_get = sw_eth_rx_adapter_caps_get,
+ .timer_adapter_caps_get = sw_timer_adapter_caps_get,
+
+ .crypto_adapter_caps_get = sw_crypto_adapter_caps_get,
+
.xstats_get = sw_xstats_get,
.xstats_get_names = sw_xstats_get_names,
.xstats_get_by_name = sw_xstats_get_by_name,
diff --git a/drivers/event/sw/sw_evdev_scheduler.c b/drivers/event/sw/sw_evdev_scheduler.c
index 3106eb33..e3a41e02 100644
--- a/drivers/event/sw/sw_evdev_scheduler.c
+++ b/drivers/event/sw/sw_evdev_scheduler.c
@@ -508,7 +508,7 @@ sw_event_schedule(struct rte_eventdev *dev)
uint32_t i;
sw->sched_called++;
- if (!sw->started)
+ if (unlikely(!sw->started))
return;
do {
@@ -532,8 +532,7 @@ sw_event_schedule(struct rte_eventdev *dev)
} while (in_pkts > 4 &&
(int)in_pkts_this_iteration < sched_quanta);
- out_pkts = 0;
- out_pkts += sw_schedule_qid_to_cq(sw);
+ out_pkts = sw_schedule_qid_to_cq(sw);
out_pkts_total += out_pkts;
in_pkts_total += in_pkts_this_iteration;
@@ -541,6 +540,12 @@ sw_event_schedule(struct rte_eventdev *dev)
break;
} while ((int)out_pkts_total < sched_quanta);
+ sw->stats.tx_pkts += out_pkts_total;
+ sw->stats.rx_pkts += in_pkts_total;
+
+ sw->sched_no_iq_enqueues += (in_pkts_total == 0);
+ sw->sched_no_cq_enqueues += (out_pkts_total == 0);
+
/* push all the internal buffered QEs in port->cq_ring to the
* worker cores: aka, do the ring transfers batched.
*/
@@ -552,10 +557,4 @@ sw_event_schedule(struct rte_eventdev *dev)
sw->ports[i].cq_buf_count = 0;
}
- sw->stats.tx_pkts += out_pkts_total;
- sw->stats.rx_pkts += in_pkts_total;
-
- sw->sched_no_iq_enqueues += (in_pkts_total == 0);
- sw->sched_no_cq_enqueues += (out_pkts_total == 0);
-
}
diff --git a/drivers/event/sw/sw_evdev_worker.c b/drivers/event/sw/sw_evdev_worker.c
index 67151f77..063b919c 100644
--- a/drivers/event/sw/sw_evdev_worker.c
+++ b/drivers/event/sw/sw_evdev_worker.c
@@ -77,8 +77,10 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
rte_atomic32_add(&sw->inflights, credit_update_quanta);
p->inflight_credits += (credit_update_quanta);
- if (p->inflight_credits < new)
- return 0;
+ /* If there are fewer inflight credits than new events, limit
+ * the number of enqueued events.
+ */
+ num = (p->inflight_credits < new) ? p->inflight_credits : new;
}
for (i = 0; i < num; i++) {
diff --git a/drivers/mempool/Makefile b/drivers/mempool/Makefile
index aae2cb10..28c2e836 100644
--- a/drivers/mempool/Makefile
+++ b/drivers/mempool/Makefile
@@ -3,8 +3,13 @@
include $(RTE_SDK)/mk/rte.vars.mk
+DIRS-$(CONFIG_RTE_DRIVER_MEMPOOL_BUCKET) += bucket
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
DIRS-$(CONFIG_RTE_LIBRTE_DPAA_MEMPOOL) += dpaa
+endif
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
DIRS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += dpaa2
+endif
DIRS-$(CONFIG_RTE_DRIVER_MEMPOOL_RING) += ring
DIRS-$(CONFIG_RTE_DRIVER_MEMPOOL_STACK) += stack
DIRS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx
diff --git a/drivers/mempool/bucket/Makefile b/drivers/mempool/bucket/Makefile
new file mode 100644
index 00000000..7364916b
--- /dev/null
+++ b/drivers/mempool/bucket/Makefile
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Copyright (c) 2017-2018 Solarflare Communications Inc.
+# All rights reserved.
+#
+# This software was jointly developed between OKTET Labs (under contract
+# for Solarflare) and Solarflare Communications, Inc.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_mempool_bucket.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+LDLIBS += -lrte_eal -lrte_mempool -lrte_ring
+
+EXPORT_MAP := rte_mempool_bucket_version.map
+
+LIBABIVER := 1
+
+SRCS-$(CONFIG_RTE_DRIVER_MEMPOOL_BUCKET) += rte_mempool_bucket.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/mempool/bucket/meson.build b/drivers/mempool/bucket/meson.build
new file mode 100644
index 00000000..618d7912
--- /dev/null
+++ b/drivers/mempool/bucket/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Copyright (c) 2017-2018 Solarflare Communications Inc.
+# All rights reserved.
+#
+# This software was jointly developed between OKTET Labs (under contract
+# for Solarflare) and Solarflare Communications, Inc.
+
+sources = files('rte_mempool_bucket.c')
diff --git a/drivers/mempool/bucket/rte_mempool_bucket.c b/drivers/mempool/bucket/rte_mempool_bucket.c
new file mode 100644
index 00000000..78d2b9d0
--- /dev/null
+++ b/drivers/mempool/bucket/rte_mempool_bucket.c
@@ -0,0 +1,628 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2017-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <rte_errno.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+
+/*
+ * The general idea of the bucket mempool driver is as follows.
+ * We keep track of physically contiguous groups (buckets) of objects
+ * of a certain size. Every such group has a counter that is
+ * incremented every time an object from that group is enqueued.
+ * Until the bucket is full, no objects from it are eligible for allocation.
+ * If a request is made to dequeue a multiple of the bucket size, it is
+ * satisfied by returning whole buckets instead of separate objects.
+ */
+
+
+struct bucket_header {
+ unsigned int lcore_id;
+ uint8_t fill_cnt;
+};
+
+struct bucket_stack {
+ unsigned int top;
+ unsigned int limit;
+ void *objects[];
+};
+
+struct bucket_data {
+ unsigned int header_size;
+ unsigned int total_elt_size;
+ unsigned int obj_per_bucket;
+ unsigned int bucket_stack_thresh;
+ uintptr_t bucket_page_mask;
+ struct rte_ring *shared_bucket_ring;
+ struct bucket_stack *buckets[RTE_MAX_LCORE];
+ /*
+ * Multi-producer single-consumer ring to hold objects that are
+ * returned to the mempool on a different lcore than the one where
+ * they were initially dequeued
+ */
+ struct rte_ring *adoption_buffer_rings[RTE_MAX_LCORE];
+ struct rte_ring *shared_orphan_ring;
+ struct rte_mempool *pool;
+ unsigned int bucket_mem_size;
+};
+
+static struct bucket_stack *
+bucket_stack_create(const struct rte_mempool *mp, unsigned int n_elts)
+{
+ struct bucket_stack *stack;
+
+ stack = rte_zmalloc_socket("bucket_stack",
+ sizeof(struct bucket_stack) +
+ n_elts * sizeof(void *),
+ RTE_CACHE_LINE_SIZE,
+ mp->socket_id);
+ if (stack == NULL)
+ return NULL;
+ stack->limit = n_elts;
+ stack->top = 0;
+
+ return stack;
+}
+
+static void
+bucket_stack_push(struct bucket_stack *stack, void *obj)
+{
+ RTE_ASSERT(stack->top < stack->limit);
+ stack->objects[stack->top++] = obj;
+}
+
+static void *
+bucket_stack_pop_unsafe(struct bucket_stack *stack)
+{
+ RTE_ASSERT(stack->top > 0);
+ return stack->objects[--stack->top];
+}
+
+static void *
+bucket_stack_pop(struct bucket_stack *stack)
+{
+ if (stack->top == 0)
+ return NULL;
+ return bucket_stack_pop_unsafe(stack);
+}
+
+static int
+bucket_enqueue_single(struct bucket_data *bd, void *obj)
+{
+ int rc = 0;
+ uintptr_t addr = (uintptr_t)obj;
+ struct bucket_header *hdr;
+ unsigned int lcore_id = rte_lcore_id();
+
+ addr &= bd->bucket_page_mask;
+ hdr = (struct bucket_header *)addr;
+
+ if (likely(hdr->lcore_id == lcore_id)) {
+ if (hdr->fill_cnt < bd->obj_per_bucket - 1) {
+ hdr->fill_cnt++;
+ } else {
+ hdr->fill_cnt = 0;
+ /* Stack is big enough to put all buckets */
+ bucket_stack_push(bd->buckets[lcore_id], hdr);
+ }
+ } else if (hdr->lcore_id != LCORE_ID_ANY) {
+ struct rte_ring *adopt_ring =
+ bd->adoption_buffer_rings[hdr->lcore_id];
+
+ rc = rte_ring_enqueue(adopt_ring, obj);
+ /* Ring is big enough to put all objects */
+ RTE_ASSERT(rc == 0);
+ } else if (hdr->fill_cnt < bd->obj_per_bucket - 1) {
+ hdr->fill_cnt++;
+ } else {
+ hdr->fill_cnt = 0;
+ rc = rte_ring_enqueue(bd->shared_bucket_ring, hdr);
+ /* Ring is big enough to put all buckets */
+ RTE_ASSERT(rc == 0);
+ }
+
+ return rc;
+}
+
+static int
+bucket_enqueue(struct rte_mempool *mp, void * const *obj_table,
+ unsigned int n)
+{
+ struct bucket_data *bd = mp->pool_data;
+ struct bucket_stack *local_stack = bd->buckets[rte_lcore_id()];
+ unsigned int i;
+ int rc = 0;
+
+ for (i = 0; i < n; i++) {
+ rc = bucket_enqueue_single(bd, obj_table[i]);
+ RTE_ASSERT(rc == 0);
+ }
+ if (local_stack->top > bd->bucket_stack_thresh) {
+ rte_ring_enqueue_bulk(bd->shared_bucket_ring,
+ &local_stack->objects
+ [bd->bucket_stack_thresh],
+ local_stack->top -
+ bd->bucket_stack_thresh,
+ NULL);
+ local_stack->top = bd->bucket_stack_thresh;
+ }
+ return rc;
+}
+
+static void **
+bucket_fill_obj_table(const struct bucket_data *bd, void **pstart,
+ void **obj_table, unsigned int n)
+{
+ unsigned int i;
+ uint8_t *objptr = *pstart;
+
+ for (objptr += bd->header_size, i = 0; i < n;
+ i++, objptr += bd->total_elt_size)
+ *obj_table++ = objptr;
+ *pstart = objptr;
+ return obj_table;
+}
+
+static int
+bucket_dequeue_orphans(struct bucket_data *bd, void **obj_table,
+ unsigned int n_orphans)
+{
+ unsigned int i;
+ int rc;
+ uint8_t *objptr;
+
+ rc = rte_ring_dequeue_bulk(bd->shared_orphan_ring, obj_table,
+ n_orphans, NULL);
+ if (unlikely(rc != (int)n_orphans)) {
+ struct bucket_header *hdr;
+
+ objptr = bucket_stack_pop(bd->buckets[rte_lcore_id()]);
+ hdr = (struct bucket_header *)objptr;
+
+ if (objptr == NULL) {
+ rc = rte_ring_dequeue(bd->shared_bucket_ring,
+ (void **)&objptr);
+ if (rc != 0) {
+ rte_errno = ENOBUFS;
+ return -rte_errno;
+ }
+ hdr = (struct bucket_header *)objptr;
+ hdr->lcore_id = rte_lcore_id();
+ }
+ hdr->fill_cnt = 0;
+ bucket_fill_obj_table(bd, (void **)&objptr, obj_table,
+ n_orphans);
+ for (i = n_orphans; i < bd->obj_per_bucket; i++,
+ objptr += bd->total_elt_size) {
+ rc = rte_ring_enqueue(bd->shared_orphan_ring,
+ objptr);
+ if (rc != 0) {
+ RTE_ASSERT(0);
+ rte_errno = -rc;
+ return rc;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+bucket_dequeue_buckets(struct bucket_data *bd, void **obj_table,
+ unsigned int n_buckets)
+{
+ struct bucket_stack *cur_stack = bd->buckets[rte_lcore_id()];
+ unsigned int n_buckets_from_stack = RTE_MIN(n_buckets, cur_stack->top);
+ void **obj_table_base = obj_table;
+
+ n_buckets -= n_buckets_from_stack;
+ while (n_buckets_from_stack-- > 0) {
+ void *obj = bucket_stack_pop_unsafe(cur_stack);
+
+ obj_table = bucket_fill_obj_table(bd, &obj, obj_table,
+ bd->obj_per_bucket);
+ }
+ while (n_buckets-- > 0) {
+ struct bucket_header *hdr;
+
+ if (unlikely(rte_ring_dequeue(bd->shared_bucket_ring,
+ (void **)&hdr) != 0)) {
+ /*
+ * Return the already-dequeued buffers
+ * back to the mempool
+ */
+ bucket_enqueue(bd->pool, obj_table_base,
+ obj_table - obj_table_base);
+ rte_errno = ENOBUFS;
+ return -rte_errno;
+ }
+ hdr->lcore_id = rte_lcore_id();
+ obj_table = bucket_fill_obj_table(bd, (void **)&hdr,
+ obj_table,
+ bd->obj_per_bucket);
+ }
+
+ return 0;
+}
+
+static int
+bucket_adopt_orphans(struct bucket_data *bd)
+{
+ int rc = 0;
+ struct rte_ring *adopt_ring =
+ bd->adoption_buffer_rings[rte_lcore_id()];
+
+ if (unlikely(!rte_ring_empty(adopt_ring))) {
+ void *orphan;
+
+ while (rte_ring_sc_dequeue(adopt_ring, &orphan) == 0) {
+ rc = bucket_enqueue_single(bd, orphan);
+ RTE_ASSERT(rc == 0);
+ }
+ }
+ return rc;
+}
+
+static int
+bucket_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
+{
+ struct bucket_data *bd = mp->pool_data;
+ unsigned int n_buckets = n / bd->obj_per_bucket;
+ unsigned int n_orphans = n - n_buckets * bd->obj_per_bucket;
+ int rc = 0;
+
+ bucket_adopt_orphans(bd);
+
+ if (unlikely(n_orphans > 0)) {
+ rc = bucket_dequeue_orphans(bd, obj_table +
+ (n_buckets * bd->obj_per_bucket),
+ n_orphans);
+ if (rc != 0)
+ return rc;
+ }
+
+ if (likely(n_buckets > 0)) {
+ rc = bucket_dequeue_buckets(bd, obj_table, n_buckets);
+ if (unlikely(rc != 0) && n_orphans > 0) {
+ rte_ring_enqueue_bulk(bd->shared_orphan_ring,
+ obj_table + (n_buckets *
+ bd->obj_per_bucket),
+ n_orphans, NULL);
+ }
+ }
+
+ return rc;
+}
+
+static int
+bucket_dequeue_contig_blocks(struct rte_mempool *mp, void **first_obj_table,
+ unsigned int n)
+{
+ struct bucket_data *bd = mp->pool_data;
+ const uint32_t header_size = bd->header_size;
+ struct bucket_stack *cur_stack = bd->buckets[rte_lcore_id()];
+ unsigned int n_buckets_from_stack = RTE_MIN(n, cur_stack->top);
+ struct bucket_header *hdr;
+ void **first_objp = first_obj_table;
+
+ bucket_adopt_orphans(bd);
+
+ n -= n_buckets_from_stack;
+ while (n_buckets_from_stack-- > 0) {
+ hdr = bucket_stack_pop_unsafe(cur_stack);
+ *first_objp++ = (uint8_t *)hdr + header_size;
+ }
+ if (n > 0) {
+ if (unlikely(rte_ring_dequeue_bulk(bd->shared_bucket_ring,
+ first_objp, n, NULL) != n)) {
+ /* Return the already dequeued buckets */
+ while (first_objp-- != first_obj_table) {
+ bucket_stack_push(cur_stack,
+ (uint8_t *)*first_objp -
+ header_size);
+ }
+ rte_errno = ENOBUFS;
+ return -rte_errno;
+ }
+ while (n-- > 0) {
+ hdr = (struct bucket_header *)*first_objp;
+ hdr->lcore_id = rte_lcore_id();
+ *first_objp++ = (uint8_t *)hdr + header_size;
+ }
+ }
+
+ return 0;
+}
+
+static void
+count_underfilled_buckets(struct rte_mempool *mp,
+ void *opaque,
+ struct rte_mempool_memhdr *memhdr,
+ __rte_unused unsigned int mem_idx)
+{
+ unsigned int *pcount = opaque;
+ const struct bucket_data *bd = mp->pool_data;
+ unsigned int bucket_page_sz =
+ (unsigned int)(~bd->bucket_page_mask + 1);
+ uintptr_t align;
+ uint8_t *iter;
+
+ align = (uintptr_t)RTE_PTR_ALIGN_CEIL(memhdr->addr, bucket_page_sz) -
+ (uintptr_t)memhdr->addr;
+
+ for (iter = (uint8_t *)memhdr->addr + align;
+ iter < (uint8_t *)memhdr->addr + memhdr->len;
+ iter += bucket_page_sz) {
+ struct bucket_header *hdr = (struct bucket_header *)iter;
+
+ *pcount += hdr->fill_cnt;
+ }
+}
+
+static unsigned int
+bucket_get_count(const struct rte_mempool *mp)
+{
+ const struct bucket_data *bd = mp->pool_data;
+ unsigned int count =
+ bd->obj_per_bucket * rte_ring_count(bd->shared_bucket_ring) +
+ rte_ring_count(bd->shared_orphan_ring);
+ unsigned int i;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ if (!rte_lcore_is_enabled(i))
+ continue;
+ count += bd->obj_per_bucket * bd->buckets[i]->top +
+ rte_ring_count(bd->adoption_buffer_rings[i]);
+ }
+
+ rte_mempool_mem_iter((struct rte_mempool *)(uintptr_t)mp,
+ count_underfilled_buckets, &count);
+
+ return count;
+}
+
+static int
+bucket_alloc(struct rte_mempool *mp)
+{
+ int rg_flags = 0;
+ int rc = 0;
+ char rg_name[RTE_RING_NAMESIZE];
+ struct bucket_data *bd;
+ unsigned int i;
+ unsigned int bucket_header_size;
+
+ bd = rte_zmalloc_socket("bucket_pool", sizeof(*bd),
+ RTE_CACHE_LINE_SIZE, mp->socket_id);
+ if (bd == NULL) {
+ rc = -ENOMEM;
+ goto no_mem_for_data;
+ }
+ bd->pool = mp;
+ if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
+ bucket_header_size = sizeof(struct bucket_header);
+ else
+ bucket_header_size = RTE_CACHE_LINE_SIZE;
+ RTE_BUILD_BUG_ON(sizeof(struct bucket_header) > RTE_CACHE_LINE_SIZE);
+ bd->header_size = mp->header_size + bucket_header_size;
+ bd->total_elt_size = mp->header_size + mp->elt_size + mp->trailer_size;
+ bd->bucket_mem_size = RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB * 1024;
+ bd->obj_per_bucket = (bd->bucket_mem_size - bucket_header_size) /
+ bd->total_elt_size;
+ bd->bucket_page_mask = ~(rte_align64pow2(bd->bucket_mem_size) - 1);
+ /* eventually this should be a tunable parameter */
+ bd->bucket_stack_thresh = (mp->size / bd->obj_per_bucket) * 4 / 3;
+
+ if (mp->flags & MEMPOOL_F_SP_PUT)
+ rg_flags |= RING_F_SP_ENQ;
+ if (mp->flags & MEMPOOL_F_SC_GET)
+ rg_flags |= RING_F_SC_DEQ;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ if (!rte_lcore_is_enabled(i))
+ continue;
+ bd->buckets[i] =
+ bucket_stack_create(mp, mp->size / bd->obj_per_bucket);
+ if (bd->buckets[i] == NULL) {
+ rc = -ENOMEM;
+ goto no_mem_for_stacks;
+ }
+ rc = snprintf(rg_name, sizeof(rg_name),
+ RTE_MEMPOOL_MZ_FORMAT ".a%u", mp->name, i);
+ if (rc < 0 || rc >= (int)sizeof(rg_name)) {
+ rc = -ENAMETOOLONG;
+ goto no_mem_for_stacks;
+ }
+ bd->adoption_buffer_rings[i] =
+ rte_ring_create(rg_name, rte_align32pow2(mp->size + 1),
+ mp->socket_id,
+ rg_flags | RING_F_SC_DEQ);
+ if (bd->adoption_buffer_rings[i] == NULL) {
+ rc = -rte_errno;
+ goto no_mem_for_stacks;
+ }
+ }
+
+ rc = snprintf(rg_name, sizeof(rg_name),
+ RTE_MEMPOOL_MZ_FORMAT ".0", mp->name);
+ if (rc < 0 || rc >= (int)sizeof(rg_name)) {
+ rc = -ENAMETOOLONG;
+ goto invalid_shared_orphan_ring;
+ }
+ bd->shared_orphan_ring =
+ rte_ring_create(rg_name, rte_align32pow2(mp->size + 1),
+ mp->socket_id, rg_flags);
+ if (bd->shared_orphan_ring == NULL) {
+ rc = -rte_errno;
+ goto cannot_create_shared_orphan_ring;
+ }
+
+ rc = snprintf(rg_name, sizeof(rg_name),
+ RTE_MEMPOOL_MZ_FORMAT ".1", mp->name);
+ if (rc < 0 || rc >= (int)sizeof(rg_name)) {
+ rc = -ENAMETOOLONG;
+ goto invalid_shared_bucket_ring;
+ }
+ bd->shared_bucket_ring =
+ rte_ring_create(rg_name,
+ rte_align32pow2((mp->size + 1) /
+ bd->obj_per_bucket),
+ mp->socket_id, rg_flags);
+ if (bd->shared_bucket_ring == NULL) {
+ rc = -rte_errno;
+ goto cannot_create_shared_bucket_ring;
+ }
+
+ mp->pool_data = bd;
+
+ return 0;
+
+cannot_create_shared_bucket_ring:
+invalid_shared_bucket_ring:
+ rte_ring_free(bd->shared_orphan_ring);
+cannot_create_shared_orphan_ring:
+invalid_shared_orphan_ring:
+no_mem_for_stacks:
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ rte_free(bd->buckets[i]);
+ rte_ring_free(bd->adoption_buffer_rings[i]);
+ }
+ rte_free(bd);
+no_mem_for_data:
+ rte_errno = -rc;
+ return rc;
+}
+
+static void
+bucket_free(struct rte_mempool *mp)
+{
+ unsigned int i;
+ struct bucket_data *bd = mp->pool_data;
+
+ if (bd == NULL)
+ return;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ rte_free(bd->buckets[i]);
+ rte_ring_free(bd->adoption_buffer_rings[i]);
+ }
+
+ rte_ring_free(bd->shared_orphan_ring);
+ rte_ring_free(bd->shared_bucket_ring);
+
+ rte_free(bd);
+}
+
+static ssize_t
+bucket_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num,
+ __rte_unused uint32_t pg_shift, size_t *min_total_elt_size,
+ size_t *align)
+{
+ struct bucket_data *bd = mp->pool_data;
+ unsigned int bucket_page_sz;
+
+ if (bd == NULL)
+ return -EINVAL;
+
+ bucket_page_sz = rte_align32pow2(bd->bucket_mem_size);
+ *align = bucket_page_sz;
+ *min_total_elt_size = bucket_page_sz;
+ /*
+ * Each bucket occupies its own block aligned to
+ * bucket_page_sz, so the required amount of memory is
+ * a multiple of bucket_page_sz.
+ * We also need extra space for a bucket header
+ */
+ return ((obj_num + bd->obj_per_bucket - 1) /
+ bd->obj_per_bucket) * bucket_page_sz;
+}
+
+static int
+bucket_populate(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
+{
+ struct bucket_data *bd = mp->pool_data;
+ unsigned int bucket_page_sz;
+ unsigned int bucket_header_sz;
+ unsigned int n_objs;
+ uintptr_t align;
+ uint8_t *iter;
+ int rc;
+
+ if (bd == NULL)
+ return -EINVAL;
+
+ bucket_page_sz = rte_align32pow2(bd->bucket_mem_size);
+ align = RTE_PTR_ALIGN_CEIL((uintptr_t)vaddr, bucket_page_sz) -
+ (uintptr_t)vaddr;
+
+ bucket_header_sz = bd->header_size - mp->header_size;
+ if (iova != RTE_BAD_IOVA)
+ iova += align + bucket_header_sz;
+
+ for (iter = (uint8_t *)vaddr + align, n_objs = 0;
+ iter < (uint8_t *)vaddr + len && n_objs < max_objs;
+ iter += bucket_page_sz) {
+ struct bucket_header *hdr = (struct bucket_header *)iter;
+ unsigned int chunk_len = bd->bucket_mem_size;
+
+ if ((size_t)(iter - (uint8_t *)vaddr) + chunk_len > len)
+ chunk_len = len - (iter - (uint8_t *)vaddr);
+ if (chunk_len <= bucket_header_sz)
+ break;
+ chunk_len -= bucket_header_sz;
+
+ hdr->fill_cnt = 0;
+ hdr->lcore_id = LCORE_ID_ANY;
+ rc = rte_mempool_op_populate_default(mp,
+ RTE_MIN(bd->obj_per_bucket,
+ max_objs - n_objs),
+ iter + bucket_header_sz,
+ iova, chunk_len,
+ obj_cb, obj_cb_arg);
+ if (rc < 0)
+ return rc;
+ n_objs += rc;
+ if (iova != RTE_BAD_IOVA)
+ iova += bucket_page_sz;
+ }
+
+ return n_objs;
+}
+
+static int
+bucket_get_info(const struct rte_mempool *mp, struct rte_mempool_info *info)
+{
+ struct bucket_data *bd = mp->pool_data;
+
+ info->contig_block_size = bd->obj_per_bucket;
+ return 0;
+}
+
+
+static const struct rte_mempool_ops ops_bucket = {
+ .name = "bucket",
+ .alloc = bucket_alloc,
+ .free = bucket_free,
+ .enqueue = bucket_enqueue,
+ .dequeue = bucket_dequeue,
+ .get_count = bucket_get_count,
+ .calc_mem_size = bucket_calc_mem_size,
+ .populate = bucket_populate,
+ .get_info = bucket_get_info,
+ .dequeue_contig_blocks = bucket_dequeue_contig_blocks,
+};
+
+
+MEMPOOL_REGISTER_OPS(ops_bucket);
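
To make the driver comment above concrete: buckets are power-of-two sized blocks, so masking any object address with bucket_page_mask recovers the bucket_header placed at the start of the block, and fill_cnt on that header decides when the bucket is handed back to the shared ring. A self-contained sketch of that addressing trick (the 64 KiB bucket size is an assumption for the example; the driver derives it from RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative bucket size; the driver takes it from build configuration. */
#define BUCKET_MEM_SIZE (64 * 1024)

struct bucket_header {
	unsigned int lcore_id;
	uint8_t fill_cnt;
};

/* Masking an object address with the bucket page mask yields the header
 * sitting at the start of its (power-of-two aligned) bucket.
 */
static struct bucket_header *obj_to_bucket(void *obj, uintptr_t page_mask)
{
	return (struct bucket_header *)((uintptr_t)obj & page_mask);
}

int main(void)
{
	uintptr_t page_mask = ~((uintptr_t)BUCKET_MEM_SIZE - 1);
	/* One aligned dummy bucket standing in for driver-managed memory. */
	uint8_t *bucket_mem = aligned_alloc(BUCKET_MEM_SIZE, BUCKET_MEM_SIZE);
	if (bucket_mem == NULL)
		return 1;

	struct bucket_header *hdr = (struct bucket_header *)bucket_mem;
	hdr->fill_cnt = 0;

	void *obj = bucket_mem + 4096;	/* some object inside the bucket */
	printf("object maps back to its bucket header: %d\n",
	       obj_to_bucket(obj, page_mask) == hdr);

	free(bucket_mem);
	return 0;
}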
diff --git a/drivers/mempool/bucket/rte_mempool_bucket_version.map b/drivers/mempool/bucket/rte_mempool_bucket_version.map
new file mode 100644
index 00000000..9b9ab1a4
--- /dev/null
+++ b/drivers/mempool/bucket/rte_mempool_bucket_version.map
@@ -0,0 +1,4 @@
+DPDK_18.05 {
+
+ local: *;
+};
diff --git a/drivers/mempool/dpaa/Makefile b/drivers/mempool/dpaa/Makefile
index 4c0d7aaa..da8da1e9 100644
--- a/drivers/mempool/dpaa/Makefile
+++ b/drivers/mempool/dpaa/Makefile
@@ -22,6 +22,9 @@ EXPORT_MAP := rte_mempool_dpaa_version.map
# Lbrary version
LIBABIVER := 1
+# depends on dpaa bus which uses experimental API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
# all source are stored in SRCS-y
#
SRCS-$(CONFIG_RTE_LIBRTE_DPAA_MEMPOOL) += dpaa_mempool.c
diff --git a/drivers/mempool/dpaa/dpaa_mempool.c b/drivers/mempool/dpaa/dpaa_mempool.c
index fb3b6ba0..10c536bf 100644
--- a/drivers/mempool/dpaa/dpaa_mempool.c
+++ b/drivers/mempool/dpaa/dpaa_mempool.c
@@ -27,6 +27,13 @@
#include <dpaa_mempool.h>
+/* List of all the memseg information maintained locally in the dpaa driver.
+ * This is to optimize the PA_to_VA searches until a better mechanism
+ * (algorithm) is available.
+ */
+struct dpaa_memseg_list rte_dpaa_memsegs
+ = TAILQ_HEAD_INITIALIZER(rte_dpaa_memsegs);
+
struct dpaa_bp_info rte_dpaa_bpid_info[DPAA_MAX_BPOOLS];
static int
@@ -115,7 +122,8 @@ dpaa_buf_free(struct dpaa_bp_info *bp_info, uint64_t addr)
struct bm_buffer buf;
int ret;
- DPAA_MEMPOOL_DEBUG("Free 0x%lx to bpid: %d", addr, bp_info->bpid);
+ DPAA_MEMPOOL_DEBUG("Free 0x%" PRIx64 " to bpid: %d",
+ addr, bp_info->bpid);
bm_buffer_set64(&buf, addr);
retry:
@@ -154,8 +162,7 @@ dpaa_mbuf_free_bulk(struct rte_mempool *pool,
if (unlikely(!bp_info->ptov_off)) {
/* buffers are from single mem segment */
if (bp_info->flags & DPAA_MPOOL_SINGLE_SEGMENT) {
- bp_info->ptov_off
- = (uint64_t)obj_table[i] - phy;
+ bp_info->ptov_off = (size_t)obj_table[i] - phy;
rte_dpaa_bpid_info[bp_info->bpid].ptov_off
= bp_info->ptov_off;
}
@@ -264,10 +271,9 @@ dpaa_mbuf_get_count(const struct rte_mempool *mp)
}
static int
-dpaa_register_memory_area(const struct rte_mempool *mp,
- char *vaddr __rte_unused,
- rte_iova_t paddr __rte_unused,
- size_t len)
+dpaa_populate(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t paddr, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
struct dpaa_bp_info *bp_info;
unsigned int total_elt_sz;
@@ -282,14 +288,40 @@ dpaa_register_memory_area(const struct rte_mempool *mp,
bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
- DPAA_MEMPOOL_DEBUG("Req size %lu vs Available %u\n",
- len, total_elt_sz * mp->size);
+ DPAA_MEMPOOL_DEBUG("Req size %" PRIx64 " vs Available %u\n",
+ (uint64_t)len, total_elt_sz * mp->size);
/* Detect pool area has sufficient space for elements in this memzone */
if (len >= total_elt_sz * mp->size)
bp_info->flags |= DPAA_MPOOL_SINGLE_SEGMENT;
+ struct dpaa_memseg *ms;
+
+ /* For each memory chunk pinned to the Mempool, a linked list of the
+ * contained memsegs is created for searching when PA to VA
+ * conversion is required.
+ */
+ ms = rte_zmalloc(NULL, sizeof(struct dpaa_memseg), 0);
+ if (!ms) {
+ DPAA_MEMPOOL_ERR("Unable to allocate internal memory.");
+ DPAA_MEMPOOL_WARN("Fast Physical to Virtual Addr translation would not be available.");
+ /* If the element is not added, it would only lead to a failure
+ * in searching for the element and the logic would fall back
+ * to the traditional DPDK memseg traversal code. So, this is not
+ * a blocking error - but an error message is printed on screen.
+ */
+ return 0;
+ }
- return 0;
+ ms->vaddr = vaddr;
+ ms->iova = paddr;
+ ms->len = len;
+ /* Head insertions are generally faster than tail insertions as the
+ * pinned buffers are picked from the rear end.
+ */
+ TAILQ_INSERT_HEAD(&rte_dpaa_memsegs, ms, next);
+
+ return rte_mempool_op_populate_default(mp, max_objs, vaddr, paddr, len,
+ obj_cb, obj_cb_arg);
}
struct rte_mempool_ops dpaa_mpool_ops = {
@@ -299,7 +331,7 @@ struct rte_mempool_ops dpaa_mpool_ops = {
.enqueue = dpaa_mbuf_free_bulk,
.dequeue = dpaa_mbuf_alloc_bulk,
.get_count = dpaa_mbuf_get_count,
- .register_memory_area = dpaa_register_memory_area,
+ .populate = dpaa_populate,
};
MEMPOOL_REGISTER_OPS(dpaa_mpool_ops);
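
The populate hook above records every pinned memory chunk in rte_dpaa_memsegs so that physical-to-virtual translation can usually be answered from this short list instead of a full memseg scan. A simplified standalone sketch of such a lookup, using an ordinary linked list rather than the driver's TAILQ (names and values here are illustrative only):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the driver's per-chunk record. */
struct memseg {
	void *vaddr;
	uint64_t iova;
	size_t len;
	struct memseg *next;
};

/* Translate a physical/IOVA address by scanning the recorded chunks. */
static void *pa_to_va(const struct memseg *head, uint64_t pa)
{
	for (const struct memseg *ms = head; ms != NULL; ms = ms->next) {
		if (pa >= ms->iova && pa < ms->iova + ms->len)
			return (uint8_t *)ms->vaddr + (pa - ms->iova);
	}
	return NULL;	/* fall back to the generic memseg walk */
}

int main(void)
{
	static uint8_t chunk[4096];
	struct memseg ms = {
		.vaddr = chunk, .iova = 0x100000, .len = sizeof(chunk),
		.next = NULL,
	};

	void *va = pa_to_va(&ms, 0x100010);
	printf("translated correctly: %d\n", va == chunk + 0x10);
	return 0;
}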
diff --git a/drivers/mempool/dpaa/dpaa_mempool.h b/drivers/mempool/dpaa/dpaa_mempool.h
index 9435dd2f..092f326c 100644
--- a/drivers/mempool/dpaa/dpaa_mempool.h
+++ b/drivers/mempool/dpaa/dpaa_mempool.h
@@ -46,7 +46,7 @@ static inline void *
DPAA_MEMPOOL_PTOV(struct dpaa_bp_info *bp_info, uint64_t addr)
{
if (bp_info->ptov_off)
- return ((void *)(addr + bp_info->ptov_off));
+ return ((void *) (size_t)(addr + bp_info->ptov_off));
return rte_dpaa_mem_ptov(addr);
}
diff --git a/drivers/mempool/dpaa/meson.build b/drivers/mempool/dpaa/meson.build
new file mode 100644
index 00000000..9163b3db
--- /dev/null
+++ b/drivers/mempool/dpaa/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['bus_dpaa']
+sources = files('dpaa_mempool.c')
+
+# depends on dpaa bus which uses experimental API
+allow_experimental_apis = true
diff --git a/drivers/mempool/dpaa/rte_mempool_dpaa_version.map b/drivers/mempool/dpaa/rte_mempool_dpaa_version.map
index d05f274d..60bf50b2 100644
--- a/drivers/mempool/dpaa/rte_mempool_dpaa_version.map
+++ b/drivers/mempool/dpaa/rte_mempool_dpaa_version.map
@@ -2,6 +2,7 @@ DPDK_17.11 {
global:
rte_dpaa_bpid_info;
+ rte_dpaa_memsegs;
local: *;
};
diff --git a/drivers/mempool/dpaa2/Makefile b/drivers/mempool/dpaa2/Makefile
index efaac96e..9e4c87d7 100644
--- a/drivers/mempool/dpaa2/Makefile
+++ b/drivers/mempool/dpaa2/Makefile
@@ -9,14 +9,8 @@ include $(RTE_SDK)/mk/rte.vars.mk
#
LIB = librte_mempool_dpaa2.a
-ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT),y)
-CFLAGS += -O0 -g
-CFLAGS += "-Wno-error"
-else
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
-endif
-
CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
@@ -27,6 +21,9 @@ EXPORT_MAP := rte_mempool_dpaa2_version.map
# Lbrary version
LIBABIVER := 1
+# depends on fslmc bus which uses experimental API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
# all source are stored in SRCS-y
#
SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += dpaa2_hw_mempool.c
@@ -34,4 +31,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += dpaa2_hw_mempool.c
LDLIBS += -lrte_bus_fslmc
LDLIBS += -lrte_eal -lrte_mempool -lrte_ring
+SYMLINK-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL)-include := rte_dpaa2_mempool.h
+
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
index 2bd62e88..e12a0ec8 100644
--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
@@ -21,16 +21,28 @@
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
+#include "rte_dpaa2_mempool.h"
#include <fslmc_logs.h>
#include <mc/fsl_dpbp.h>
#include <portal/dpaa2_hw_pvt.h>
#include <portal/dpaa2_hw_dpio.h>
#include "dpaa2_hw_mempool.h"
+#include "dpaa2_hw_mempool_logs.h"
struct dpaa2_bp_info rte_dpaa2_bpid_info[MAX_BPID];
static struct dpaa2_bp_list *h_bp_list;
+/* List of all the memseg information maintained locally in the dpaa2 driver.
+ * This is to optimize the PA_to_VA searches until a better mechanism
+ * (algorithm) is available.
+ */
+struct dpaa2_memseg_list rte_dpaa2_memsegs
+ = TAILQ_HEAD_INITIALIZER(rte_dpaa2_memsegs);
+
+/* Dynamic logging identified for mempool */
+int dpaa2_logtype_mempool;
+
static int
rte_hw_mbuf_create_pool(struct rte_mempool *mp)
{
@@ -44,30 +56,30 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
avail_dpbp = dpaa2_alloc_dpbp_dev();
if (!avail_dpbp) {
- PMD_DRV_LOG(ERR, "DPAA2 resources not available");
+ DPAA2_MEMPOOL_ERR("DPAA2 pool not available!");
return -ENOENT;
}
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret) {
- RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+ DPAA2_MEMPOOL_ERR("Failure in affining portal");
goto err1;
}
}
ret = dpbp_enable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
if (ret != 0) {
- PMD_INIT_LOG(ERR, "Resource enable failure with"
- " err code: %d\n", ret);
+ DPAA2_MEMPOOL_ERR("Resource enable failure with err code: %d",
+ ret);
goto err1;
}
ret = dpbp_get_attributes(&avail_dpbp->dpbp, CMD_PRI_LOW,
avail_dpbp->token, &dpbp_attr);
if (ret != 0) {
- PMD_INIT_LOG(ERR, "Resource read failure with"
- " err code: %d\n", ret);
+ DPAA2_MEMPOOL_ERR("Resource read failure with err code: %d",
+ ret);
goto err2;
}
@@ -75,7 +87,7 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
sizeof(struct dpaa2_bp_info),
RTE_CACHE_LINE_SIZE);
if (!bp_info) {
- PMD_INIT_LOG(ERR, "No heap memory available for bp_info");
+ DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory");
ret = -ENOMEM;
goto err2;
}
@@ -84,7 +96,7 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
bp_list = rte_malloc(NULL, sizeof(struct dpaa2_bp_list),
RTE_CACHE_LINE_SIZE);
if (!bp_list) {
- PMD_INIT_LOG(ERR, "No heap memory available");
+ DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory");
ret = -ENOMEM;
goto err3;
}
@@ -112,7 +124,7 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
sizeof(struct dpaa2_bp_info));
mp->pool_data = (void *)bp_info;
- PMD_INIT_LOG(DEBUG, "BP List created for bpid =%d", dpbp_attr.bpid);
+ DPAA2_MEMPOOL_DEBUG("BP List created for bpid =%d", dpbp_attr.bpid);
h_bp_list = bp_list;
return 0;
@@ -134,7 +146,7 @@ rte_hw_mbuf_free_pool(struct rte_mempool *mp)
struct dpaa2_dpbp_dev *dpbp_node;
if (!mp->pool_data) {
- PMD_DRV_LOG(ERR, "Not a valid dpaa22 pool");
+ DPAA2_MEMPOOL_ERR("Not a valid dpaa2 buffer pool");
return;
}
@@ -180,7 +192,7 @@ rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret != 0) {
- RTE_LOG(ERR, PMD, "Failed to allocate IO portal\n");
+ DPAA2_MEMPOOL_ERR("Failed to allocate IO portal");
return;
}
}
@@ -233,6 +245,35 @@ aligned:
}
}
+uint16_t
+rte_dpaa2_mbuf_pool_bpid(struct rte_mempool *mp)
+{
+ struct dpaa2_bp_info *bp_info;
+
+ bp_info = mempool_to_bpinfo(mp);
+ if (!(bp_info->bp_list)) {
+ RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
+ return -ENOMEM;
+ }
+
+ return bp_info->bpid;
+}
+
+struct rte_mbuf *
+rte_dpaa2_mbuf_from_buf_addr(struct rte_mempool *mp, void *buf_addr)
+{
+ struct dpaa2_bp_info *bp_info;
+
+ bp_info = mempool_to_bpinfo(mp);
+ if (!(bp_info->bp_list)) {
+ RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
+ return NULL;
+ }
+
+ return (struct rte_mbuf *)((uint8_t *)buf_addr -
+ bp_info->meta_data_size);
+}
+
int
rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
void **obj_table, unsigned int count)
@@ -242,7 +283,7 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
#endif
struct qbman_swp *swp;
uint16_t bpid;
- uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
+ size_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
int i, ret;
unsigned int n = 0;
struct dpaa2_bp_info *bp_info;
@@ -250,7 +291,7 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
bp_info = mempool_to_bpinfo(pool);
if (!(bp_info->bp_list)) {
- RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
+ DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
return -ENOENT;
}
@@ -259,7 +300,7 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret != 0) {
- RTE_LOG(ERR, PMD, "Failed to allocate IO portal\n");
+ DPAA2_MEMPOOL_ERR("Failed to allocate IO portal");
return ret;
}
}
@@ -270,18 +311,18 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
* then the remainder.
*/
if ((count - n) > DPAA2_MBUF_MAX_ACQ_REL) {
- ret = qbman_swp_acquire(swp, bpid, bufs,
+ ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
DPAA2_MBUF_MAX_ACQ_REL);
} else {
- ret = qbman_swp_acquire(swp, bpid, bufs,
+ ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
count - n);
}
/* In case of less than requested number of buffers available
* in pool, qbman_swp_acquire returns 0
*/
if (ret <= 0) {
- PMD_TX_LOG(ERR, "Buffer acquire failed with"
- " err code: %d", ret);
+ DPAA2_MEMPOOL_ERR("Buffer acquire failed with"
+ " err code: %d", ret);
/* The API expect the exact number of requested bufs */
/* Releasing all buffers allocated */
rte_dpaa2_mbuf_release(pool, obj_table, bpid,
@@ -290,10 +331,11 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
}
/* assigning mbuf from the acquired objects */
for (i = 0; (i < ret) && bufs[i]; i++) {
- DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], uint64_t);
+ DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], size_t);
obj_table[n] = (struct rte_mbuf *)
(bufs[i] - bp_info->meta_data_size);
- PMD_TX_LOG(DEBUG, "Acquired %p address %p from BMAN",
+ DPAA2_MEMPOOL_DP_DEBUG(
+ "Acquired %p address %p from BMAN\n",
(void *)bufs[i], (void *)obj_table[n]);
n++;
}
@@ -301,8 +343,8 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
alloc += n;
- PMD_TX_LOG(DEBUG, "Total = %d , req = %d done = %d",
- alloc, count, n);
+ DPAA2_MEMPOOL_DP_DEBUG("Total = %d , req = %d done = %d\n",
+ alloc, count, n);
#endif
return 0;
}
@@ -315,7 +357,7 @@ rte_hw_mbuf_free_bulk(struct rte_mempool *pool,
bp_info = mempool_to_bpinfo(pool);
if (!(bp_info->bp_list)) {
- RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
+ DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
return -ENOENT;
}
rte_dpaa2_mbuf_release(pool, obj_table, bp_info->bpid,
@@ -333,7 +375,7 @@ rte_hw_mbuf_get_count(const struct rte_mempool *mp)
struct dpaa2_dpbp_dev *dpbp_node;
if (!mp || !mp->pool_data) {
- RTE_LOG(ERR, PMD, "Invalid mempool provided\n");
+ DPAA2_MEMPOOL_ERR("Invalid mempool provided");
return 0;
}
@@ -343,16 +385,51 @@ rte_hw_mbuf_get_count(const struct rte_mempool *mp)
ret = dpbp_get_num_free_bufs(&dpbp_node->dpbp, CMD_PRI_LOW,
dpbp_node->token, &num_of_bufs);
if (ret) {
- RTE_LOG(ERR, PMD, "Unable to obtain free buf count (err=%d)\n",
- ret);
+ DPAA2_MEMPOOL_ERR("Unable to obtain free buf count (err=%d)",
+ ret);
return 0;
}
- RTE_LOG(DEBUG, PMD, "Free bufs = %u\n", num_of_bufs);
+ DPAA2_MEMPOOL_DP_DEBUG("Free bufs = %u\n", num_of_bufs);
return num_of_bufs;
}
+static int
+dpaa2_populate(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t paddr, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
+{
+ struct dpaa2_memseg *ms;
+
+ /* For each memory chunk pinned to the Mempool, a linked list of the
+ * contained memsegs is created for searching when PA to VA
+ * conversion is required.
+ */
+ ms = rte_zmalloc(NULL, sizeof(struct dpaa2_memseg), 0);
+ if (!ms) {
+ DPAA2_MEMPOOL_ERR("Unable to allocate internal memory.");
+ DPAA2_MEMPOOL_WARN("Fast Physical to Virtual Addr translation would not be available.");
+ /* If the element is not added, it would only lead to a failure
+ * in searching for the element and the logic would fall back
+ * to the traditional DPDK memseg traversal code. So, this is not
+ * a blocking error - but an error message is printed on screen.
+ */
+ return 0;
+ }
+
+ ms->vaddr = vaddr;
+ ms->iova = paddr;
+ ms->len = len;
+ /* Head insertions are generally faster than tail insertions as the
+ * pinned buffers are picked from the rear end.
+ */
+ TAILQ_INSERT_HEAD(&rte_dpaa2_memsegs, ms, next);
+
+ return rte_mempool_op_populate_default(mp, max_objs, vaddr, paddr, len,
+ obj_cb, obj_cb_arg);
+}
+
struct rte_mempool_ops dpaa2_mpool_ops = {
.name = DPAA2_MEMPOOL_OPS_NAME,
.alloc = rte_hw_mbuf_create_pool,
@@ -360,6 +437,16 @@ struct rte_mempool_ops dpaa2_mpool_ops = {
.enqueue = rte_hw_mbuf_free_bulk,
.dequeue = rte_dpaa2_mbuf_alloc_bulk,
.get_count = rte_hw_mbuf_get_count,
+ .populate = dpaa2_populate,
};
MEMPOOL_REGISTER_OPS(dpaa2_mpool_ops);
+
+RTE_INIT(dpaa2_mempool_init_log);
+static void
+dpaa2_mempool_init_log(void)
+{
+ dpaa2_logtype_mempool = rte_log_register("mempool.dpaa2");
+ if (dpaa2_logtype_mempool >= 0)
+ rte_log_set_level(dpaa2_logtype_mempool, RTE_LOG_NOTICE);
+}
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h b/drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h
new file mode 100644
index 00000000..c79b3d1c
--- /dev/null
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h
@@ -0,0 +1,38 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 NXP
+ */
+
+#ifndef _DPAA2_HW_MEMPOOL_LOGS_H_
+#define _DPAA2_HW_MEMPOOL_LOGS_H_
+
+extern int dpaa2_logtype_mempool;
+
+#define DPAA2_MEMPOOL_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_logtype_mempool, \
+ "mempool/dpaa2: " fmt "\n", ##args)
+
+/* Debug logs include the function name */
+#define DPAA2_MEMPOOL_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_logtype_mempool, \
+ "mempool/dpaa2: %s(): " fmt "\n", __func__, ##args)
+
+#define DPAA2_MEMPOOL_INFO(fmt, args...) \
+ DPAA2_MEMPOOL_LOG(INFO, fmt, ## args)
+#define DPAA2_MEMPOOL_ERR(fmt, args...) \
+ DPAA2_MEMPOOL_LOG(ERR, fmt, ## args)
+#define DPAA2_MEMPOOL_WARN(fmt, args...) \
+ DPAA2_MEMPOOL_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, toggled out at compile time if level lower than current level */
+#define DPAA2_MEMPOOL_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#define DPAA2_MEMPOOL_DP_DEBUG(fmt, args...) \
+ DPAA2_MEMPOOL_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_MEMPOOL_DP_INFO(fmt, args...) \
+ DPAA2_MEMPOOL_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_MEMPOOL_DP_WARN(fmt, args...) \
+ DPAA2_MEMPOOL_DP_LOG(WARNING, fmt, ## args)
+
+#endif /* _DPAA2_HW_MEMPOOL_LOGS_H_ */
diff --git a/drivers/mempool/dpaa2/meson.build b/drivers/mempool/dpaa2/meson.build
new file mode 100644
index 00000000..90bab606
--- /dev/null
+++ b/drivers/mempool/dpaa2/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['bus_fslmc']
+sources = files('dpaa2_hw_mempool.c')
+
+# depends on fslmc bus which uses experimental API
+allow_experimental_apis = true
diff --git a/drivers/mempool/dpaa2/rte_dpaa2_mempool.h b/drivers/mempool/dpaa2/rte_dpaa2_mempool.h
new file mode 100644
index 00000000..4a22b7c4
--- /dev/null
+++ b/drivers/mempool/dpaa2/rte_dpaa2_mempool.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __RTE_DPAA2_MEMPOOL_H__
+#define __RTE_DPAA2_MEMPOOL_H__
+
+/**
+ * @file
+ *
+ * NXP-specific mempool-related functions.
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_mempool.h>
+
+/**
+ * Get BPID corresponding to the packet pool
+ *
+ * @param mp
+ * memory pool
+ *
+ * @return
+ * BPID of the buffer pool
+ */
+uint16_t
+rte_dpaa2_mbuf_pool_bpid(struct rte_mempool *mp);
+
+/**
+ * Get MBUF from the corresponding 'buf_addr'
+ *
+ * @param mp
+ * memory pool
+ * @param buf_addr
+ * The 'buf_addr' of the mbuf. This is the start buffer address
+ * of the packet buffer (mbuf).
+ *
+ * @return
+ * - MBUF pointer for success
+ * - NULL in case of error
+ */
+struct rte_mbuf *
+rte_dpaa2_mbuf_from_buf_addr(struct rte_mempool *mp, void *buf_addr);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_DPAA2_MEMPOOL_H__ */
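
A possible usage sketch for the two functions declared above, assuming a mempool created with the DPAA2 mempool ops; this is a compile-level illustration only, since the calls need a DPAA2 platform and an initialised EAL at run time:

#include <stdio.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_dpaa2_mempool.h>

/* Report the hardware buffer pool id of a DPAA2-backed pool and map a
 * raw packet buffer address back to its owning mbuf.
 */
static struct rte_mbuf *
inspect_dpaa2_pool(struct rte_mempool *mp, void *buf_addr)
{
	uint16_t bpid = rte_dpaa2_mbuf_pool_bpid(mp);

	printf("pool %s uses hardware bpid %u\n", mp->name, bpid);

	return rte_dpaa2_mbuf_from_buf_addr(mp, buf_addr);
}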
diff --git a/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map b/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map
index a8aa685c..b9d996a6 100644
--- a/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map
+++ b/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map
@@ -3,6 +3,15 @@ DPDK_17.05 {
rte_dpaa2_bpid_info;
rte_dpaa2_mbuf_alloc_bulk;
+ rte_dpaa2_memsegs;
local: *;
};
+
+DPDK_18.05 {
+ global:
+
+ rte_dpaa2_mbuf_from_buf_addr;
+ rte_dpaa2_mbuf_pool_bpid;
+
+} DPDK_17.05;
diff --git a/drivers/mempool/meson.build b/drivers/mempool/meson.build
index 59918560..4527d980 100644
--- a/drivers/mempool/meson.build
+++ b/drivers/mempool/meson.build
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-drivers = ['ring', 'stack', 'octeontx']
+drivers = ['bucket', 'dpaa', 'dpaa2', 'octeontx', 'ring', 'stack']
std_deps = ['mempool']
config_flag_fmt = 'RTE_LIBRTE_@0@_MEMPOOL'
driver_name_fmt = 'rte_mempool_@0@'
diff --git a/drivers/mempool/octeontx/Makefile b/drivers/mempool/octeontx/Makefile
index dfc373e6..a3e1dce8 100644
--- a/drivers/mempool/octeontx/Makefile
+++ b/drivers/mempool/octeontx/Makefile
@@ -10,6 +10,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_mempool_octeontx.a
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx/
EXPORT_MAP := rte_mempool_octeontx_version.map
LIBABIVER := 1
@@ -17,8 +18,6 @@ LIBABIVER := 1
#
# all source are stored in SRCS-y
#
-SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx_ssovf.c
-SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx_mbox.c
SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx_fpavf.c
SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += rte_mempool_octeontx.c
@@ -36,6 +35,6 @@ CFLAGS_rte_mempool_octeontx.o += -Ofast
endif
LDLIBS += -lrte_eal -lrte_mempool -lrte_ring -lrte_mbuf
-LDLIBS += -lrte_bus_pci
+LDLIBS += -lrte_bus_pci -lrte_common_octeontx
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/mempool/octeontx/meson.build b/drivers/mempool/octeontx/meson.build
index 1e894a56..3baaf7db 100644
--- a/drivers/mempool/octeontx/meson.build
+++ b/drivers/mempool/octeontx/meson.build
@@ -1,10 +1,8 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Cavium, Inc
-sources = files('octeontx_ssovf.c',
- 'octeontx_mbox.c',
- 'octeontx_fpavf.c',
+sources = files('octeontx_fpavf.c',
'rte_mempool_octeontx.c'
)
-deps += ['mbuf', 'bus_pci']
+deps += ['mbuf', 'bus_pci', 'common_octeontx']
diff --git a/drivers/mempool/octeontx/octeontx_fpavf.c b/drivers/mempool/octeontx/octeontx_fpavf.c
index 61c72c7c..7aecaa85 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.c
+++ b/drivers/mempool/octeontx/octeontx_fpavf.c
@@ -115,10 +115,6 @@ otx_pool_init_log(void)
octeontx_logtype_fpavf = rte_log_register("pmd.mempool.octeontx");
if (octeontx_logtype_fpavf >= 0)
rte_log_set_level(octeontx_logtype_fpavf, RTE_LOG_NOTICE);
-
- octeontx_logtype_fpavf_mbox = rte_log_register("pmd.mempool.octeontx.mbox");
- if (octeontx_logtype_fpavf_mbox >= 0)
- rte_log_set_level(octeontx_logtype_fpavf_mbox, RTE_LOG_NOTICE);
}
/* lock is taken by caller */
@@ -253,7 +249,7 @@ octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size,
cfg.pool_stack_end = phys_addr + memsz;
cfg.aura_cfg = (1 << 9);
- ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
+ ret = octeontx_mbox_send(&hdr, &cfg,
sizeof(struct octeontx_mbox_fpa_cfg),
&resp, sizeof(resp));
if (ret < 0) {
@@ -298,7 +294,7 @@ octeontx_fpapf_pool_destroy(unsigned int gpool_index)
cfg.pool_stack_end = 0;
cfg.aura_cfg = 0;
- ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
+ ret = octeontx_mbox_send(&hdr, &cfg,
sizeof(struct octeontx_mbox_fpa_cfg),
&resp, sizeof(resp));
if (ret < 0) {
@@ -333,7 +329,7 @@ octeontx_fpapf_aura_attach(unsigned int gpool_index)
memset(&cfg, 0x0, sizeof(struct octeontx_mbox_fpa_cfg));
cfg.aid = gpool_index; /* gpool is guara */
- ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
+ ret = octeontx_mbox_send(&hdr, &cfg,
sizeof(struct octeontx_mbox_fpa_cfg),
&resp, sizeof(resp));
if (ret < 0) {
@@ -363,7 +359,7 @@ octeontx_fpapf_aura_detach(unsigned int gpool_index)
hdr.coproc = FPA_COPROC;
hdr.msg = FPA_DETACHAURA;
hdr.vfid = gpool_index;
- ret = octeontx_ssovf_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0);
+ ret = octeontx_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0);
if (ret < 0) {
fpavf_log_err("Couldn't detach FPA aura %d Err=%d FuncErr=%d\n",
gpool_index, ret, hdr.res_code);
@@ -410,7 +406,7 @@ octeontx_fpapf_start_count(uint16_t gpool_index)
hdr.coproc = FPA_COPROC;
hdr.msg = FPA_START_COUNT;
hdr.vfid = gpool_index;
- ret = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0);
+ ret = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
if (ret < 0) {
fpavf_log_err("Could not start buffer counting for ");
fpavf_log_err("FPA pool %d. Err=%d. FuncErr=%d\n",
diff --git a/drivers/mempool/octeontx/octeontx_mbox.h b/drivers/mempool/octeontx/octeontx_mbox.h
deleted file mode 100644
index 1b056071..00000000
--- a/drivers/mempool/octeontx/octeontx_mbox.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017 Cavium, Inc
- */
-
-#ifndef __OCTEONTX_MBOX_H__
-#define __OCTEONTX_MBOX_H__
-
-#include <rte_common.h>
-
-#define SSOW_BAR4_LEN (64 * 1024)
-#define SSO_VHGRP_PF_MBOX(x) (0x200ULL | ((x) << 3))
-
-struct octeontx_ssovf_info {
- uint16_t domain; /* Domain id */
- uint8_t total_ssovfs; /* Total sso groups available in domain */
- uint8_t total_ssowvfs;/* Total sso hws available in domain */
-};
-
-enum octeontx_ssovf_type {
- OCTEONTX_SSO_GROUP, /* SSO group vf */
- OCTEONTX_SSO_HWS, /* SSO hardware workslot vf */
-};
-
-struct octeontx_mbox_hdr {
- uint16_t vfid; /* VF index or pf resource index local to the domain */
- uint8_t coproc; /* Coprocessor id */
- uint8_t msg; /* Message id */
- uint8_t res_code; /* Functional layer response code */
-};
-
-int octeontx_ssovf_info(struct octeontx_ssovf_info *info);
-void *octeontx_ssovf_bar(enum octeontx_ssovf_type, uint8_t id, uint8_t bar);
-int octeontx_ssovf_mbox_send(struct octeontx_mbox_hdr *hdr,
- void *txdata, uint16_t txlen, void *rxdata, uint16_t rxlen);
-
-#endif /* __OCTEONTX_MBOX_H__ */
diff --git a/drivers/mempool/octeontx/octeontx_pool_logs.h b/drivers/mempool/octeontx/octeontx_pool_logs.h
index 95865192..7b4e1b38 100644
--- a/drivers/mempool/octeontx/octeontx_pool_logs.h
+++ b/drivers/mempool/octeontx/octeontx_pool_logs.h
@@ -11,21 +11,12 @@
rte_log(RTE_LOG_ ## level, octeontx_logtype_fpavf,\
"%s() line %u: " fmt "\n", __func__, __LINE__, ## args)
-#define MBOX_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, octeontx_logtype_fpavf_mbox,\
- "%s() line %u: " fmt "\n", __func__, __LINE__, ## args)
-
#define fpavf_log_info(fmt, ...) FPAVF_LOG(INFO, fmt, ##__VA_ARGS__)
#define fpavf_log_dbg(fmt, ...) FPAVF_LOG(DEBUG, fmt, ##__VA_ARGS__)
#define fpavf_log_err(fmt, ...) FPAVF_LOG(ERR, fmt, ##__VA_ARGS__)
#define fpavf_func_trace fpavf_log_dbg
-#define mbox_log_info(fmt, ...) MBOX_LOG(INFO, fmt, ##__VA_ARGS__)
-#define mbox_log_dbg(fmt, ...) MBOX_LOG(DEBUG, fmt, ##__VA_ARGS__)
-#define mbox_log_err(fmt, ...) MBOX_LOG(ERR, fmt, ##__VA_ARGS__)
-#define mbox_func_trace mbox_log_dbg
extern int octeontx_logtype_fpavf;
-extern int octeontx_logtype_fpavf_mbox;
#endif /* __OCTEONTX_POOL_LOGS_H__*/
diff --git a/drivers/mempool/octeontx/rte_mempool_octeontx.c b/drivers/mempool/octeontx/rte_mempool_octeontx.c
index d143d05c..ab94dfe9 100644
--- a/drivers/mempool/octeontx/rte_mempool_octeontx.c
+++ b/drivers/mempool/octeontx/rte_mempool_octeontx.c
@@ -126,28 +126,66 @@ octeontx_fpavf_get_count(const struct rte_mempool *mp)
return octeontx_fpa_bufpool_free_count(pool);
}
-static int
-octeontx_fpavf_get_capabilities(const struct rte_mempool *mp,
- unsigned int *flags)
+static ssize_t
+octeontx_fpavf_calc_mem_size(const struct rte_mempool *mp,
+ uint32_t obj_num, uint32_t pg_shift,
+ size_t *min_chunk_size, size_t *align)
{
- RTE_SET_USED(mp);
- *flags |= (MEMPOOL_F_CAPA_PHYS_CONTIG |
- MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS);
- return 0;
+ ssize_t mem_size;
+
+ /*
+ * Simply need space for one more object to be able to
+ * fulfil alignment requirements.
+ */
+ mem_size = rte_mempool_op_calc_mem_size_default(mp, obj_num + 1,
+ pg_shift,
+ min_chunk_size, align);
+ if (mem_size >= 0) {
+ /*
+ * Memory area which contains objects must be physically
+ * contiguous.
+ */
+ *min_chunk_size = mem_size;
+ }
+
+ return mem_size;
}
static int
-octeontx_fpavf_register_memory_area(const struct rte_mempool *mp,
- char *vaddr, rte_iova_t paddr, size_t len)
+octeontx_fpavf_populate(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
- RTE_SET_USED(paddr);
+ size_t total_elt_sz;
+ size_t off;
uint8_t gpool;
uintptr_t pool_bar;
+ int ret;
+
+ if (iova == RTE_BAD_IOVA)
+ return -EINVAL;
+
+ total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+
+ /* align object start address to a multiple of total_elt_sz */
+ off = total_elt_sz - ((uintptr_t)vaddr % total_elt_sz);
+
+ if (len < off)
+ return -EINVAL;
+
+ vaddr = (char *)vaddr + off;
+ iova += off;
+ len -= off;
gpool = octeontx_fpa_bufpool_gpool(mp->pool_id);
pool_bar = mp->pool_id & ~(uint64_t)FPA_GPOOL_MASK;
- return octeontx_fpavf_pool_set_range(pool_bar, len, vaddr, gpool);
+ ret = octeontx_fpavf_pool_set_range(pool_bar, len, vaddr, gpool);
+ if (ret < 0)
+ return ret;
+
+ return rte_mempool_op_populate_default(mp, max_objs, vaddr, iova, len,
+ obj_cb, obj_cb_arg);
}
static struct rte_mempool_ops octeontx_fpavf_ops = {
@@ -157,8 +195,8 @@ static struct rte_mempool_ops octeontx_fpavf_ops = {
.enqueue = octeontx_fpavf_enqueue,
.dequeue = octeontx_fpavf_dequeue,
.get_count = octeontx_fpavf_get_count,
- .get_capabilities = octeontx_fpavf_get_capabilities,
- .register_memory_area = octeontx_fpavf_register_memory_area,
+ .calc_mem_size = octeontx_fpavf_calc_mem_size,
+ .populate = octeontx_fpavf_populate,
};
MEMPOOL_REGISTER_OPS(octeontx_fpavf_ops);
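
The new populate callback shifts the first object so that every element begins at a multiple of total_elt_sz, which is also why octeontx_fpavf_calc_mem_size reserves space for one extra object. A short standalone illustration of that offset computation (the element size and address below are made-up example values):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Bytes to skip so that (vaddr + off) is a multiple of total_elt_sz,
 * mirroring the adjustment in octeontx_fpavf_populate().
 */
static size_t align_off(uintptr_t vaddr, size_t total_elt_sz)
{
	return total_elt_sz - (vaddr % total_elt_sz);
}

int main(void)
{
	size_t total_elt_sz = 2176;		/* header + element + trailer */
	uintptr_t vaddr = 0x7f0000001000;	/* start of the populated chunk */
	size_t off = align_off(vaddr, total_elt_sz);

	printf("skip %zu bytes, first object at 0x%" PRIxPTR "\n",
	       off, vaddr + off);
	printf("aligned: %d\n", (vaddr + off) % total_elt_sz == 0);
	return 0;
}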
diff --git a/drivers/mempool/octeontx/rte_mempool_octeontx_version.map b/drivers/mempool/octeontx/rte_mempool_octeontx_version.map
index fe8cdeca..a7530317 100644
--- a/drivers/mempool/octeontx/rte_mempool_octeontx_version.map
+++ b/drivers/mempool/octeontx/rte_mempool_octeontx_version.map
@@ -1,9 +1,3 @@
DPDK_17.11 {
- global:
-
- octeontx_ssovf_info;
- octeontx_ssovf_bar;
- octeontx_ssovf_mbox_send;
-
local: *;
};
diff --git a/drivers/meson.build b/drivers/meson.build
index b41a0f18..ac6c9729 100644
--- a/drivers/meson.build
+++ b/drivers/meson.build
@@ -2,11 +2,14 @@
# Copyright(c) 2017 Intel Corporation
# Defines the order in which the drivers are built.
-driver_classes = ['bus',
- 'mempool', # depends on bus.
- 'net', # depends on bus and mempool.
- 'crypto', # depenss on bus, mempool (net in future).
- 'event'] # depends on bus, mempool and net.
+driver_classes = ['common',
+ 'bus',
+ 'mempool', # depends on common and bus.
+ 'net', # depends on common, bus and mempool.
+ 'crypto', # depends on common, bus and mempool (net in future).
+ 'compress', # depends on common, bus, mempool.
+ 'event', # depends on common, bus, mempool and net.
+ 'raw'] # depends on common, bus, mempool, net and event.
foreach class:driver_classes
drivers = []
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index e1127326..9f9da665 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -12,11 +12,16 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += af_packet
DIRS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark
DIRS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf
DIRS-$(CONFIG_RTE_LIBRTE_AVP_PMD) += avp
+DIRS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe
DIRS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x
DIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += bonding
DIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
DIRS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa
+endif
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
DIRS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2
+endif
DIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000
DIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena
DIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic
@@ -27,7 +32,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe
DIRS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += liquidio
DIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4
DIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5
-DIRS-$(CONFIG_RTE_LIBRTE_MRVL_PMD) += mrvl
+DIRS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mvpp2
DIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp
DIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt
DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += null
@@ -53,9 +58,12 @@ endif # $(CONFIG_RTE_LIBRTE_SCHED)
ifeq ($(CONFIG_RTE_LIBRTE_VHOST),y)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += vhost
+ifeq ($(CONFIG_RTE_EAL_VFIO),y)
+DIRS-$(CONFIG_RTE_LIBRTE_IFCVF_VDPA_PMD) += ifc
+endif
endif # $(CONFIG_RTE_LIBRTE_VHOST)
-ifeq ($(CONFIG_RTE_LIBRTE_MRVL_PMD),y)
+ifeq ($(CONFIG_RTE_LIBRTE_MVPP2_PMD),y)
ifeq ($(CONFIG_RTE_LIBRTE_CFGFILE),n)
$(error "RTE_LIBRTE_CFGFILE must be enabled in configuration!")
endif
diff --git a/drivers/net/af_packet/Makefile b/drivers/net/af_packet/Makefile
index bb37d67a..39a1e0d2 100644
--- a/drivers/net/af_packet/Makefile
+++ b/drivers/net/af_packet/Makefile
@@ -1,35 +1,8 @@
-# BSD LICENSE
-#
-# Copyright(c) 2014 John W. Linville <linville@redhat.com>
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# Copyright(c) 2014 6WIND S.A.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2014 John W. Linville <linville@redhat.com>
+# Copyright(c) 2010-2014 Intel Corporation.
+# Copyright(c) 2014 6WIND S.A.
+# All rights reserved.
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
index 57eccfd0..ea47abbf 100644
--- a/drivers/net/af_packet/rte_eth_af_packet.c
+++ b/drivers/net/af_packet/rte_eth_af_packet.c
@@ -94,9 +94,15 @@ static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
.link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_AUTONEG
+ .link_autoneg = ETH_LINK_FIXED,
};
+static int af_packet_logtype;
+
+#define PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, af_packet_logtype, \
+ "%s(): " fmt "\n", __func__, ##args)
+
static uint16_t
eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
@@ -393,8 +399,8 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
data_size -= TPACKET2_HDRLEN - sizeof(struct sockaddr_ll);
if (data_size > buf_size) {
- RTE_LOG(ERR, PMD,
- "%s: %d bytes will not fit in mbuf (%d bytes)\n",
+ PMD_LOG(ERR,
+ "%s: %d bytes will not fit in mbuf (%d bytes)",
dev->device->name, data_size, buf_size);
return -ENOMEM;
}
@@ -515,7 +521,7 @@ open_packet_iface(const char *key __rte_unused,
/* Open an AF_PACKET socket... */
*sockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
if (*sockfd == -1) {
- RTE_LOG(ERR, PMD, "Could not open AF_PACKET socket\n");
+ PMD_LOG(ERR, "Could not open AF_PACKET socket");
return -1;
}
@@ -561,28 +567,20 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
break;
}
if (pair == NULL) {
- RTE_LOG(ERR, PMD,
- "%s: no interface specified for AF_PACKET ethdev\n",
+ PMD_LOG(ERR,
+ "%s: no interface specified for AF_PACKET ethdev",
name);
- goto error_early;
+ return -1;
}
- RTE_LOG(INFO, PMD,
- "%s: creating AF_PACKET-backed ethdev on numa socket %u\n",
+ PMD_LOG(INFO,
+ "%s: creating AF_PACKET-backed ethdev on numa socket %u",
name, numa_node);
- /*
- * now do all data allocation - for eth_dev structure, dummy pci driver
- * and internal (private) data
- */
- data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
- if (data == NULL)
- goto error_early;
-
*internals = rte_zmalloc_socket(name, sizeof(**internals),
0, numa_node);
if (*internals == NULL)
- goto error_early;
+ return -1;
for (q = 0; q < nb_queues; q++) {
(*internals)->rx_queue[q].map = MAP_FAILED;
@@ -601,27 +599,27 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
memcpy(ifr.ifr_name, pair->value, ifnamelen);
ifr.ifr_name[ifnamelen] = '\0';
} else {
- RTE_LOG(ERR, PMD,
- "%s: I/F name too long (%s)\n",
+ PMD_LOG(ERR,
+ "%s: I/F name too long (%s)",
name, pair->value);
- goto error_early;
+ return -1;
}
if (ioctl(sockfd, SIOCGIFINDEX, &ifr) == -1) {
- RTE_LOG(ERR, PMD,
- "%s: ioctl failed (SIOCGIFINDEX)\n",
+ PMD_LOG(ERR,
+ "%s: ioctl failed (SIOCGIFINDEX)",
name);
- goto error_early;
+ return -1;
}
(*internals)->if_name = strdup(pair->value);
if ((*internals)->if_name == NULL)
- goto error_early;
+ return -1;
(*internals)->if_index = ifr.ifr_ifindex;
if (ioctl(sockfd, SIOCGIFHWADDR, &ifr) == -1) {
- RTE_LOG(ERR, PMD,
- "%s: ioctl failed (SIOCGIFHWADDR)\n",
+ PMD_LOG(ERR,
+ "%s: ioctl failed (SIOCGIFHWADDR)",
name);
- goto error_early;
+ return -1;
}
memcpy(&(*internals)->eth_addr, ifr.ifr_hwaddr.sa_data, ETH_ALEN);
@@ -642,8 +640,8 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
/* Open an AF_PACKET socket for this queue... */
qsockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
if (qsockfd == -1) {
- RTE_LOG(ERR, PMD,
- "%s: could not open AF_PACKET socket\n",
+ PMD_LOG(ERR,
+ "%s: could not open AF_PACKET socket",
name);
return -1;
}
@@ -652,9 +650,9 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
rc = setsockopt(qsockfd, SOL_PACKET, PACKET_VERSION,
&tpver, sizeof(tpver));
if (rc == -1) {
- RTE_LOG(ERR, PMD,
- "%s: could not set PACKET_VERSION on AF_PACKET "
- "socket for %s\n", name, pair->value);
+ PMD_LOG(ERR,
+ "%s: could not set PACKET_VERSION on AF_PACKET socket for %s",
+ name, pair->value);
goto error;
}
@@ -662,9 +660,9 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
rc = setsockopt(qsockfd, SOL_PACKET, PACKET_LOSS,
&discard, sizeof(discard));
if (rc == -1) {
- RTE_LOG(ERR, PMD,
- "%s: could not set PACKET_LOSS on "
- "AF_PACKET socket for %s\n", name, pair->value);
+ PMD_LOG(ERR,
+ "%s: could not set PACKET_LOSS on AF_PACKET socket for %s",
+ name, pair->value);
goto error;
}
@@ -672,10 +670,9 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
rc = setsockopt(qsockfd, SOL_PACKET, PACKET_QDISC_BYPASS,
&qdisc_bypass, sizeof(qdisc_bypass));
if (rc == -1) {
- RTE_LOG(ERR, PMD,
- "%s: could not set PACKET_QDISC_BYPASS "
- "on AF_PACKET socket for %s\n", name,
- pair->value);
+ PMD_LOG(ERR,
+ "%s: could not set PACKET_QDISC_BYPASS on AF_PACKET socket for %s",
+ name, pair->value);
goto error;
}
#else
@@ -684,17 +681,17 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
rc = setsockopt(qsockfd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req));
if (rc == -1) {
- RTE_LOG(ERR, PMD,
- "%s: could not set PACKET_RX_RING on AF_PACKET "
- "socket for %s\n", name, pair->value);
+ PMD_LOG(ERR,
+ "%s: could not set PACKET_RX_RING on AF_PACKET socket for %s",
+ name, pair->value);
goto error;
}
rc = setsockopt(qsockfd, SOL_PACKET, PACKET_TX_RING, req, sizeof(*req));
if (rc == -1) {
- RTE_LOG(ERR, PMD,
+ PMD_LOG(ERR,
"%s: could not set PACKET_TX_RING on AF_PACKET "
- "socket for %s\n", name, pair->value);
+ "socket for %s", name, pair->value);
goto error;
}
@@ -705,8 +702,8 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED,
qsockfd, 0);
if (rx_queue->map == MAP_FAILED) {
- RTE_LOG(ERR, PMD,
- "%s: call to mmap failed on AF_PACKET socket for %s\n",
+ PMD_LOG(ERR,
+ "%s: call to mmap failed on AF_PACKET socket for %s",
name, pair->value);
goto error;
}
@@ -742,8 +739,8 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
rc = bind(qsockfd, (const struct sockaddr*)&sockaddr, sizeof(sockaddr));
if (rc == -1) {
- RTE_LOG(ERR, PMD,
- "%s: could not bind AF_PACKET socket to %s\n",
+ PMD_LOG(ERR,
+ "%s: could not bind AF_PACKET socket to %s",
name, pair->value);
goto error;
}
@@ -752,9 +749,9 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
rc = setsockopt(qsockfd, SOL_PACKET, PACKET_FANOUT,
&fanout_arg, sizeof(fanout_arg));
if (rc == -1) {
- RTE_LOG(ERR, PMD,
+ PMD_LOG(ERR,
"%s: could not set PACKET_FANOUT on AF_PACKET socket "
- "for %s\n", name, pair->value);
+ "for %s", name, pair->value);
goto error;
}
#endif
@@ -775,14 +772,13 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
(*internals)->nb_queues = nb_queues;
- rte_memcpy(data, (*eth_dev)->data, sizeof(*data));
+ data = (*eth_dev)->data;
data->dev_private = *internals;
data->nb_rx_queues = (uint16_t)nb_queues;
data->nb_tx_queues = (uint16_t)nb_queues;
data->dev_link = pmd_link;
data->mac_addrs = &(*internals)->eth_addr;
- (*eth_dev)->data = data;
(*eth_dev)->dev_ops = &ops;
return 0;
@@ -802,8 +798,6 @@ error:
}
free((*internals)->if_name);
rte_free(*internals);
-error_early:
- rte_free(data);
return -1;
}
@@ -837,8 +831,8 @@ rte_eth_from_packet(struct rte_vdev_device *dev,
qpairs = atoi(pair->value);
if (qpairs < 1 ||
qpairs > RTE_PMD_AF_PACKET_MAX_RINGS) {
- RTE_LOG(ERR, PMD,
- "%s: invalid qpairs value\n",
+ PMD_LOG(ERR,
+ "%s: invalid qpairs value",
name);
return -1;
}
@@ -847,8 +841,8 @@ rte_eth_from_packet(struct rte_vdev_device *dev,
if (strstr(pair->key, ETH_AF_PACKET_BLOCKSIZE_ARG) != NULL) {
blocksize = atoi(pair->value);
if (!blocksize) {
- RTE_LOG(ERR, PMD,
- "%s: invalid blocksize value\n",
+ PMD_LOG(ERR,
+ "%s: invalid blocksize value",
name);
return -1;
}
@@ -857,8 +851,8 @@ rte_eth_from_packet(struct rte_vdev_device *dev,
if (strstr(pair->key, ETH_AF_PACKET_FRAMESIZE_ARG) != NULL) {
framesize = atoi(pair->value);
if (!framesize) {
- RTE_LOG(ERR, PMD,
- "%s: invalid framesize value\n",
+ PMD_LOG(ERR,
+ "%s: invalid framesize value",
name);
return -1;
}
@@ -867,8 +861,8 @@ rte_eth_from_packet(struct rte_vdev_device *dev,
if (strstr(pair->key, ETH_AF_PACKET_FRAMECOUNT_ARG) != NULL) {
framecount = atoi(pair->value);
if (!framecount) {
- RTE_LOG(ERR, PMD,
- "%s: invalid framecount value\n",
+ PMD_LOG(ERR,
+ "%s: invalid framecount value",
name);
return -1;
}
@@ -877,8 +871,8 @@ rte_eth_from_packet(struct rte_vdev_device *dev,
if (strstr(pair->key, ETH_AF_PACKET_QDISC_BYPASS_ARG) != NULL) {
qdisc_bypass = atoi(pair->value);
if (qdisc_bypass > 1) {
- RTE_LOG(ERR, PMD,
- "%s: invalid bypass value\n",
+ PMD_LOG(ERR,
+ "%s: invalid bypass value",
name);
return -1;
}
@@ -887,24 +881,24 @@ rte_eth_from_packet(struct rte_vdev_device *dev,
}
if (framesize > blocksize) {
- RTE_LOG(ERR, PMD,
- "%s: AF_PACKET MMAP frame size exceeds block size!\n",
+ PMD_LOG(ERR,
+ "%s: AF_PACKET MMAP frame size exceeds block size!",
name);
return -1;
}
blockcount = framecount / (blocksize / framesize);
if (!blockcount) {
- RTE_LOG(ERR, PMD,
- "%s: invalid AF_PACKET MMAP parameters\n", name);
+ PMD_LOG(ERR,
+ "%s: invalid AF_PACKET MMAP parameters", name);
return -1;
}
- RTE_LOG(INFO, PMD, "%s: AF_PACKET MMAP parameters:\n", name);
- RTE_LOG(INFO, PMD, "%s:\tblock size %d\n", name, blocksize);
- RTE_LOG(INFO, PMD, "%s:\tblock count %d\n", name, blockcount);
- RTE_LOG(INFO, PMD, "%s:\tframe size %d\n", name, framesize);
- RTE_LOG(INFO, PMD, "%s:\tframe count %d\n", name, framecount);
+ PMD_LOG(INFO, "%s: AF_PACKET MMAP parameters:", name);
+ PMD_LOG(INFO, "%s:\tblock size %d", name, blocksize);
+ PMD_LOG(INFO, "%s:\tblock count %d", name, blockcount);
+ PMD_LOG(INFO, "%s:\tframe size %d", name, framesize);
+ PMD_LOG(INFO, "%s:\tframe count %d", name, framecount);
if (rte_pmd_init_internals(dev, *sockfd, qpairs,
blocksize, blockcount,
@@ -917,6 +911,7 @@ rte_eth_from_packet(struct rte_vdev_device *dev,
eth_dev->rx_pkt_burst = eth_af_packet_rx;
eth_dev->tx_pkt_burst = eth_af_packet_tx;
+ rte_eth_dev_probing_finish(eth_dev);
return 0;
}
@@ -926,9 +921,23 @@ rte_pmd_af_packet_probe(struct rte_vdev_device *dev)
int ret = 0;
struct rte_kvargs *kvlist;
int sockfd = -1;
+ struct rte_eth_dev *eth_dev;
+ const char *name = rte_vdev_device_name(dev);
- RTE_LOG(INFO, PMD, "Initializing pmd_af_packet for %s\n",
- rte_vdev_device_name(dev));
+ PMD_LOG(INFO, "Initializing pmd_af_packet for %s", name);
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(rte_vdev_device_args(dev)) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ PMD_LOG(ERR, "Failed to probe %s", name);
+ return -1;
+ }
+ /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->dev_ops = &ops;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
if (kvlist == NULL) {
@@ -966,8 +975,8 @@ rte_pmd_af_packet_remove(struct rte_vdev_device *dev)
struct pmd_internals *internals;
unsigned q;
- RTE_LOG(INFO, PMD, "Closing AF_PACKET ethdev on numa socket %u\n",
- rte_socket_id());
+ PMD_LOG(INFO, "Closing AF_PACKET ethdev on numa socket %u",
+ rte_socket_id());
if (dev == NULL)
return -1;
@@ -985,7 +994,6 @@ rte_pmd_af_packet_remove(struct rte_vdev_device *dev)
free(internals->if_name);
rte_free(eth_dev->data->dev_private);
- rte_free(eth_dev->data);
rte_eth_dev_release_port(eth_dev);
@@ -1006,3 +1014,12 @@ RTE_PMD_REGISTER_PARAM_STRING(net_af_packet,
"framesz=<int> "
"framecnt=<int> "
"qdisc_bypass=<0|1>");
+
+RTE_INIT(af_packet_init_log);
+static void
+af_packet_init_log(void)
+{
+ af_packet_logtype = rte_log_register("pmd.net.packet");
+ if (af_packet_logtype >= 0)
+ rte_log_set_level(af_packet_logtype, RTE_LOG_NOTICE);
+}
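The conversion above moves af_packet off the shared PMD logtype onto a driver-private one that can be tuned at runtime. A condensed sketch of that pattern, with illustrative names rather than additional API:

    #include <rte_log.h>

    static int my_pmd_logtype;

    #define MY_PMD_LOG(level, fmt, args...) \
            rte_log(RTE_LOG_ ## level, my_pmd_logtype, \
                    "%s(): " fmt "\n", __func__, ##args)

    RTE_INIT(my_pmd_init_log);
    static void
    my_pmd_init_log(void)
    {
            /* register a named logtype so its level can be adjusted at runtime */
            my_pmd_logtype = rte_log_register("pmd.net.mydrv");
            if (my_pmd_logtype >= 0)
                    rte_log_set_level(my_pmd_logtype, RTE_LOG_NOTICE);
    }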
diff --git a/drivers/net/ark/ark_ethdev.c b/drivers/net/ark/ark_ethdev.c
index ff87c20e..834d8a9e 100644
--- a/drivers/net/ark/ark_ethdev.c
+++ b/drivers/net/ark/ark_ethdev.c
@@ -69,7 +69,7 @@ static int eth_ark_dev_set_link_down(struct rte_eth_dev *dev);
static int eth_ark_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static void eth_ark_dev_stats_reset(struct rte_eth_dev *dev);
-static void eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
+static int eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static int eth_ark_macaddr_add(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
@@ -390,6 +390,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
if (p == 0) {
/* First port is already allocated by DPDK */
eth_dev = ark->eth_dev;
+ rte_eth_dev_probing_finish(eth_dev);
continue;
}
@@ -422,6 +423,8 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
ark->user_data[eth_dev->data->port_id] =
ark->user_ext.dev_init(dev, ark->a_bar, p);
}
+
+ rte_eth_dev_probing_finish(eth_dev);
}
return ret;
@@ -771,7 +774,6 @@ eth_ark_dev_info_get(struct rte_eth_dev *dev,
ETH_LINK_SPEED_40G |
ETH_LINK_SPEED_50G |
ETH_LINK_SPEED_100G);
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
}
static int
@@ -887,16 +889,19 @@ eth_ark_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
ark->user_data[dev->data->port_id]);
}
-static void
+static int
eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr)
{
struct ark_adapter *ark =
(struct ark_adapter *)dev->data->dev_private;
- if (ark->user_ext.mac_addr_set)
+ if (ark->user_ext.mac_addr_set) {
ark->user_ext.mac_addr_set(dev, mac_addr,
ark->user_data[dev->data->port_id]);
+ return 0;
+ }
+ return -ENOTSUP;
}
static int
diff --git a/drivers/net/avf/avf_ethdev.c b/drivers/net/avf/avf_ethdev.c
index 4df66170..ad83a57e 100644
--- a/drivers/net/avf/avf_ethdev.c
+++ b/drivers/net/avf/avf_ethdev.c
@@ -65,7 +65,7 @@ static int avf_dev_rss_hash_update(struct rte_eth_dev *dev,
static int avf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf);
static int avf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
-static void avf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
+static int avf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static int avf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
uint16_t queue_id);
@@ -339,17 +339,18 @@ static int avf_config_rx_queues_irqs(struct rte_eth_dev *dev,
AVF_WRITE_FLUSH(hw);
/* map all queues to the same interrupt */
for (i = 0; i < dev->data->nb_rx_queues; i++)
- vf->rxq_map[0] |= 1 << i;
+ vf->rxq_map[vf->msix_base] |= 1 << i;
} else {
if (!rte_intr_allow_others(intr_handle)) {
vf->nb_msix = 1;
vf->msix_base = AVF_MISC_VEC_ID;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- vf->rxq_map[0] |= 1 << i;
+ vf->rxq_map[vf->msix_base] |= 1 << i;
intr_handle->intr_vec[i] = AVF_MISC_VEC_ID;
}
PMD_DRV_LOG(DEBUG,
- "vector 0 are mapping to all Rx queues");
+ "vector %u are mapping to all Rx queues",
+ vf->msix_base);
} else {
/* If Rx interrupt is required, and we can use
* multi interrupts, then the vec is from 1
@@ -474,7 +475,7 @@ avf_dev_stop(struct rte_eth_dev *dev)
{
struct avf_adapter *adapter =
AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
- struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev);
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = dev->intr_handle;
int ret, i;
@@ -507,7 +508,6 @@ avf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
memset(dev_info, 0, sizeof(*dev_info));
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
dev_info->min_rx_bufsize = AVF_BUF_SIZE_MIN;
@@ -532,13 +532,13 @@ avf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_free_thresh = AVF_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
.tx_free_thresh = AVF_DEFAULT_TX_FREE_THRESH,
.tx_rs_thresh = AVF_DEFAULT_TX_RS_THRESH,
- .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
+ .offloads = 0,
};
dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
@@ -608,7 +608,7 @@ avf_dev_link_update(struct rte_eth_dev *dev,
new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
new_link.link_status = vf->link_up ? ETH_LINK_UP :
ETH_LINK_DOWN;
- new_link.link_autoneg = !!(dev->data->dev_conf.link_speeds &
+ new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
ETH_LINK_SPEED_FIXED);
if (rte_atomic64_cmpset((uint64_t *)&dev->data->dev_link,
@@ -759,7 +759,7 @@ avf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
/* Vlan stripping setting */
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev_conf->rxmode.hw_vlan_strip)
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
err = avf_enable_vlan_strip(adapter);
else
err = avf_disable_vlan_strip(adapter);
@@ -926,7 +926,7 @@ avf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return ret;
}
-static void
+static int
avf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr)
{
@@ -940,11 +940,11 @@ avf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
perm_addr = (struct ether_addr *)hw->mac.perm_addr;
if (is_same_ether_addr(mac_addr, old_addr))
- return;
+ return 0;
/* If the MAC address is configured by host, skip the setting */
if (is_valid_assigned_ether_addr(perm_addr))
- return;
+ return -EPERM;
ret = avf_add_del_eth_addr(adapter, old_addr, FALSE);
if (ret)
@@ -968,7 +968,11 @@ avf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
mac_addr->addr_bytes[4],
mac_addr->addr_bytes[5]);
+ if (ret)
+ return -EIO;
+
ether_addr_copy(mac_addr, (struct ether_addr *)hw->mac.addr);
+ return 0;
}
static int
@@ -1365,8 +1369,8 @@ avf_allocate_dma_mem_d(__rte_unused struct avf_hw *hw,
return AVF_ERR_PARAM;
snprintf(z_name, sizeof(z_name), "avf_dma_%"PRIu64, rte_rand());
- mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
- alignment, RTE_PGSIZE_2M);
+ mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
+ RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
if (!mz)
return AVF_ERR_NO_MEMORY;
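The change above switches the DMA allocator from relying on implicit physical contiguity to requesting it explicitly with RTE_MEMZONE_IOVA_CONTIG. A minimal sketch of reserving such a zone (the helper name and sizes are placeholders, not part of the patch):

    #include <rte_memzone.h>
    #include <rte_memory.h>

    /* Hedged sketch: reserve a ring-sized, IOVA-contiguous zone for DMA. */
    static const struct rte_memzone *
    reserve_dma_zone(const char *name, size_t size, unsigned int align)
    {
            return rte_memzone_reserve_bounded(name, size, SOCKET_ID_ANY,
                                               RTE_MEMZONE_IOVA_CONTIG,
                                               align, RTE_PGSIZE_2M);
    }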
diff --git a/drivers/net/avf/avf_rxtx.c b/drivers/net/avf/avf_rxtx.c
index d276d975..e03a136f 100644
--- a/drivers/net/avf/avf_rxtx.c
+++ b/drivers/net/avf/avf_rxtx.c
@@ -109,7 +109,7 @@ check_rx_vec_allow(struct avf_rx_queue *rxq)
static inline bool
check_tx_vec_allow(struct avf_tx_queue *txq)
{
- if ((txq->txq_flags & AVF_SIMPLE_FLAGS) == AVF_SIMPLE_FLAGS &&
+ if (!(txq->offloads & AVF_NO_VECTOR_FLAGS) &&
txq->rs_thresh >= AVF_VPMD_TX_MAX_BURST &&
txq->rs_thresh <= AVF_VPMD_TX_MAX_FREE_BUF) {
PMD_INIT_LOG(DEBUG, "Vector tx can be enabled on this txq.");
@@ -435,9 +435,12 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint32_t ring_size;
uint16_t tx_rs_thresh, tx_free_thresh;
uint16_t i, base, bsf, tc_mapping;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
if (nb_desc % AVF_ALIGN_RING_DESC != 0 ||
nb_desc > AVF_MAX_RING_DESC ||
nb_desc < AVF_MIN_RING_DESC) {
@@ -474,7 +477,7 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->free_thresh = tx_free_thresh;
txq->queue_id = queue_idx;
txq->port_id = dev->data->port_id;
- txq->txq_flags = tx_conf->txq_flags;
+ txq->offloads = offloads;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
/* Allocate software ring */
@@ -1831,7 +1834,7 @@ avf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.tx_free_thresh = txq->free_thresh;
qinfo->conf.tx_rs_thresh = txq->rs_thresh;
- qinfo->conf.txq_flags = txq->txq_flags;
+ qinfo->conf.offloads = txq->offloads;
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
diff --git a/drivers/net/avf/avf_rxtx.h b/drivers/net/avf/avf_rxtx.h
index d1701cd6..297d0776 100644
--- a/drivers/net/avf/avf_rxtx.h
+++ b/drivers/net/avf/avf_rxtx.h
@@ -22,8 +22,12 @@
#define AVF_VPMD_DESCS_PER_LOOP 4
#define AVF_VPMD_TX_MAX_FREE_BUF 64
-#define AVF_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
- ETH_TXQ_FLAGS_NOOFFLOADS)
+#define AVF_NO_VECTOR_FLAGS ( \
+ DEV_TX_OFFLOAD_MULTI_SEGS | \
+ DEV_TX_OFFLOAD_VLAN_INSERT | \
+ DEV_TX_OFFLOAD_SCTP_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM)
#define DEFAULT_TX_RS_THRESH 32
#define DEFAULT_TX_FREE_THRESH 32
@@ -125,7 +129,7 @@ struct avf_tx_queue {
uint16_t port_id;
uint16_t queue_id;
- uint32_t txq_flags;
+ uint64_t offloads;
uint16_t next_dd; /* next to set RS, for VPMD */
uint16_t next_rs; /* next to check DD, for VPMD */
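The txq_flags field above gives way to the 64-bit offloads mask of the new ethdev offload API, which the vector-path check and queue setup now consult. A hedged sketch of how an application passes per-queue Tx offloads under that model (port, queue, and helper name are arbitrary):

    #include <rte_ethdev.h>

    /* Hedged sketch: request multi-segment Tx on one queue via the offloads API. */
    static int
    setup_txq_with_offloads(uint16_t port_id, uint16_t queue_id, uint16_t nb_desc)
    {
            struct rte_eth_dev_info dev_info;
            struct rte_eth_txconf txconf;

            rte_eth_dev_info_get(port_id, &dev_info);
            txconf = dev_info.default_txconf;
            /* honor the offloads field during the txq_flags transition period */
            txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
            txconf.offloads = DEV_TX_OFFLOAD_MULTI_SEGS;

            return rte_eth_tx_queue_setup(port_id, queue_id, nb_desc,
                                          rte_eth_dev_socket_id(port_id), &txconf);
    }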
diff --git a/drivers/net/avp/Makefile b/drivers/net/avp/Makefile
index c29ecf45..c9db667f 100644
--- a/drivers/net/avp/Makefile
+++ b/drivers/net/avp/Makefile
@@ -1,34 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2013-2017, Wind River Systems, Inc. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Wind River Systems nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2013-2017, Wind River Systems, Inc.
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/net/avp/avp_ethdev.c b/drivers/net/avp/avp_ethdev.c
index dba99120..dc97e60e 100644
--- a/drivers/net/avp/avp_ethdev.c
+++ b/drivers/net/avp/avp_ethdev.c
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (c) 2013-2017, Wind River Systems, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1) Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2) Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * 3) Neither the name of Wind River Systems nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2017 Wind River Systems, Inc.
*/
#include <stdint.h>
@@ -1076,19 +1048,8 @@ static int
eth_avp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
- struct rte_eth_dev *eth_dev;
- int ret;
-
- eth_dev = rte_eth_dev_pci_allocate(pci_dev,
- sizeof(struct avp_adapter));
- if (eth_dev == NULL)
- return -ENOMEM;
-
- ret = eth_avp_dev_init(eth_dev);
- if (ret)
- rte_eth_dev_pci_release(eth_dev);
-
- return ret;
+ return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct avp_adapter),
+ eth_avp_dev_init);
}
static int
@@ -2074,12 +2035,6 @@ avp_dev_start(struct rte_eth_dev *eth_dev)
goto unlock;
}
- /* disable features that we do not support */
- eth_dev->data->dev_conf.rxmode.hw_ip_checksum = 0;
- eth_dev->data->dev_conf.rxmode.hw_vlan_filter = 0;
- eth_dev->data->dev_conf.rxmode.hw_vlan_extend = 0;
- eth_dev->data->dev_conf.rxmode.hw_strip_crc = 0;
-
/* update link state */
ret = avp_dev_ctrl_set_link_state(eth_dev, 1);
if (ret < 0) {
@@ -2206,7 +2161,6 @@ avp_dev_info_get(struct rte_eth_dev *eth_dev,
{
struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
dev_info->max_rx_queues = avp->max_rx_queues;
dev_info->max_tx_queues = avp->max_tx_queues;
dev_info->min_rx_bufsize = AVP_MIN_RX_BUFSIZE;
@@ -2222,10 +2176,12 @@ static int
avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
+ uint64_t offloads = dev_conf->rxmode.offloads;
if (mask & ETH_VLAN_STRIP_MASK) {
if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
- if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
else
avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
@@ -2235,12 +2191,12 @@ avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
}
if (mask & ETH_VLAN_FILTER_MASK) {
- if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
}
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (eth_dev->data->dev_conf.rxmode.hw_vlan_extend)
+ if (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
}
diff --git a/drivers/net/avp/avp_logs.h b/drivers/net/avp/avp_logs.h
index e29394d5..6e297c7a 100644
--- a/drivers/net/avp/avp_logs.h
+++ b/drivers/net/avp/avp_logs.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (c) 2013-2015, Wind River Systems, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1) Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2) Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * 3) Neither the name of Wind River Systems nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2017 Wind River Systems, Inc.
*/
#ifndef _AVP_LOGS_H_
diff --git a/drivers/net/avp/rte_avp_common.h b/drivers/net/avp/rte_avp_common.h
index 81dfe5ea..aa95159c 100644
--- a/drivers/net/avp/rte_avp_common.h
+++ b/drivers/net/avp/rte_avp_common.h
@@ -1,57 +1,6 @@
-/*-
- * This file is provided under a dual BSD/LGPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GNU LESSER GENERAL PUBLIC LICENSE
- *
- * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
- * Copyright(c) 2014-2017 Wind River Systems, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2.1 of the GNU Lesser General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * Contact Information:
- * Wind River Systems, Inc.
- *
- *
- * BSD LICENSE
- *
- * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
- * Copyright(c) 2014-2017 Wind River Systems, Inc. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
+/* SPDX-License-Identifier: (BSD-3-Clause OR LGPL-2.1)
+ * Copyright(c) 2010-2013 Intel Corporation.
+ * Copyright(c) 2014-2017 Wind River Systems, Inc.
*/
#ifndef _RTE_AVP_COMMON_H_
diff --git a/drivers/net/avp/rte_avp_fifo.h b/drivers/net/avp/rte_avp_fifo.h
index 803eb80a..c1658da6 100644
--- a/drivers/net/avp/rte_avp_fifo.h
+++ b/drivers/net/avp/rte_avp_fifo.h
@@ -1,57 +1,6 @@
-/*-
- * This file is provided under a dual BSD/LGPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GNU LESSER GENERAL PUBLIC LICENSE
- *
- * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 Wind River Systems, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2.1 of the GNU Lesser General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * Contact Information:
- * Wind River Systems, Inc.
- *
- *
- * BSD LICENSE
- *
- * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
- * Copyright(c) 2013-2017 Wind River Systems, Inc. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
+/* SPDX-License-Identifier: (BSD-3-Clause OR LGPL-2.1)
+ * Copyright(c) 2010-2013 Intel Corporation.
+ * Copyright(c) 2013-2017 Wind River Systems, Inc.
*/
#ifndef _RTE_AVP_FIFO_H_
diff --git a/drivers/net/axgbe/Makefile b/drivers/net/axgbe/Makefile
new file mode 100644
index 00000000..72215aed
--- /dev/null
+++ b/drivers/net/axgbe/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_axgbe.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_axgbe_version.map
+
+LIBABIVER := 1
+
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
+LDLIBS += -lrte_pci -lrte_bus_pci
+LDLIBS += -lrte_ethdev
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_mdio.c
+SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_phy_impl.c
+SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_i2c.c
+SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_rxtx.c
+ifeq ($(CONFIG_RTE_ARCH_X86),y)
+SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_rxtx_vec_sse.c
+endif
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
new file mode 100644
index 00000000..d25d54ca
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_common.h
@@ -0,0 +1,1710 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#ifndef __AXGBE_COMMON_H__
+#define __AXGBE_COMMON_H__
+
+#include "axgbe_logs.h"
+
+#include <stdbool.h>
+#include <limits.h>
+#include <sys/queue.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <pthread.h>
+
+#include <rte_byteorder.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_hexdump.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_branch_prediction.h>
+#include <rte_eal.h>
+#include <rte_memzone.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_ethdev_pci.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_io.h>
+
+#define BIT(nr) (1 << (nr))
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#endif
+
+#define AXGBE_HZ 250
+
+/* DMA register offsets */
+#define DMA_MR 0x3000
+#define DMA_SBMR 0x3004
+#define DMA_ISR 0x3008
+#define DMA_AXIARCR 0x3010
+#define DMA_AXIAWCR 0x3018
+#define DMA_AXIAWRCR 0x301c
+#define DMA_DSR0 0x3020
+#define DMA_DSR1 0x3024
+#define EDMA_TX_CONTROL 0x3040
+#define EDMA_RX_CONTROL 0x3044
+
+/* DMA register entry bit positions and sizes */
+#define DMA_AXIARCR_DRC_INDEX 0
+#define DMA_AXIARCR_DRC_WIDTH 4
+#define DMA_AXIARCR_DRD_INDEX 4
+#define DMA_AXIARCR_DRD_WIDTH 2
+#define DMA_AXIARCR_TEC_INDEX 8
+#define DMA_AXIARCR_TEC_WIDTH 4
+#define DMA_AXIARCR_TED_INDEX 12
+#define DMA_AXIARCR_TED_WIDTH 2
+#define DMA_AXIARCR_THC_INDEX 16
+#define DMA_AXIARCR_THC_WIDTH 4
+#define DMA_AXIARCR_THD_INDEX 20
+#define DMA_AXIARCR_THD_WIDTH 2
+#define DMA_AXIAWCR_DWC_INDEX 0
+#define DMA_AXIAWCR_DWC_WIDTH 4
+#define DMA_AXIAWCR_DWD_INDEX 4
+#define DMA_AXIAWCR_DWD_WIDTH 2
+#define DMA_AXIAWCR_RPC_INDEX 8
+#define DMA_AXIAWCR_RPC_WIDTH 4
+#define DMA_AXIAWCR_RPD_INDEX 12
+#define DMA_AXIAWCR_RPD_WIDTH 2
+#define DMA_AXIAWCR_RHC_INDEX 16
+#define DMA_AXIAWCR_RHC_WIDTH 4
+#define DMA_AXIAWCR_RHD_INDEX 20
+#define DMA_AXIAWCR_RHD_WIDTH 2
+#define DMA_AXIAWCR_RDC_INDEX 24
+#define DMA_AXIAWCR_RDC_WIDTH 4
+#define DMA_AXIAWCR_RDD_INDEX 28
+#define DMA_AXIAWCR_RDD_WIDTH 2
+#define DMA_AXIAWRCR_TDWC_INDEX 0
+#define DMA_AXIAWRCR_TDWC_WIDTH 4
+#define DMA_AXIAWRCR_TDWD_INDEX 4
+#define DMA_AXIAWRCR_TDWD_WIDTH 4
+#define DMA_AXIAWRCR_RDRC_INDEX 8
+#define DMA_AXIAWRCR_RDRC_WIDTH 4
+#define DMA_ISR_MACIS_INDEX 17
+#define DMA_ISR_MACIS_WIDTH 1
+#define DMA_ISR_MTLIS_INDEX 16
+#define DMA_ISR_MTLIS_WIDTH 1
+#define DMA_MR_INTM_INDEX 12
+#define DMA_MR_INTM_WIDTH 2
+#define DMA_MR_SWR_INDEX 0
+#define DMA_MR_SWR_WIDTH 1
+#define DMA_SBMR_WR_OSR_INDEX 24
+#define DMA_SBMR_WR_OSR_WIDTH 6
+#define DMA_SBMR_RD_OSR_INDEX 16
+#define DMA_SBMR_RD_OSR_WIDTH 6
+#define DMA_SBMR_AAL_INDEX 12
+#define DMA_SBMR_AAL_WIDTH 1
+#define DMA_SBMR_EAME_INDEX 11
+#define DMA_SBMR_EAME_WIDTH 1
+#define DMA_SBMR_BLEN_256_INDEX 7
+#define DMA_SBMR_BLEN_256_WIDTH 1
+#define DMA_SBMR_BLEN_32_INDEX 4
+#define DMA_SBMR_BLEN_32_WIDTH 1
+#define DMA_SBMR_UNDEF_INDEX 0
+#define DMA_SBMR_UNDEF_WIDTH 1
+
+/* DMA register values */
+#define DMA_DSR_RPS_WIDTH 4
+#define DMA_DSR_TPS_WIDTH 4
+#define DMA_DSR_Q_WIDTH (DMA_DSR_RPS_WIDTH + DMA_DSR_TPS_WIDTH)
+#define DMA_DSR0_RPS_START 8
+#define DMA_DSR0_TPS_START 12
+#define DMA_DSRX_FIRST_QUEUE 3
+#define DMA_DSRX_INC 4
+#define DMA_DSRX_QPR 4
+#define DMA_DSRX_RPS_START 0
+#define DMA_DSRX_TPS_START 4
+#define DMA_TPS_STOPPED 0x00
+#define DMA_TPS_SUSPENDED 0x06
+
+/* DMA channel register offsets
+ * Multiple channels can be active. The first channel has registers
+ * that begin at 0x3100. Each subsequent channel has registers that
+ * are accessed using an offset of 0x80 from the previous channel.
+ */
+#define DMA_CH_BASE 0x3100
+#define DMA_CH_INC 0x80
+
+#define DMA_CH_CR 0x00
+#define DMA_CH_TCR 0x04
+#define DMA_CH_RCR 0x08
+#define DMA_CH_TDLR_HI 0x10
+#define DMA_CH_TDLR_LO 0x14
+#define DMA_CH_RDLR_HI 0x18
+#define DMA_CH_RDLR_LO 0x1c
+#define DMA_CH_TDTR_LO 0x24
+#define DMA_CH_RDTR_LO 0x2c
+#define DMA_CH_TDRLR 0x30
+#define DMA_CH_RDRLR 0x34
+#define DMA_CH_IER 0x38
+#define DMA_CH_RIWT 0x3c
+#define DMA_CH_CATDR_LO 0x44
+#define DMA_CH_CARDR_LO 0x4c
+#define DMA_CH_CATBR_HI 0x50
+#define DMA_CH_CATBR_LO 0x54
+#define DMA_CH_CARBR_HI 0x58
+#define DMA_CH_CARBR_LO 0x5c
+#define DMA_CH_SR 0x60
+
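+/*
+ * Illustrative sketch (not part of the original driver): with the layout
+ * described above, a per-channel register address can be formed as
+ * base + channel * stride + register offset, for example:
+ *
+ *   #define DMA_CH_REG(ch, reg)  (DMA_CH_BASE + (ch) * DMA_CH_INC + (reg))
+ *
+ * so DMA_CH_REG(0, DMA_CH_SR) is 0x3160 and DMA_CH_REG(1, DMA_CH_SR) is 0x31e0.
+ */
+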
+/* DMA channel register entry bit positions and sizes */
+#define DMA_CH_CR_PBLX8_INDEX 16
+#define DMA_CH_CR_PBLX8_WIDTH 1
+#define DMA_CH_CR_SPH_INDEX 24
+#define DMA_CH_CR_SPH_WIDTH 1
+#define DMA_CH_IER_AIE_INDEX 14
+#define DMA_CH_IER_AIE_WIDTH 1
+#define DMA_CH_IER_FBEE_INDEX 12
+#define DMA_CH_IER_FBEE_WIDTH 1
+#define DMA_CH_IER_NIE_INDEX 15
+#define DMA_CH_IER_NIE_WIDTH 1
+#define DMA_CH_IER_RBUE_INDEX 7
+#define DMA_CH_IER_RBUE_WIDTH 1
+#define DMA_CH_IER_RIE_INDEX 6
+#define DMA_CH_IER_RIE_WIDTH 1
+#define DMA_CH_IER_RSE_INDEX 8
+#define DMA_CH_IER_RSE_WIDTH 1
+#define DMA_CH_IER_TBUE_INDEX 2
+#define DMA_CH_IER_TBUE_WIDTH 1
+#define DMA_CH_IER_TIE_INDEX 0
+#define DMA_CH_IER_TIE_WIDTH 1
+#define DMA_CH_IER_TXSE_INDEX 1
+#define DMA_CH_IER_TXSE_WIDTH 1
+#define DMA_CH_RCR_PBL_INDEX 16
+#define DMA_CH_RCR_PBL_WIDTH 6
+#define DMA_CH_RCR_RBSZ_INDEX 1
+#define DMA_CH_RCR_RBSZ_WIDTH 14
+#define DMA_CH_RCR_SR_INDEX 0
+#define DMA_CH_RCR_SR_WIDTH 1
+#define DMA_CH_RIWT_RWT_INDEX 0
+#define DMA_CH_RIWT_RWT_WIDTH 8
+#define DMA_CH_SR_FBE_INDEX 12
+#define DMA_CH_SR_FBE_WIDTH 1
+#define DMA_CH_SR_RBU_INDEX 7
+#define DMA_CH_SR_RBU_WIDTH 1
+#define DMA_CH_SR_RI_INDEX 6
+#define DMA_CH_SR_RI_WIDTH 1
+#define DMA_CH_SR_RPS_INDEX 8
+#define DMA_CH_SR_RPS_WIDTH 1
+#define DMA_CH_SR_TBU_INDEX 2
+#define DMA_CH_SR_TBU_WIDTH 1
+#define DMA_CH_SR_TI_INDEX 0
+#define DMA_CH_SR_TI_WIDTH 1
+#define DMA_CH_SR_TPS_INDEX 1
+#define DMA_CH_SR_TPS_WIDTH 1
+#define DMA_CH_TCR_OSP_INDEX 4
+#define DMA_CH_TCR_OSP_WIDTH 1
+#define DMA_CH_TCR_PBL_INDEX 16
+#define DMA_CH_TCR_PBL_WIDTH 6
+#define DMA_CH_TCR_ST_INDEX 0
+#define DMA_CH_TCR_ST_WIDTH 1
+#define DMA_CH_TCR_TSE_INDEX 12
+#define DMA_CH_TCR_TSE_WIDTH 1
+
+/* DMA channel register values */
+#define DMA_OSP_DISABLE 0x00
+#define DMA_OSP_ENABLE 0x01
+#define DMA_PBL_1 1
+#define DMA_PBL_2 2
+#define DMA_PBL_4 4
+#define DMA_PBL_8 8
+#define DMA_PBL_16 16
+#define DMA_PBL_32 32
+#define DMA_PBL_64 64 /* 8 x 8 */
+#define DMA_PBL_128 128 /* 8 x 16 */
+#define DMA_PBL_256 256 /* 8 x 32 */
+#define DMA_PBL_X8_DISABLE 0x00
+#define DMA_PBL_X8_ENABLE 0x01
+
+/* MAC register offsets */
+#define MAC_TCR 0x0000
+#define MAC_RCR 0x0004
+#define MAC_PFR 0x0008
+#define MAC_WTR 0x000c
+#define MAC_HTR0 0x0010
+#define MAC_VLANTR 0x0050
+#define MAC_VLANHTR 0x0058
+#define MAC_VLANIR 0x0060
+#define MAC_IVLANIR 0x0064
+#define MAC_RETMR 0x006c
+#define MAC_Q0TFCR 0x0070
+#define MAC_RFCR 0x0090
+#define MAC_RQC0R 0x00a0
+#define MAC_RQC1R 0x00a4
+#define MAC_RQC2R 0x00a8
+#define MAC_RQC3R 0x00ac
+#define MAC_ISR 0x00b0
+#define MAC_IER 0x00b4
+#define MAC_RTSR 0x00b8
+#define MAC_PMTCSR 0x00c0
+#define MAC_RWKPFR 0x00c4
+#define MAC_LPICSR 0x00d0
+#define MAC_LPITCR 0x00d4
+#define MAC_VR 0x0110
+#define MAC_DR 0x0114
+#define MAC_HWF0R 0x011c
+#define MAC_HWF1R 0x0120
+#define MAC_HWF2R 0x0124
+#define MAC_MDIOSCAR 0x0200
+#define MAC_MDIOSCCDR 0x0204
+#define MAC_MDIOISR 0x0214
+#define MAC_MDIOIER 0x0218
+#define MAC_MDIOCL22R 0x0220
+#define MAC_GPIOCR 0x0278
+#define MAC_GPIOSR 0x027c
+#define MAC_MACA0HR 0x0300
+#define MAC_MACA0LR 0x0304
+#define MAC_MACA1HR 0x0308
+#define MAC_MACA1LR 0x030c
+#define MAC_RSSCR 0x0c80
+#define MAC_RSSAR 0x0c88
+#define MAC_RSSDR 0x0c8c
+#define MAC_TSCR 0x0d00
+#define MAC_SSIR 0x0d04
+#define MAC_STSR 0x0d08
+#define MAC_STNR 0x0d0c
+#define MAC_STSUR 0x0d10
+#define MAC_STNUR 0x0d14
+#define MAC_TSAR 0x0d18
+#define MAC_TSSR 0x0d20
+#define MAC_TXSNR 0x0d30
+#define MAC_TXSSR 0x0d34
+
+#define MAC_QTFCR_INC 4
+#define MAC_MACA_INC 4
+#define MAC_HTR_INC 4
+
+#define MAC_RQC2_INC 4
+#define MAC_RQC2_Q_PER_REG 4
+
+/* MAC register entry bit positions and sizes */
+#define MAC_HWF0R_ADDMACADRSEL_INDEX 18
+#define MAC_HWF0R_ADDMACADRSEL_WIDTH 5
+#define MAC_HWF0R_ARPOFFSEL_INDEX 9
+#define MAC_HWF0R_ARPOFFSEL_WIDTH 1
+#define MAC_HWF0R_EEESEL_INDEX 13
+#define MAC_HWF0R_EEESEL_WIDTH 1
+#define MAC_HWF0R_GMIISEL_INDEX 1
+#define MAC_HWF0R_GMIISEL_WIDTH 1
+#define MAC_HWF0R_MGKSEL_INDEX 7
+#define MAC_HWF0R_MGKSEL_WIDTH 1
+#define MAC_HWF0R_MMCSEL_INDEX 8
+#define MAC_HWF0R_MMCSEL_WIDTH 1
+#define MAC_HWF0R_RWKSEL_INDEX 6
+#define MAC_HWF0R_RWKSEL_WIDTH 1
+#define MAC_HWF0R_RXCOESEL_INDEX 16
+#define MAC_HWF0R_RXCOESEL_WIDTH 1
+#define MAC_HWF0R_SAVLANINS_INDEX 27
+#define MAC_HWF0R_SAVLANINS_WIDTH 1
+#define MAC_HWF0R_SMASEL_INDEX 5
+#define MAC_HWF0R_SMASEL_WIDTH 1
+#define MAC_HWF0R_TSSEL_INDEX 12
+#define MAC_HWF0R_TSSEL_WIDTH 1
+#define MAC_HWF0R_TSSTSSEL_INDEX 25
+#define MAC_HWF0R_TSSTSSEL_WIDTH 2
+#define MAC_HWF0R_TXCOESEL_INDEX 14
+#define MAC_HWF0R_TXCOESEL_WIDTH 1
+#define MAC_HWF0R_VLHASH_INDEX 4
+#define MAC_HWF0R_VLHASH_WIDTH 1
+#define MAC_HWF1R_ADDR64_INDEX 14
+#define MAC_HWF1R_ADDR64_WIDTH 2
+#define MAC_HWF1R_ADVTHWORD_INDEX 13
+#define MAC_HWF1R_ADVTHWORD_WIDTH 1
+#define MAC_HWF1R_DBGMEMA_INDEX 19
+#define MAC_HWF1R_DBGMEMA_WIDTH 1
+#define MAC_HWF1R_DCBEN_INDEX 16
+#define MAC_HWF1R_DCBEN_WIDTH 1
+#define MAC_HWF1R_HASHTBLSZ_INDEX 24
+#define MAC_HWF1R_HASHTBLSZ_WIDTH 3
+#define MAC_HWF1R_L3L4FNUM_INDEX 27
+#define MAC_HWF1R_L3L4FNUM_WIDTH 4
+#define MAC_HWF1R_NUMTC_INDEX 21
+#define MAC_HWF1R_NUMTC_WIDTH 3
+#define MAC_HWF1R_RSSEN_INDEX 20
+#define MAC_HWF1R_RSSEN_WIDTH 1
+#define MAC_HWF1R_RXFIFOSIZE_INDEX 0
+#define MAC_HWF1R_RXFIFOSIZE_WIDTH 5
+#define MAC_HWF1R_SPHEN_INDEX 17
+#define MAC_HWF1R_SPHEN_WIDTH 1
+#define MAC_HWF1R_TSOEN_INDEX 18
+#define MAC_HWF1R_TSOEN_WIDTH 1
+#define MAC_HWF1R_TXFIFOSIZE_INDEX 6
+#define MAC_HWF1R_TXFIFOSIZE_WIDTH 5
+#define MAC_HWF2R_AUXSNAPNUM_INDEX 28
+#define MAC_HWF2R_AUXSNAPNUM_WIDTH 3
+#define MAC_HWF2R_PPSOUTNUM_INDEX 24
+#define MAC_HWF2R_PPSOUTNUM_WIDTH 3
+#define MAC_HWF2R_RXCHCNT_INDEX 12
+#define MAC_HWF2R_RXCHCNT_WIDTH 4
+#define MAC_HWF2R_RXQCNT_INDEX 0
+#define MAC_HWF2R_RXQCNT_WIDTH 4
+#define MAC_HWF2R_TXCHCNT_INDEX 18
+#define MAC_HWF2R_TXCHCNT_WIDTH 4
+#define MAC_HWF2R_TXQCNT_INDEX 6
+#define MAC_HWF2R_TXQCNT_WIDTH 4
+#define MAC_IER_TSIE_INDEX 12
+#define MAC_IER_TSIE_WIDTH 1
+#define MAC_ISR_MMCRXIS_INDEX 9
+#define MAC_ISR_MMCRXIS_WIDTH 1
+#define MAC_ISR_MMCTXIS_INDEX 10
+#define MAC_ISR_MMCTXIS_WIDTH 1
+#define MAC_ISR_PMTIS_INDEX 4
+#define MAC_ISR_PMTIS_WIDTH 1
+#define MAC_ISR_SMI_INDEX 1
+#define MAC_ISR_SMI_WIDTH 1
+#define MAC_ISR_LSI_INDEX 0
+#define MAC_ISR_LSI_WIDTH 1
+#define MAC_ISR_LS_INDEX 24
+#define MAC_ISR_LS_WIDTH 2
+#define MAC_ISR_TSIS_INDEX 12
+#define MAC_ISR_TSIS_WIDTH 1
+#define MAC_MACA1HR_AE_INDEX 31
+#define MAC_MACA1HR_AE_WIDTH 1
+#define MAC_MDIOIER_SNGLCOMPIE_INDEX 12
+#define MAC_MDIOIER_SNGLCOMPIE_WIDTH 1
+#define MAC_MDIOISR_SNGLCOMPINT_INDEX 12
+#define MAC_MDIOISR_SNGLCOMPINT_WIDTH 1
+#define MAC_MDIOSCAR_DA_INDEX 21
+#define MAC_MDIOSCAR_DA_WIDTH 5
+#define MAC_MDIOSCAR_PA_INDEX 16
+#define MAC_MDIOSCAR_PA_WIDTH 5
+#define MAC_MDIOSCAR_RA_INDEX 0
+#define MAC_MDIOSCAR_RA_WIDTH 16
+#define MAC_MDIOSCAR_REG_INDEX 0
+#define MAC_MDIOSCAR_REG_WIDTH 21
+#define MAC_MDIOSCCDR_BUSY_INDEX 22
+#define MAC_MDIOSCCDR_BUSY_WIDTH 1
+#define MAC_MDIOSCCDR_CMD_INDEX 16
+#define MAC_MDIOSCCDR_CMD_WIDTH 2
+#define MAC_MDIOSCCDR_CR_INDEX 19
+#define MAC_MDIOSCCDR_CR_WIDTH 3
+#define MAC_MDIOSCCDR_DATA_INDEX 0
+#define MAC_MDIOSCCDR_DATA_WIDTH 16
+#define MAC_MDIOSCCDR_SADDR_INDEX 18
+#define MAC_MDIOSCCDR_SADDR_WIDTH 1
+#define MAC_PFR_HMC_INDEX 2
+#define MAC_PFR_HMC_WIDTH 1
+#define MAC_PFR_HPF_INDEX 10
+#define MAC_PFR_HPF_WIDTH 1
+#define MAC_PFR_HUC_INDEX 1
+#define MAC_PFR_HUC_WIDTH 1
+#define MAC_PFR_PM_INDEX 4
+#define MAC_PFR_PM_WIDTH 1
+#define MAC_PFR_PR_INDEX 0
+#define MAC_PFR_PR_WIDTH 1
+#define MAC_PFR_VTFE_INDEX 16
+#define MAC_PFR_VTFE_WIDTH 1
+#define MAC_PMTCSR_MGKPKTEN_INDEX 1
+#define MAC_PMTCSR_MGKPKTEN_WIDTH 1
+#define MAC_PMTCSR_PWRDWN_INDEX 0
+#define MAC_PMTCSR_PWRDWN_WIDTH 1
+#define MAC_PMTCSR_RWKFILTRST_INDEX 31
+#define MAC_PMTCSR_RWKFILTRST_WIDTH 1
+#define MAC_PMTCSR_RWKPKTEN_INDEX 2
+#define MAC_PMTCSR_RWKPKTEN_WIDTH 1
+#define MAC_Q0TFCR_PT_INDEX 16
+#define MAC_Q0TFCR_PT_WIDTH 16
+#define MAC_Q0TFCR_TFE_INDEX 1
+#define MAC_Q0TFCR_TFE_WIDTH 1
+#define MAC_RCR_ACS_INDEX 1
+#define MAC_RCR_ACS_WIDTH 1
+#define MAC_RCR_CST_INDEX 2
+#define MAC_RCR_CST_WIDTH 1
+#define MAC_RCR_DCRCC_INDEX 3
+#define MAC_RCR_DCRCC_WIDTH 1
+#define MAC_RCR_HDSMS_INDEX 12
+#define MAC_RCR_HDSMS_WIDTH 3
+#define MAC_RCR_IPC_INDEX 9
+#define MAC_RCR_IPC_WIDTH 1
+#define MAC_RCR_JE_INDEX 8
+#define MAC_RCR_JE_WIDTH 1
+#define MAC_RCR_LM_INDEX 10
+#define MAC_RCR_LM_WIDTH 1
+#define MAC_RCR_RE_INDEX 0
+#define MAC_RCR_RE_WIDTH 1
+#define MAC_RFCR_PFCE_INDEX 8
+#define MAC_RFCR_PFCE_WIDTH 1
+#define MAC_RFCR_RFE_INDEX 0
+#define MAC_RFCR_RFE_WIDTH 1
+#define MAC_RFCR_UP_INDEX 1
+#define MAC_RFCR_UP_WIDTH 1
+#define MAC_RQC0R_RXQ0EN_INDEX 0
+#define MAC_RQC0R_RXQ0EN_WIDTH 2
+#define MAC_RSSAR_ADDRT_INDEX 2
+#define MAC_RSSAR_ADDRT_WIDTH 1
+#define MAC_RSSAR_CT_INDEX 1
+#define MAC_RSSAR_CT_WIDTH 1
+#define MAC_RSSAR_OB_INDEX 0
+#define MAC_RSSAR_OB_WIDTH 1
+#define MAC_RSSAR_RSSIA_INDEX 8
+#define MAC_RSSAR_RSSIA_WIDTH 8
+#define MAC_RSSCR_IP2TE_INDEX 1
+#define MAC_RSSCR_IP2TE_WIDTH 1
+#define MAC_RSSCR_RSSE_INDEX 0
+#define MAC_RSSCR_RSSE_WIDTH 1
+#define MAC_RSSCR_TCP4TE_INDEX 2
+#define MAC_RSSCR_TCP4TE_WIDTH 1
+#define MAC_RSSCR_UDP4TE_INDEX 3
+#define MAC_RSSCR_UDP4TE_WIDTH 1
+#define MAC_RSSDR_DMCH_INDEX 0
+#define MAC_RSSDR_DMCH_WIDTH 4
+#define MAC_SSIR_SNSINC_INDEX 8
+#define MAC_SSIR_SNSINC_WIDTH 8
+#define MAC_SSIR_SSINC_INDEX 16
+#define MAC_SSIR_SSINC_WIDTH 8
+#define MAC_TCR_SS_INDEX 29
+#define MAC_TCR_SS_WIDTH 2
+#define MAC_TCR_TE_INDEX 0
+#define MAC_TCR_TE_WIDTH 1
+#define MAC_TSCR_AV8021ASMEN_INDEX 28
+#define MAC_TSCR_AV8021ASMEN_WIDTH 1
+#define MAC_TSCR_SNAPTYPSEL_INDEX 16
+#define MAC_TSCR_SNAPTYPSEL_WIDTH 2
+#define MAC_TSCR_TSADDREG_INDEX 5
+#define MAC_TSCR_TSADDREG_WIDTH 1
+#define MAC_TSCR_TSCFUPDT_INDEX 1
+#define MAC_TSCR_TSCFUPDT_WIDTH 1
+#define MAC_TSCR_TSCTRLSSR_INDEX 9
+#define MAC_TSCR_TSCTRLSSR_WIDTH 1
+#define MAC_TSCR_TSENA_INDEX 0
+#define MAC_TSCR_TSENA_WIDTH 1
+#define MAC_TSCR_TSENALL_INDEX 8
+#define MAC_TSCR_TSENALL_WIDTH 1
+#define MAC_TSCR_TSEVNTENA_INDEX 14
+#define MAC_TSCR_TSEVNTENA_WIDTH 1
+#define MAC_TSCR_TSINIT_INDEX 2
+#define MAC_TSCR_TSINIT_WIDTH 1
+#define MAC_TSCR_TSIPENA_INDEX 11
+#define MAC_TSCR_TSIPENA_WIDTH 1
+#define MAC_TSCR_TSIPV4ENA_INDEX 13
+#define MAC_TSCR_TSIPV4ENA_WIDTH 1
+#define MAC_TSCR_TSIPV6ENA_INDEX 12
+#define MAC_TSCR_TSIPV6ENA_WIDTH 1
+#define MAC_TSCR_TSMSTRENA_INDEX 15
+#define MAC_TSCR_TSMSTRENA_WIDTH 1
+#define MAC_TSCR_TSVER2ENA_INDEX 10
+#define MAC_TSCR_TSVER2ENA_WIDTH 1
+#define MAC_TSCR_TXTSSTSM_INDEX 24
+#define MAC_TSCR_TXTSSTSM_WIDTH 1
+#define MAC_TSSR_TXTSC_INDEX 15
+#define MAC_TSSR_TXTSC_WIDTH 1
+#define MAC_TXSNR_TXTSSTSMIS_INDEX 31
+#define MAC_TXSNR_TXTSSTSMIS_WIDTH 1
+#define MAC_VLANHTR_VLHT_INDEX 0
+#define MAC_VLANHTR_VLHT_WIDTH 16
+#define MAC_VLANIR_VLTI_INDEX 20
+#define MAC_VLANIR_VLTI_WIDTH 1
+#define MAC_VLANIR_CSVL_INDEX 19
+#define MAC_VLANIR_CSVL_WIDTH 1
+#define MAC_VLANTR_DOVLTC_INDEX 20
+#define MAC_VLANTR_DOVLTC_WIDTH 1
+#define MAC_VLANTR_ERSVLM_INDEX 19
+#define MAC_VLANTR_ERSVLM_WIDTH 1
+#define MAC_VLANTR_ESVL_INDEX 18
+#define MAC_VLANTR_ESVL_WIDTH 1
+#define MAC_VLANTR_ETV_INDEX 16
+#define MAC_VLANTR_ETV_WIDTH 1
+#define MAC_VLANTR_EVLS_INDEX 21
+#define MAC_VLANTR_EVLS_WIDTH 2
+#define MAC_VLANTR_EVLRXS_INDEX 24
+#define MAC_VLANTR_EVLRXS_WIDTH 1
+#define MAC_VLANTR_VL_INDEX 0
+#define MAC_VLANTR_VL_WIDTH 16
+#define MAC_VLANTR_VTHM_INDEX 25
+#define MAC_VLANTR_VTHM_WIDTH 1
+#define MAC_VLANTR_VTIM_INDEX 17
+#define MAC_VLANTR_VTIM_WIDTH 1
+#define MAC_VR_DEVID_INDEX 8
+#define MAC_VR_DEVID_WIDTH 8
+#define MAC_VR_SNPSVER_INDEX 0
+#define MAC_VR_SNPSVER_WIDTH 8
+#define MAC_VR_USERVER_INDEX 16
+#define MAC_VR_USERVER_WIDTH 8
+
+/* MMC register offsets */
+#define MMC_CR 0x0800
+#define MMC_RISR 0x0804
+#define MMC_TISR 0x0808
+#define MMC_RIER 0x080c
+#define MMC_TIER 0x0810
+#define MMC_TXOCTETCOUNT_GB_LO 0x0814
+#define MMC_TXOCTETCOUNT_GB_HI 0x0818
+#define MMC_TXFRAMECOUNT_GB_LO 0x081c
+#define MMC_TXFRAMECOUNT_GB_HI 0x0820
+#define MMC_TXBROADCASTFRAMES_G_LO 0x0824
+#define MMC_TXBROADCASTFRAMES_G_HI 0x0828
+#define MMC_TXMULTICASTFRAMES_G_LO 0x082c
+#define MMC_TXMULTICASTFRAMES_G_HI 0x0830
+#define MMC_TX64OCTETS_GB_LO 0x0834
+#define MMC_TX64OCTETS_GB_HI 0x0838
+#define MMC_TX65TO127OCTETS_GB_LO 0x083c
+#define MMC_TX65TO127OCTETS_GB_HI 0x0840
+#define MMC_TX128TO255OCTETS_GB_LO 0x0844
+#define MMC_TX128TO255OCTETS_GB_HI 0x0848
+#define MMC_TX256TO511OCTETS_GB_LO 0x084c
+#define MMC_TX256TO511OCTETS_GB_HI 0x0850
+#define MMC_TX512TO1023OCTETS_GB_LO 0x0854
+#define MMC_TX512TO1023OCTETS_GB_HI 0x0858
+#define MMC_TX1024TOMAXOCTETS_GB_LO 0x085c
+#define MMC_TX1024TOMAXOCTETS_GB_HI 0x0860
+#define MMC_TXUNICASTFRAMES_GB_LO 0x0864
+#define MMC_TXUNICASTFRAMES_GB_HI 0x0868
+#define MMC_TXMULTICASTFRAMES_GB_LO 0x086c
+#define MMC_TXMULTICASTFRAMES_GB_HI 0x0870
+#define MMC_TXBROADCASTFRAMES_GB_LO 0x0874
+#define MMC_TXBROADCASTFRAMES_GB_HI 0x0878
+#define MMC_TXUNDERFLOWERROR_LO 0x087c
+#define MMC_TXUNDERFLOWERROR_HI 0x0880
+#define MMC_TXOCTETCOUNT_G_LO 0x0884
+#define MMC_TXOCTETCOUNT_G_HI 0x0888
+#define MMC_TXFRAMECOUNT_G_LO 0x088c
+#define MMC_TXFRAMECOUNT_G_HI 0x0890
+#define MMC_TXPAUSEFRAMES_LO 0x0894
+#define MMC_TXPAUSEFRAMES_HI 0x0898
+#define MMC_TXVLANFRAMES_G_LO 0x089c
+#define MMC_TXVLANFRAMES_G_HI 0x08a0
+#define MMC_RXFRAMECOUNT_GB_LO 0x0900
+#define MMC_RXFRAMECOUNT_GB_HI 0x0904
+#define MMC_RXOCTETCOUNT_GB_LO 0x0908
+#define MMC_RXOCTETCOUNT_GB_HI 0x090c
+#define MMC_RXOCTETCOUNT_G_LO 0x0910
+#define MMC_RXOCTETCOUNT_G_HI 0x0914
+#define MMC_RXBROADCASTFRAMES_G_LO 0x0918
+#define MMC_RXBROADCASTFRAMES_G_HI 0x091c
+#define MMC_RXMULTICASTFRAMES_G_LO 0x0920
+#define MMC_RXMULTICASTFRAMES_G_HI 0x0924
+#define MMC_RXCRCERROR_LO 0x0928
+#define MMC_RXCRCERROR_HI 0x092c
+#define MMC_RXRUNTERROR 0x0930
+#define MMC_RXJABBERERROR 0x0934
+#define MMC_RXUNDERSIZE_G 0x0938
+#define MMC_RXOVERSIZE_G 0x093c
+#define MMC_RX64OCTETS_GB_LO 0x0940
+#define MMC_RX64OCTETS_GB_HI 0x0944
+#define MMC_RX65TO127OCTETS_GB_LO 0x0948
+#define MMC_RX65TO127OCTETS_GB_HI 0x094c
+#define MMC_RX128TO255OCTETS_GB_LO 0x0950
+#define MMC_RX128TO255OCTETS_GB_HI 0x0954
+#define MMC_RX256TO511OCTETS_GB_LO 0x0958
+#define MMC_RX256TO511OCTETS_GB_HI 0x095c
+#define MMC_RX512TO1023OCTETS_GB_LO 0x0960
+#define MMC_RX512TO1023OCTETS_GB_HI 0x0964
+#define MMC_RX1024TOMAXOCTETS_GB_LO 0x0968
+#define MMC_RX1024TOMAXOCTETS_GB_HI 0x096c
+#define MMC_RXUNICASTFRAMES_G_LO 0x0970
+#define MMC_RXUNICASTFRAMES_G_HI 0x0974
+#define MMC_RXLENGTHERROR_LO 0x0978
+#define MMC_RXLENGTHERROR_HI 0x097c
+#define MMC_RXOUTOFRANGETYPE_LO 0x0980
+#define MMC_RXOUTOFRANGETYPE_HI 0x0984
+#define MMC_RXPAUSEFRAMES_LO 0x0988
+#define MMC_RXPAUSEFRAMES_HI 0x098c
+#define MMC_RXFIFOOVERFLOW_LO 0x0990
+#define MMC_RXFIFOOVERFLOW_HI 0x0994
+#define MMC_RXVLANFRAMES_GB_LO 0x0998
+#define MMC_RXVLANFRAMES_GB_HI 0x099c
+#define MMC_RXWATCHDOGERROR 0x09a0
+
+/* MMC register entry bit positions and sizes */
+#define MMC_CR_CR_INDEX 0
+#define MMC_CR_CR_WIDTH 1
+#define MMC_CR_CSR_INDEX 1
+#define MMC_CR_CSR_WIDTH 1
+#define MMC_CR_ROR_INDEX 2
+#define MMC_CR_ROR_WIDTH 1
+#define MMC_CR_MCF_INDEX 3
+#define MMC_CR_MCF_WIDTH 1
+#define MMC_CR_MCT_INDEX 4
+#define MMC_CR_MCT_WIDTH 2
+#define MMC_RIER_ALL_INTERRUPTS_INDEX 0
+#define MMC_RIER_ALL_INTERRUPTS_WIDTH 23
+#define MMC_RISR_RXFRAMECOUNT_GB_INDEX 0
+#define MMC_RISR_RXFRAMECOUNT_GB_WIDTH 1
+#define MMC_RISR_RXOCTETCOUNT_GB_INDEX 1
+#define MMC_RISR_RXOCTETCOUNT_GB_WIDTH 1
+#define MMC_RISR_RXOCTETCOUNT_G_INDEX 2
+#define MMC_RISR_RXOCTETCOUNT_G_WIDTH 1
+#define MMC_RISR_RXBROADCASTFRAMES_G_INDEX 3
+#define MMC_RISR_RXBROADCASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXMULTICASTFRAMES_G_INDEX 4
+#define MMC_RISR_RXMULTICASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXCRCERROR_INDEX 5
+#define MMC_RISR_RXCRCERROR_WIDTH 1
+#define MMC_RISR_RXRUNTERROR_INDEX 6
+#define MMC_RISR_RXRUNTERROR_WIDTH 1
+#define MMC_RISR_RXJABBERERROR_INDEX 7
+#define MMC_RISR_RXJABBERERROR_WIDTH 1
+#define MMC_RISR_RXUNDERSIZE_G_INDEX 8
+#define MMC_RISR_RXUNDERSIZE_G_WIDTH 1
+#define MMC_RISR_RXOVERSIZE_G_INDEX 9
+#define MMC_RISR_RXOVERSIZE_G_WIDTH 1
+#define MMC_RISR_RX64OCTETS_GB_INDEX 10
+#define MMC_RISR_RX64OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX65TO127OCTETS_GB_INDEX 11
+#define MMC_RISR_RX65TO127OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX128TO255OCTETS_GB_INDEX 12
+#define MMC_RISR_RX128TO255OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX256TO511OCTETS_GB_INDEX 13
+#define MMC_RISR_RX256TO511OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX512TO1023OCTETS_GB_INDEX 14
+#define MMC_RISR_RX512TO1023OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX1024TOMAXOCTETS_GB_INDEX 15
+#define MMC_RISR_RX1024TOMAXOCTETS_GB_WIDTH 1
+#define MMC_RISR_RXUNICASTFRAMES_G_INDEX 16
+#define MMC_RISR_RXUNICASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXLENGTHERROR_INDEX 17
+#define MMC_RISR_RXLENGTHERROR_WIDTH 1
+#define MMC_RISR_RXOUTOFRANGETYPE_INDEX 18
+#define MMC_RISR_RXOUTOFRANGETYPE_WIDTH 1
+#define MMC_RISR_RXPAUSEFRAMES_INDEX 19
+#define MMC_RISR_RXPAUSEFRAMES_WIDTH 1
+#define MMC_RISR_RXFIFOOVERFLOW_INDEX 20
+#define MMC_RISR_RXFIFOOVERFLOW_WIDTH 1
+#define MMC_RISR_RXVLANFRAMES_GB_INDEX 21
+#define MMC_RISR_RXVLANFRAMES_GB_WIDTH 1
+#define MMC_RISR_RXWATCHDOGERROR_INDEX 22
+#define MMC_RISR_RXWATCHDOGERROR_WIDTH 1
+#define MMC_TIER_ALL_INTERRUPTS_INDEX 0
+#define MMC_TIER_ALL_INTERRUPTS_WIDTH 18
+#define MMC_TISR_TXOCTETCOUNT_GB_INDEX 0
+#define MMC_TISR_TXOCTETCOUNT_GB_WIDTH 1
+#define MMC_TISR_TXFRAMECOUNT_GB_INDEX 1
+#define MMC_TISR_TXFRAMECOUNT_GB_WIDTH 1
+#define MMC_TISR_TXBROADCASTFRAMES_G_INDEX 2
+#define MMC_TISR_TXBROADCASTFRAMES_G_WIDTH 1
+#define MMC_TISR_TXMULTICASTFRAMES_G_INDEX 3
+#define MMC_TISR_TXMULTICASTFRAMES_G_WIDTH 1
+#define MMC_TISR_TX64OCTETS_GB_INDEX 4
+#define MMC_TISR_TX64OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX65TO127OCTETS_GB_INDEX 5
+#define MMC_TISR_TX65TO127OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX128TO255OCTETS_GB_INDEX 6
+#define MMC_TISR_TX128TO255OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX256TO511OCTETS_GB_INDEX 7
+#define MMC_TISR_TX256TO511OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX512TO1023OCTETS_GB_INDEX 8
+#define MMC_TISR_TX512TO1023OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX1024TOMAXOCTETS_GB_INDEX 9
+#define MMC_TISR_TX1024TOMAXOCTETS_GB_WIDTH 1
+#define MMC_TISR_TXUNICASTFRAMES_GB_INDEX 10
+#define MMC_TISR_TXUNICASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXMULTICASTFRAMES_GB_INDEX 11
+#define MMC_TISR_TXMULTICASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXBROADCASTFRAMES_GB_INDEX 12
+#define MMC_TISR_TXBROADCASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXUNDERFLOWERROR_INDEX 13
+#define MMC_TISR_TXUNDERFLOWERROR_WIDTH 1
+#define MMC_TISR_TXOCTETCOUNT_G_INDEX 14
+#define MMC_TISR_TXOCTETCOUNT_G_WIDTH 1
+#define MMC_TISR_TXFRAMECOUNT_G_INDEX 15
+#define MMC_TISR_TXFRAMECOUNT_G_WIDTH 1
+#define MMC_TISR_TXPAUSEFRAMES_INDEX 16
+#define MMC_TISR_TXPAUSEFRAMES_WIDTH 1
+#define MMC_TISR_TXVLANFRAMES_G_INDEX 17
+#define MMC_TISR_TXVLANFRAMES_G_WIDTH 1
+
+/* MTL register offsets */
+#define MTL_OMR 0x1000
+#define MTL_FDCR 0x1008
+#define MTL_FDSR 0x100c
+#define MTL_FDDR 0x1010
+#define MTL_ISR 0x1020
+#define MTL_RQDCM0R 0x1030
+#define MTL_TCPM0R 0x1040
+#define MTL_TCPM1R 0x1044
+
+#define MTL_RQDCM_INC 4
+#define MTL_RQDCM_Q_PER_REG 4
+#define MTL_TCPM_INC 4
+#define MTL_TCPM_TC_PER_REG 4
+
+/* MTL register entry bit positions and sizes */
+#define MTL_OMR_ETSALG_INDEX 5
+#define MTL_OMR_ETSALG_WIDTH 2
+#define MTL_OMR_RAA_INDEX 2
+#define MTL_OMR_RAA_WIDTH 1
+
+/* MTL queue register offsets
+ * Multiple queues can be active. The first queue has registers
+ * that begin at 0x1100. Each subsequent queue has registers that
+ * are accessed using an offset of 0x80 from the previous queue.
+ */
+#define MTL_Q_BASE 0x1100
+#define MTL_Q_INC 0x80
+
+#define MTL_Q_TQOMR 0x00
+#define MTL_Q_TQUR 0x04
+#define MTL_Q_TQDR 0x08
+#define MTL_Q_RQOMR 0x40
+#define MTL_Q_RQMPOCR 0x44
+#define MTL_Q_RQDR 0x48
+#define MTL_Q_RQFCR 0x50
+#define MTL_Q_IER 0x70
+#define MTL_Q_ISR 0x74
+
+/* MTL queue register entry bit positions and sizes */
+#define MTL_Q_RQDR_PRXQ_INDEX 16
+#define MTL_Q_RQDR_PRXQ_WIDTH 14
+#define MTL_Q_RQDR_RXQSTS_INDEX 4
+#define MTL_Q_RQDR_RXQSTS_WIDTH 2
+#define MTL_Q_RQFCR_RFA_INDEX 1
+#define MTL_Q_RQFCR_RFA_WIDTH 6
+#define MTL_Q_RQFCR_RFD_INDEX 17
+#define MTL_Q_RQFCR_RFD_WIDTH 6
+#define MTL_Q_RQOMR_EHFC_INDEX 7
+#define MTL_Q_RQOMR_EHFC_WIDTH 1
+#define MTL_Q_RQOMR_RQS_INDEX 16
+#define MTL_Q_RQOMR_RQS_WIDTH 9
+#define MTL_Q_RQOMR_RSF_INDEX 5
+#define MTL_Q_RQOMR_RSF_WIDTH 1
+#define MTL_Q_RQOMR_RTC_INDEX 0
+#define MTL_Q_RQOMR_RTC_WIDTH 2
+#define MTL_Q_TQDR_TRCSTS_INDEX 1
+#define MTL_Q_TQDR_TRCSTS_WIDTH 2
+#define MTL_Q_TQDR_TXQSTS_INDEX 4
+#define MTL_Q_TQDR_TXQSTS_WIDTH 1
+#define MTL_Q_TQOMR_FTQ_INDEX 0
+#define MTL_Q_TQOMR_FTQ_WIDTH 1
+#define MTL_Q_TQOMR_Q2TCMAP_INDEX 8
+#define MTL_Q_TQOMR_Q2TCMAP_WIDTH 3
+#define MTL_Q_TQOMR_TQS_INDEX 16
+#define MTL_Q_TQOMR_TQS_WIDTH 10
+#define MTL_Q_TQOMR_TSF_INDEX 1
+#define MTL_Q_TQOMR_TSF_WIDTH 1
+#define MTL_Q_TQOMR_TTC_INDEX 4
+#define MTL_Q_TQOMR_TTC_WIDTH 3
+#define MTL_Q_TQOMR_TXQEN_INDEX 2
+#define MTL_Q_TQOMR_TXQEN_WIDTH 2
+
+/* MTL queue register value */
+#define MTL_RSF_DISABLE 0x00
+#define MTL_RSF_ENABLE 0x01
+#define MTL_TSF_DISABLE 0x00
+#define MTL_TSF_ENABLE 0x01
+
+#define MTL_RX_THRESHOLD_64 0x00
+#define MTL_RX_THRESHOLD_96 0x02
+#define MTL_RX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_32 0x01
+#define MTL_TX_THRESHOLD_64 0x00
+#define MTL_TX_THRESHOLD_96 0x02
+#define MTL_TX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_192 0x04
+#define MTL_TX_THRESHOLD_256 0x05
+#define MTL_TX_THRESHOLD_384 0x06
+#define MTL_TX_THRESHOLD_512 0x07
+
+#define MTL_ETSALG_WRR 0x00
+#define MTL_ETSALG_WFQ 0x01
+#define MTL_ETSALG_DWRR 0x02
+#define MTL_RAA_SP 0x00
+#define MTL_RAA_WSP 0x01
+
+#define MTL_Q_DISABLED 0x00
+#define MTL_Q_ENABLED 0x02
+
+/* MTL traffic class register offsets
+ * Multiple traffic classes can be active. The first class has registers
+ * that begin at 0x1100. Each subsequent traffic class has registers that
+ * are accessed using an offset of 0x80 from the previous traffic class.
+ */
+#define MTL_TC_BASE MTL_Q_BASE
+#define MTL_TC_INC MTL_Q_INC
+
+#define MTL_TC_ETSCR 0x10
+#define MTL_TC_ETSSR 0x14
+#define MTL_TC_QWR 0x18
+
+/* MTL traffic class register entry bit positions and sizes */
+#define MTL_TC_ETSCR_TSA_INDEX 0
+#define MTL_TC_ETSCR_TSA_WIDTH 2
+#define MTL_TC_QWR_QW_INDEX 0
+#define MTL_TC_QWR_QW_WIDTH 21
+
+/* MTL traffic class register value */
+#define MTL_TSA_SP 0x00
+#define MTL_TSA_ETS 0x02
+
+/* PCS register offsets */
+#define PCS_V1_WINDOW_SELECT 0x03fc
+#define PCS_V2_WINDOW_DEF 0x9060
+#define PCS_V2_WINDOW_SELECT 0x9064
+
+/* PCS register entry bit positions and sizes */
+#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
+#define PCS_V2_WINDOW_DEF_OFFSET_WIDTH 14
+#define PCS_V2_WINDOW_DEF_SIZE_INDEX 2
+#define PCS_V2_WINDOW_DEF_SIZE_WIDTH 4
+
+/* SerDes integration register offsets */
+#define SIR0_KR_RT_1 0x002c
+#define SIR0_STATUS 0x0040
+#define SIR1_SPEED 0x0000
+
+/* SerDes integration register entry bit positions and sizes */
+#define SIR0_KR_RT_1_RESET_INDEX 11
+#define SIR0_KR_RT_1_RESET_WIDTH 1
+#define SIR0_STATUS_RX_READY_INDEX 0
+#define SIR0_STATUS_RX_READY_WIDTH 1
+#define SIR0_STATUS_TX_READY_INDEX 8
+#define SIR0_STATUS_TX_READY_WIDTH 1
+#define SIR1_SPEED_CDR_RATE_INDEX 12
+#define SIR1_SPEED_CDR_RATE_WIDTH 4
+#define SIR1_SPEED_DATARATE_INDEX 4
+#define SIR1_SPEED_DATARATE_WIDTH 2
+#define SIR1_SPEED_PLLSEL_INDEX 3
+#define SIR1_SPEED_PLLSEL_WIDTH 1
+#define SIR1_SPEED_RATECHANGE_INDEX 6
+#define SIR1_SPEED_RATECHANGE_WIDTH 1
+#define SIR1_SPEED_TXAMP_INDEX 8
+#define SIR1_SPEED_TXAMP_WIDTH 4
+#define SIR1_SPEED_WORDMODE_INDEX 0
+#define SIR1_SPEED_WORDMODE_WIDTH 3
+
+/* SerDes RxTx register offsets */
+#define RXTX_REG6 0x0018
+#define RXTX_REG20 0x0050
+#define RXTX_REG22 0x0058
+#define RXTX_REG114 0x01c8
+#define RXTX_REG129 0x0204
+
+/* SerDes RxTx register entry bit positions and sizes */
+#define RXTX_REG6_RESETB_RXD_INDEX 8
+#define RXTX_REG6_RESETB_RXD_WIDTH 1
+#define RXTX_REG20_BLWC_ENA_INDEX 2
+#define RXTX_REG20_BLWC_ENA_WIDTH 1
+#define RXTX_REG114_PQ_REG_INDEX 9
+#define RXTX_REG114_PQ_REG_WIDTH 7
+#define RXTX_REG129_RXDFE_CONFIG_INDEX 14
+#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
+
+/* MAC Control register offsets */
+#define XP_PROP_0 0x0000
+#define XP_PROP_1 0x0004
+#define XP_PROP_2 0x0008
+#define XP_PROP_3 0x000c
+#define XP_PROP_4 0x0010
+#define XP_PROP_5 0x0014
+#define XP_MAC_ADDR_LO 0x0020
+#define XP_MAC_ADDR_HI 0x0024
+#define XP_ECC_ISR 0x0030
+#define XP_ECC_IER 0x0034
+#define XP_ECC_CNT0 0x003c
+#define XP_ECC_CNT1 0x0040
+#define XP_DRIVER_INT_REQ 0x0060
+#define XP_DRIVER_INT_RO 0x0064
+#define XP_DRIVER_SCRATCH_0 0x0068
+#define XP_DRIVER_SCRATCH_1 0x006c
+#define XP_INT_EN 0x0078
+#define XP_I2C_MUTEX 0x0080
+#define XP_MDIO_MUTEX 0x0084
+
+/* MAC Control register entry bit positions and sizes */
+#define XP_DRIVER_INT_REQ_REQUEST_INDEX 0
+#define XP_DRIVER_INT_REQ_REQUEST_WIDTH 1
+#define XP_DRIVER_INT_RO_STATUS_INDEX 0
+#define XP_DRIVER_INT_RO_STATUS_WIDTH 1
+#define XP_DRIVER_SCRATCH_0_COMMAND_INDEX 0
+#define XP_DRIVER_SCRATCH_0_COMMAND_WIDTH 8
+#define XP_DRIVER_SCRATCH_0_SUB_COMMAND_INDEX 8
+#define XP_DRIVER_SCRATCH_0_SUB_COMMAND_WIDTH 8
+#define XP_ECC_CNT0_RX_DED_INDEX 24
+#define XP_ECC_CNT0_RX_DED_WIDTH 8
+#define XP_ECC_CNT0_RX_SEC_INDEX 16
+#define XP_ECC_CNT0_RX_SEC_WIDTH 8
+#define XP_ECC_CNT0_TX_DED_INDEX 8
+#define XP_ECC_CNT0_TX_DED_WIDTH 8
+#define XP_ECC_CNT0_TX_SEC_INDEX 0
+#define XP_ECC_CNT0_TX_SEC_WIDTH 8
+#define XP_ECC_CNT1_DESC_DED_INDEX 8
+#define XP_ECC_CNT1_DESC_DED_WIDTH 8
+#define XP_ECC_CNT1_DESC_SEC_INDEX 0
+#define XP_ECC_CNT1_DESC_SEC_WIDTH 8
+#define XP_ECC_IER_DESC_DED_INDEX 0
+#define XP_ECC_IER_DESC_DED_WIDTH 1
+#define XP_ECC_IER_DESC_SEC_INDEX 1
+#define XP_ECC_IER_DESC_SEC_WIDTH 1
+#define XP_ECC_IER_RX_DED_INDEX 2
+#define XP_ECC_IER_RX_DED_WIDTH 1
+#define XP_ECC_IER_RX_SEC_INDEX 3
+#define XP_ECC_IER_RX_SEC_WIDTH 1
+#define XP_ECC_IER_TX_DED_INDEX 4
+#define XP_ECC_IER_TX_DED_WIDTH 1
+#define XP_ECC_IER_TX_SEC_INDEX 5
+#define XP_ECC_IER_TX_SEC_WIDTH 1
+#define XP_ECC_ISR_DESC_DED_INDEX 0
+#define XP_ECC_ISR_DESC_DED_WIDTH 1
+#define XP_ECC_ISR_DESC_SEC_INDEX 1
+#define XP_ECC_ISR_DESC_SEC_WIDTH 1
+#define XP_ECC_ISR_RX_DED_INDEX 2
+#define XP_ECC_ISR_RX_DED_WIDTH 1
+#define XP_ECC_ISR_RX_SEC_INDEX 3
+#define XP_ECC_ISR_RX_SEC_WIDTH 1
+#define XP_ECC_ISR_TX_DED_INDEX 4
+#define XP_ECC_ISR_TX_DED_WIDTH 1
+#define XP_ECC_ISR_TX_SEC_INDEX 5
+#define XP_ECC_ISR_TX_SEC_WIDTH 1
+#define XP_I2C_MUTEX_BUSY_INDEX 31
+#define XP_I2C_MUTEX_BUSY_WIDTH 1
+#define XP_I2C_MUTEX_ID_INDEX 29
+#define XP_I2C_MUTEX_ID_WIDTH 2
+#define XP_I2C_MUTEX_ACTIVE_INDEX 0
+#define XP_I2C_MUTEX_ACTIVE_WIDTH 1
+#define XP_MAC_ADDR_HI_VALID_INDEX 31
+#define XP_MAC_ADDR_HI_VALID_WIDTH 1
+#define XP_PROP_0_CONN_TYPE_INDEX 28
+#define XP_PROP_0_CONN_TYPE_WIDTH 3
+#define XP_PROP_0_MDIO_ADDR_INDEX 16
+#define XP_PROP_0_MDIO_ADDR_WIDTH 5
+#define XP_PROP_0_PORT_ID_INDEX 0
+#define XP_PROP_0_PORT_ID_WIDTH 8
+#define XP_PROP_0_PORT_MODE_INDEX 8
+#define XP_PROP_0_PORT_MODE_WIDTH 4
+#define XP_PROP_0_PORT_SPEEDS_INDEX 23
+#define XP_PROP_0_PORT_SPEEDS_WIDTH 4
+#define XP_PROP_1_MAX_RX_DMA_INDEX 24
+#define XP_PROP_1_MAX_RX_DMA_WIDTH 5
+#define XP_PROP_1_MAX_RX_QUEUES_INDEX 8
+#define XP_PROP_1_MAX_RX_QUEUES_WIDTH 5
+#define XP_PROP_1_MAX_TX_DMA_INDEX 16
+#define XP_PROP_1_MAX_TX_DMA_WIDTH 5
+#define XP_PROP_1_MAX_TX_QUEUES_INDEX 0
+#define XP_PROP_1_MAX_TX_QUEUES_WIDTH 5
+#define XP_PROP_2_RX_FIFO_SIZE_INDEX 16
+#define XP_PROP_2_RX_FIFO_SIZE_WIDTH 16
+#define XP_PROP_2_TX_FIFO_SIZE_INDEX 0
+#define XP_PROP_2_TX_FIFO_SIZE_WIDTH 16
+#define XP_PROP_3_GPIO_MASK_INDEX 28
+#define XP_PROP_3_GPIO_MASK_WIDTH 4
+#define XP_PROP_3_GPIO_MOD_ABS_INDEX 20
+#define XP_PROP_3_GPIO_MOD_ABS_WIDTH 4
+#define XP_PROP_3_GPIO_RATE_SELECT_INDEX 16
+#define XP_PROP_3_GPIO_RATE_SELECT_WIDTH 4
+#define XP_PROP_3_GPIO_RX_LOS_INDEX 24
+#define XP_PROP_3_GPIO_RX_LOS_WIDTH 4
+#define XP_PROP_3_GPIO_TX_FAULT_INDEX 12
+#define XP_PROP_3_GPIO_TX_FAULT_WIDTH 4
+#define XP_PROP_3_GPIO_ADDR_INDEX 8
+#define XP_PROP_3_GPIO_ADDR_WIDTH 3
+#define XP_PROP_3_MDIO_RESET_INDEX 0
+#define XP_PROP_3_MDIO_RESET_WIDTH 2
+#define XP_PROP_3_MDIO_RESET_I2C_ADDR_INDEX 8
+#define XP_PROP_3_MDIO_RESET_I2C_ADDR_WIDTH 3
+#define XP_PROP_3_MDIO_RESET_I2C_GPIO_INDEX 12
+#define XP_PROP_3_MDIO_RESET_I2C_GPIO_WIDTH 4
+#define XP_PROP_3_MDIO_RESET_INT_GPIO_INDEX 4
+#define XP_PROP_3_MDIO_RESET_INT_GPIO_WIDTH 2
+#define XP_PROP_4_MUX_ADDR_HI_INDEX 8
+#define XP_PROP_4_MUX_ADDR_HI_WIDTH 5
+#define XP_PROP_4_MUX_ADDR_LO_INDEX 0
+#define XP_PROP_4_MUX_ADDR_LO_WIDTH 3
+#define XP_PROP_4_MUX_CHAN_INDEX 4
+#define XP_PROP_4_MUX_CHAN_WIDTH 3
+#define XP_PROP_4_REDRV_ADDR_INDEX 16
+#define XP_PROP_4_REDRV_ADDR_WIDTH 7
+#define XP_PROP_4_REDRV_IF_INDEX 23
+#define XP_PROP_4_REDRV_IF_WIDTH 1
+#define XP_PROP_4_REDRV_LANE_INDEX 24
+#define XP_PROP_4_REDRV_LANE_WIDTH 3
+#define XP_PROP_4_REDRV_MODEL_INDEX 28
+#define XP_PROP_4_REDRV_MODEL_WIDTH 3
+#define XP_PROP_4_REDRV_PRESENT_INDEX 31
+#define XP_PROP_4_REDRV_PRESENT_WIDTH 1
+
+/* I2C Control register offsets */
+#define IC_CON 0x0000
+#define IC_TAR 0x0004
+#define IC_DATA_CMD 0x0010
+#define IC_INTR_STAT 0x002c
+#define IC_INTR_MASK 0x0030
+#define IC_RAW_INTR_STAT 0x0034
+#define IC_CLR_INTR 0x0040
+#define IC_CLR_TX_ABRT 0x0054
+#define IC_CLR_STOP_DET 0x0060
+#define IC_ENABLE 0x006c
+#define IC_TXFLR 0x0074
+#define IC_RXFLR 0x0078
+#define IC_TX_ABRT_SOURCE 0x0080
+#define IC_ENABLE_STATUS 0x009c
+#define IC_COMP_PARAM_1 0x00f4
+
+/* I2C Control register entry bit positions and sizes */
+#define IC_COMP_PARAM_1_MAX_SPEED_MODE_INDEX 2
+#define IC_COMP_PARAM_1_MAX_SPEED_MODE_WIDTH 2
+#define IC_COMP_PARAM_1_RX_BUFFER_DEPTH_INDEX 8
+#define IC_COMP_PARAM_1_RX_BUFFER_DEPTH_WIDTH 8
+#define IC_COMP_PARAM_1_TX_BUFFER_DEPTH_INDEX 16
+#define IC_COMP_PARAM_1_TX_BUFFER_DEPTH_WIDTH 8
+#define IC_CON_MASTER_MODE_INDEX 0
+#define IC_CON_MASTER_MODE_WIDTH 1
+#define IC_CON_RESTART_EN_INDEX 5
+#define IC_CON_RESTART_EN_WIDTH 1
+#define IC_CON_RX_FIFO_FULL_HOLD_INDEX 9
+#define IC_CON_RX_FIFO_FULL_HOLD_WIDTH 1
+#define IC_CON_SLAVE_DISABLE_INDEX 6
+#define IC_CON_SLAVE_DISABLE_WIDTH 1
+#define IC_CON_SPEED_INDEX 1
+#define IC_CON_SPEED_WIDTH 2
+#define IC_DATA_CMD_CMD_INDEX 8
+#define IC_DATA_CMD_CMD_WIDTH 1
+#define IC_DATA_CMD_STOP_INDEX 9
+#define IC_DATA_CMD_STOP_WIDTH 1
+#define IC_ENABLE_ABORT_INDEX 1
+#define IC_ENABLE_ABORT_WIDTH 1
+#define IC_ENABLE_EN_INDEX 0
+#define IC_ENABLE_EN_WIDTH 1
+#define IC_ENABLE_STATUS_EN_INDEX 0
+#define IC_ENABLE_STATUS_EN_WIDTH 1
+#define IC_INTR_MASK_TX_EMPTY_INDEX 4
+#define IC_INTR_MASK_TX_EMPTY_WIDTH 1
+#define IC_RAW_INTR_STAT_RX_FULL_INDEX 2
+#define IC_RAW_INTR_STAT_RX_FULL_WIDTH 1
+#define IC_RAW_INTR_STAT_STOP_DET_INDEX 9
+#define IC_RAW_INTR_STAT_STOP_DET_WIDTH 1
+#define IC_RAW_INTR_STAT_TX_ABRT_INDEX 6
+#define IC_RAW_INTR_STAT_TX_ABRT_WIDTH 1
+#define IC_RAW_INTR_STAT_TX_EMPTY_INDEX 4
+#define IC_RAW_INTR_STAT_TX_EMPTY_WIDTH 1
+
+/* I2C Control register value */
+#define IC_TX_ABRT_7B_ADDR_NOACK 0x0001
+#define IC_TX_ABRT_ARB_LOST 0x1000
+
+/* Descriptor/Packet entry bit positions and sizes */
+#define RX_PACKET_ERRORS_CRC_INDEX 2
+#define RX_PACKET_ERRORS_CRC_WIDTH 1
+#define RX_PACKET_ERRORS_FRAME_INDEX 3
+#define RX_PACKET_ERRORS_FRAME_WIDTH 1
+#define RX_PACKET_ERRORS_LENGTH_INDEX 0
+#define RX_PACKET_ERRORS_LENGTH_WIDTH 1
+#define RX_PACKET_ERRORS_OVERRUN_INDEX 1
+#define RX_PACKET_ERRORS_OVERRUN_WIDTH 1
+
+#define RX_PACKET_ATTRIBUTES_CSUM_DONE_INDEX 0
+#define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1
+#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2
+#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3
+#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4
+#define RX_PACKET_ATTRIBUTES_CONTEXT_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_INDEX 5
+#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1
+
+#define RX_NORMAL_DESC0_OVT_INDEX 0
+#define RX_NORMAL_DESC0_OVT_WIDTH 16
+#define RX_NORMAL_DESC2_HL_INDEX 0
+#define RX_NORMAL_DESC2_HL_WIDTH 10
+#define RX_NORMAL_DESC3_CDA_INDEX 27
+#define RX_NORMAL_DESC3_CDA_WIDTH 1
+#define RX_NORMAL_DESC3_CTXT_INDEX 30
+#define RX_NORMAL_DESC3_CTXT_WIDTH 1
+#define RX_NORMAL_DESC3_ES_INDEX 15
+#define RX_NORMAL_DESC3_ES_WIDTH 1
+#define RX_NORMAL_DESC3_ETLT_INDEX 16
+#define RX_NORMAL_DESC3_ETLT_WIDTH 4
+#define RX_NORMAL_DESC3_FD_INDEX 29
+#define RX_NORMAL_DESC3_FD_WIDTH 1
+#define RX_NORMAL_DESC3_INTE_INDEX 30
+#define RX_NORMAL_DESC3_INTE_WIDTH 1
+#define RX_NORMAL_DESC3_L34T_INDEX 20
+#define RX_NORMAL_DESC3_L34T_WIDTH 4
+#define RX_NORMAL_DESC3_LD_INDEX 28
+#define RX_NORMAL_DESC3_LD_WIDTH 1
+#define RX_NORMAL_DESC3_OWN_INDEX 31
+#define RX_NORMAL_DESC3_OWN_WIDTH 1
+#define RX_NORMAL_DESC3_PL_INDEX 0
+#define RX_NORMAL_DESC3_PL_WIDTH 14
+#define RX_NORMAL_DESC3_RSV_INDEX 26
+#define RX_NORMAL_DESC3_RSV_WIDTH 1
+
+#define RX_DESC3_L34T_IPV4_TCP 1
+#define RX_DESC3_L34T_IPV4_UDP 2
+#define RX_DESC3_L34T_IPV4_ICMP 3
+#define RX_DESC3_L34T_IPV6_TCP 9
+#define RX_DESC3_L34T_IPV6_UDP 10
+#define RX_DESC3_L34T_IPV6_ICMP 11
+
+#define RX_CONTEXT_DESC3_TSA_INDEX 4
+#define RX_CONTEXT_DESC3_TSA_WIDTH 1
+#define RX_CONTEXT_DESC3_TSD_INDEX 6
+#define RX_CONTEXT_DESC3_TSD_WIDTH 1
+
+#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_INDEX 0
+#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX 1
+#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 2
+#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_PTP_INDEX 3
+#define TX_PACKET_ATTRIBUTES_PTP_WIDTH 1
+
+#define TX_CONTEXT_DESC2_MSS_INDEX 0
+#define TX_CONTEXT_DESC2_MSS_WIDTH 15
+#define TX_CONTEXT_DESC3_CTXT_INDEX 30
+#define TX_CONTEXT_DESC3_CTXT_WIDTH 1
+#define TX_CONTEXT_DESC3_TCMSSV_INDEX 26
+#define TX_CONTEXT_DESC3_TCMSSV_WIDTH 1
+#define TX_CONTEXT_DESC3_VLTV_INDEX 16
+#define TX_CONTEXT_DESC3_VLTV_WIDTH 1
+#define TX_CONTEXT_DESC3_VT_INDEX 0
+#define TX_CONTEXT_DESC3_VT_WIDTH 16
+
+#define TX_NORMAL_DESC2_HL_B1L_INDEX 0
+#define TX_NORMAL_DESC2_HL_B1L_WIDTH 14
+#define TX_NORMAL_DESC2_IC_INDEX 31
+#define TX_NORMAL_DESC2_IC_WIDTH 1
+#define TX_NORMAL_DESC2_TTSE_INDEX 30
+#define TX_NORMAL_DESC2_TTSE_WIDTH 1
+#define TX_NORMAL_DESC2_VTIR_INDEX 14
+#define TX_NORMAL_DESC2_VTIR_WIDTH 2
+#define TX_NORMAL_DESC3_CIC_INDEX 16
+#define TX_NORMAL_DESC3_CIC_WIDTH 2
+#define TX_NORMAL_DESC3_CPC_INDEX 26
+#define TX_NORMAL_DESC3_CPC_WIDTH 2
+#define TX_NORMAL_DESC3_CTXT_INDEX 30
+#define TX_NORMAL_DESC3_CTXT_WIDTH 1
+#define TX_NORMAL_DESC3_FD_INDEX 29
+#define TX_NORMAL_DESC3_FD_WIDTH 1
+#define TX_NORMAL_DESC3_FL_INDEX 0
+#define TX_NORMAL_DESC3_FL_WIDTH 15
+#define TX_NORMAL_DESC3_LD_INDEX 28
+#define TX_NORMAL_DESC3_LD_WIDTH 1
+#define TX_NORMAL_DESC3_OWN_INDEX 31
+#define TX_NORMAL_DESC3_OWN_WIDTH 1
+#define TX_NORMAL_DESC3_TCPHDRLEN_INDEX 19
+#define TX_NORMAL_DESC3_TCPHDRLEN_WIDTH 4
+#define TX_NORMAL_DESC3_TCPPL_INDEX 0
+#define TX_NORMAL_DESC3_TCPPL_WIDTH 18
+#define TX_NORMAL_DESC3_TSE_INDEX 18
+#define TX_NORMAL_DESC3_TSE_WIDTH 1
+
+#define TX_NORMAL_DESC2_VLAN_INSERT 0x2
+
+/* MDIO undefined or vendor specific registers */
+#ifndef MDIO_PMA_10GBR_PMD_CTRL
+#define MDIO_PMA_10GBR_PMD_CTRL 0x0096
+#endif
+
+#ifndef MDIO_PMA_10GBR_FECCTRL
+#define MDIO_PMA_10GBR_FECCTRL 0x00ab
+#endif
+
+#ifndef MDIO_PCS_DIG_CTRL
+#define MDIO_PCS_DIG_CTRL 0x8000
+#endif
+
+#ifndef MDIO_AN_XNP
+#define MDIO_AN_XNP 0x0016
+#endif
+
+#ifndef MDIO_AN_LPX
+#define MDIO_AN_LPX 0x0019
+#endif
+
+#ifndef MDIO_AN_COMP_STAT
+#define MDIO_AN_COMP_STAT 0x0030
+#endif
+
+#ifndef MDIO_AN_INTMASK
+#define MDIO_AN_INTMASK 0x8001
+#endif
+
+#ifndef MDIO_AN_INT
+#define MDIO_AN_INT 0x8002
+#endif
+
+#ifndef MDIO_VEND2_AN_ADVERTISE
+#define MDIO_VEND2_AN_ADVERTISE 0x0004
+#endif
+
+#ifndef MDIO_VEND2_AN_LP_ABILITY
+#define MDIO_VEND2_AN_LP_ABILITY 0x0005
+#endif
+
+#ifndef MDIO_VEND2_AN_CTRL
+#define MDIO_VEND2_AN_CTRL 0x8001
+#endif
+
+#ifndef MDIO_VEND2_AN_STAT
+#define MDIO_VEND2_AN_STAT 0x8002
+#endif
+
+#ifndef MDIO_VEND2_PMA_CDR_CONTROL
+#define MDIO_VEND2_PMA_CDR_CONTROL 0x8056
+#endif
+
+#ifndef MDIO_CTRL1_SPEED1G
+#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
+#endif
+
+#ifndef MDIO_VEND2_CTRL1_AN_ENABLE
+#define MDIO_VEND2_CTRL1_AN_ENABLE BIT(12)
+#endif
+
+#ifndef MDIO_VEND2_CTRL1_AN_RESTART
+#define MDIO_VEND2_CTRL1_AN_RESTART BIT(9)
+#endif
+
+#ifndef MDIO_VEND2_CTRL1_SS6
+#define MDIO_VEND2_CTRL1_SS6 BIT(6)
+#endif
+
+#ifndef MDIO_VEND2_CTRL1_SS13
+#define MDIO_VEND2_CTRL1_SS13 BIT(13)
+#endif
+
+/* MDIO mask values */
+#define AXGBE_AN_CL73_INT_CMPLT BIT(0)
+#define AXGBE_AN_CL73_INC_LINK BIT(1)
+#define AXGBE_AN_CL73_PG_RCV BIT(2)
+#define AXGBE_AN_CL73_INT_MASK 0x07
+
+#define AXGBE_XNP_MCF_NULL_MESSAGE 0x001
+#define AXGBE_XNP_ACK_PROCESSED BIT(12)
+#define AXGBE_XNP_MP_FORMATTED BIT(13)
+#define AXGBE_XNP_NP_EXCHANGE BIT(15)
+
+#define AXGBE_KR_TRAINING_START BIT(0)
+#define AXGBE_KR_TRAINING_ENABLE BIT(1)
+
+#define AXGBE_PCS_CL37_BP BIT(12)
+
+#define AXGBE_AN_CL37_INT_CMPLT BIT(0)
+#define AXGBE_AN_CL37_INT_MASK 0x01
+
+#define AXGBE_AN_CL37_HD_MASK 0x40
+#define AXGBE_AN_CL37_FD_MASK 0x20
+
+#define AXGBE_AN_CL37_PCS_MODE_MASK 0x06
+#define AXGBE_AN_CL37_PCS_MODE_BASEX 0x00
+#define AXGBE_AN_CL37_PCS_MODE_SGMII 0x04
+#define AXGBE_AN_CL37_TX_CONFIG_MASK 0x08
+
+#define AXGBE_PMA_CDR_TRACK_EN_MASK 0x01
+#define AXGBE_PMA_CDR_TRACK_EN_OFF 0x00
+#define AXGBE_PMA_CDR_TRACK_EN_ON 0x01
+
+/*generic*/
+#define __iomem
+
+#define rmb() rte_rmb() /* dpdk rte provided rmb */
+#define wmb() rte_wmb() /* dpdk rte provided wmb */
+
+#define __le16 u16
+#define __le32 u32
+#define __le64 u64
+
+typedef unsigned char u8;
+typedef unsigned short u16;
+typedef unsigned int u32;
+typedef unsigned long long u64;
+typedef unsigned long long dma_addr_t;
+
+static inline uint32_t low32_value(uint64_t addr)
+{
+ return (addr) & 0x0ffffffff;
+}
+
+static inline uint32_t high32_value(uint64_t addr)
+{
+ return (addr >> 32) & 0x0ffffffff;
+}
+
+/*END*/
+
+/* Bit setting and getting macros
+ * The get macro will extract the current bit field value from within
+ * the variable
+ *
+ * The set macro will clear the current bit field value within the
+ * variable and then set the bit field of the variable to the
+ * specified value
+ */
+#define GET_BITS(_var, _index, _width) \
+ (((_var) >> (_index)) & ((0x1 << (_width)) - 1))
+
+#define SET_BITS(_var, _index, _width, _val) \
+do { \
+ (_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \
+ (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \
+} while (0)
+
+#define GET_BITS_LE(_var, _index, _width) \
+ ((rte_le_to_cpu_32((_var)) >> (_index)) & ((0x1 << (_width)) - 1))
+
+#define SET_BITS_LE(_var, _index, _width, _val) \
+do { \
+ (_var) &= rte_cpu_to_le_32(~(((0x1 << (_width)) - 1) << (_index)));\
+ (_var) |= rte_cpu_to_le_32((((_val) & \
+ ((0x1 << (_width)) - 1)) << (_index))); \
+} while (0)
+
+/* Bit setting and getting macros based on register fields
+ * The get macro uses the bit field definitions formed using the input
+ * names to extract the current bit field value from within the
+ * variable
+ *
+ * The set macro uses the bit field definitions formed using the input
+ * names to set the bit field of the variable to the specified value
+ */
+#define AXGMAC_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define AXGMAC_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define AXGMAC_GET_BITS_LE(_var, _prefix, _field) \
+ GET_BITS_LE((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define AXGMAC_SET_BITS_LE(_var, _prefix, _field, _val) \
+ SET_BITS_LE((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
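(Editorial illustration, not part of the patch: the _prefix##_##_field token pasting above resolves a register/field pair against the *_INDEX/*_WIDTH definitions earlier in this header. For example, using the MAC_RCR jumbo-enable field:)

	u32 rcr = 0;
	unsigned int je;

	/* Expands to GET_BITS(rcr, MAC_RCR_JE_INDEX, MAC_RCR_JE_WIDTH),
	 * i.e. GET_BITS(rcr, 8, 1)
	 */
	je = AXGMAC_GET_BITS(rcr, MAC_RCR, JE);
	/* Clears bit 8 of rcr and then sets it to 1 */
	AXGMAC_SET_BITS(rcr, MAC_RCR, JE, 1);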
+
+/* Macros for reading or writing registers
+ * The ioread macros will get bit fields or full values using the
+ * register definitions formed using the input names
+ *
+ * The iowrite macros will set bit fields or full values using the
+ * register definitions formed using the input names
+ */
+#define AXGMAC_IOREAD(_pdata, _reg) \
+ rte_read32((uint8_t *)((_pdata)->xgmac_regs) + (_reg))
+
+#define AXGMAC_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(AXGMAC_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define AXGMAC_IOWRITE(_pdata, _reg, _val) \
+ rte_write32((_val), \
+ (uint8_t *)((_pdata)->xgmac_regs) + (_reg))
+
+#define AXGMAC_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u32 reg_val = AXGMAC_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ AXGMAC_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
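(Editorial sketch of how the read-modify-write macro expands, assuming the MAC_RCR register offset defined earlier in this header and a mapped xgmac_regs MMIO base:)

	/* AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1) is roughly equivalent to: */
	u32 reg_val = rte_read32((uint8_t *)pdata->xgmac_regs + MAC_RCR);
	SET_BITS(reg_val, MAC_RCR_IPC_INDEX, MAC_RCR_IPC_WIDTH, 1); /* bit 9, width 1 */
	rte_write32(reg_val, (uint8_t *)pdata->xgmac_regs + MAC_RCR);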
+
+/* Macros for reading or writing MTL queue or traffic class registers
+ * Similar to the standard read and write macros except that the
+ * base register value is calculated by the queue or traffic class number
+ */
+#define AXGMAC_MTL_IOREAD(_pdata, _n, _reg) \
+ rte_read32((uint8_t *)((_pdata)->xgmac_regs) + \
+ MTL_Q_BASE + ((_n) * MTL_Q_INC) + (_reg))
+
+#define AXGMAC_MTL_IOREAD_BITS(_pdata, _n, _reg, _field) \
+ GET_BITS(AXGMAC_MTL_IOREAD((_pdata), (_n), (_reg)), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define AXGMAC_MTL_IOWRITE(_pdata, _n, _reg, _val) \
+ rte_write32((_val), (uint8_t *)((_pdata)->xgmac_regs) +\
+ MTL_Q_BASE + ((_n) * MTL_Q_INC) + (_reg))
+
+#define AXGMAC_MTL_IOWRITE_BITS(_pdata, _n, _reg, _field, _val) \
+do { \
+ u32 reg_val = AXGMAC_MTL_IOREAD((_pdata), (_n), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ AXGMAC_MTL_IOWRITE((_pdata), (_n), _reg, reg_val); \
+} while (0)
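(Editorial note: as a worked example of the per-queue addressing, AXGMAC_MTL_IOREAD(pdata, 2, MTL_Q_TQOMR) reads from xgmac_regs + MTL_Q_BASE + 2 * MTL_Q_INC + MTL_Q_TQOMR = 0x1100 + 0x100 + 0x00, i.e. offset 0x1200 from the MMIO base.)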
+
+/* Macros for reading or writing DMA channel registers
+ * Similar to the standard read and write macros except that the
+ * base register value is obtained from the ring
+ */
+#define AXGMAC_DMA_IOREAD(_channel, _reg) \
+ rte_read32((uint8_t *)((_channel)->dma_regs) + (_reg))
+
+#define AXGMAC_DMA_IOREAD_BITS(_channel, _reg, _field) \
+ GET_BITS(AXGMAC_DMA_IOREAD((_channel), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define AXGMAC_DMA_IOWRITE(_channel, _reg, _val) \
+ rte_write32((_val), \
+ (uint8_t *)((_channel)->dma_regs) + (_reg))
+
+#define AXGMAC_DMA_IOWRITE_BITS(_channel, _reg, _field, _val) \
+do { \
+ u32 reg_val = AXGMAC_DMA_IOREAD((_channel), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ AXGMAC_DMA_IOWRITE((_channel), _reg, reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of XPCS registers.
+ */
+#define XPCS_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XPCS_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XPCS32_IOWRITE(_pdata, _off, _val) \
+ rte_write32(_val, \
+ (uint8_t *)((_pdata)->xpcs_regs) + (_off))
+
+#define XPCS32_IOREAD(_pdata, _off) \
+ rte_read32((uint8_t *)((_pdata)->xpcs_regs) + (_off))
+
+#define XPCS16_IOWRITE(_pdata, _off, _val) \
+ rte_write16(_val, \
+ (uint8_t *)((_pdata)->xpcs_regs) + (_off))
+
+#define XPCS16_IOREAD(_pdata, _off) \
+ rte_read16((uint8_t *)((_pdata)->xpcs_regs) + (_off))
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of SerDes integration registers.
+ */
+#define XSIR_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XSIR_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XSIR0_IOREAD(_pdata, _reg) \
+ rte_read16((uint8_t *)((_pdata)->sir0_regs) + (_reg))
+
+#define XSIR0_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XSIR0_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XSIR0_IOWRITE(_pdata, _reg, _val) \
+ rte_write16((_val), \
+ (uint8_t *)((_pdata)->sir0_regs) + (_reg))
+
+#define XSIR0_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u16 reg_val = XSIR0_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XSIR0_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
+
+#define XSIR1_IOREAD(_pdata, _reg) \
+ rte_read16((uint8_t *)((_pdata)->sir1_regs) + _reg)
+
+#define XSIR1_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XSIR1_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XSIR1_IOWRITE(_pdata, _reg, _val) \
+ rte_write16((_val), \
+ (uint8_t *)((_pdata)->sir1_regs) + (_reg))
+
+#define XSIR1_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u16 reg_val = XSIR1_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XSIR1_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of SerDes RxTx registers.
+ */
+#define XRXTX_IOREAD(_pdata, _reg) \
+ rte_read16((uint8_t *)((_pdata)->rxtx_regs) + (_reg))
+
+#define XRXTX_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XRXTX_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XRXTX_IOWRITE(_pdata, _reg, _val) \
+ rte_write16((_val), \
+ (uint8_t *)((_pdata)->rxtx_regs) + (_reg))
+
+#define XRXTX_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u16 reg_val = XRXTX_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XRXTX_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of MAC Control registers.
+ */
+#define XP_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XP_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XP_IOREAD(_pdata, _reg) \
+ rte_read32((uint8_t *)((_pdata)->xprop_regs) + (_reg))
+
+#define XP_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XP_IOREAD((_pdata), (_reg)), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XP_IOWRITE(_pdata, _reg, _val) \
+ rte_write32((_val), \
+ (uint8_t *)((_pdata)->xprop_regs) + (_reg))
+
+#define XP_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u32 reg_val = XP_IOREAD((_pdata), (_reg)); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XP_IOWRITE((_pdata), (_reg), reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of I2C Control registers.
+ */
+#define XI2C_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XI2C_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XI2C_IOREAD(_pdata, _reg) \
+ rte_read32((uint8_t *)((_pdata)->xi2c_regs) + (_reg))
+
+#define XI2C_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XI2C_IOREAD((_pdata), (_reg)), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XI2C_IOWRITE(_pdata, _reg, _val) \
+ rte_write32((_val), \
+ (uint8_t *)((_pdata)->xi2c_regs) + (_reg))
+
+#define XI2C_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u32 reg_val = XI2C_IOREAD((_pdata), (_reg)); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XI2C_IOWRITE((_pdata), (_reg), reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * using MDIO. Different from above because of the use of standardized
+ * Linux include values. No shifting is performed with the bit
+ * operations; everything works on mask values.
+ */
+#define XMDIO_READ(_pdata, _mmd, _reg) \
+ ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
+ MII_ADDR_C45 | ((_mmd) << 16) | ((_reg) & 0xffff)))
+
+#define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
+ (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
+
+#define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
+ ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
+ MII_ADDR_C45 | ((_mmd) << 16) | ((_reg) & 0xffff), (_val)))
+
+#define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
+do { \
+ u32 mmd_val = XMDIO_READ((_pdata), (_mmd), (_reg)); \
+ mmd_val &= ~(_mask); \
+ mmd_val |= (_val); \
+ XMDIO_WRITE((_pdata), (_mmd), (_reg), (mmd_val)); \
+} while (0)
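(Editorial sketch of the mask-based MDIO form, assuming the standard Clause-45 MMD identifiers such as MDIO_MMD_VEND2; only the masked bits are modified, with no INDEX/WIDTH shifting involved:)

	/* Hypothetical example: select SGMII PCS mode in the vendor-2 AN control register */
	XMDIO_WRITE_BITS(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL,
			 AXGBE_AN_CL37_PCS_MODE_MASK, AXGBE_AN_CL37_PCS_MODE_SGMII);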
+
+/*
+ * time_after(a,b) returns true if the time a is after time b.
+ *
+ * Do this with "<0" and ">=0" to only test the sign of the result. A
+ * good compiler would generate better code (and a really good compiler
+ * wouldn't care). Gcc is currently neither.
+ */
+#define time_after(a, b) ((long)((b) - (a)) < 0)
+#define time_before(a, b) time_after(b, a)
+
+#define time_after_eq(a, b) ((long)((a) - (b)) >= 0)
+#define time_before_eq(a, b) time_after_eq(b, a)
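(Editorial note: the signed-difference trick keeps these comparisons correct across counter wrap-around, provided the two timestamps are less than half the counter range apart. With 8-bit arithmetic for brevity: if the deadline b = 250 and the current time a = 4 after a wrap, then (int8_t)(b - a) = (int8_t)246 = -10 < 0, so time_after(a, b) correctly reports that the deadline has passed.)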
+
+/*---bitmap support apis---*/
+static inline int axgbe_test_bit(int nr, volatile unsigned long *addr)
+{
+ int res;
+
+ rte_mb();
+ res = ((*addr) & (1UL << nr)) != 0;
+ rte_mb();
+ return res;
+}
+
+static inline void axgbe_set_bit(unsigned int nr, volatile unsigned long *addr)
+{
+ __sync_fetch_and_or(addr, (1UL << nr));
+}
+
+static inline void axgbe_clear_bit(int nr, volatile unsigned long *addr)
+{
+ __sync_fetch_and_and(addr, ~(1UL << nr));
+}
+
+static inline int axgbe_test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = (1UL << nr);
+
+ return __sync_fetch_and_and(addr, ~mask) & mask;
+}
+
+static inline unsigned long msecs_to_timer_cycles(unsigned int m)
+{
+	/* Multiply before dividing so sub-second values are not truncated to zero */
+	return (rte_get_timer_hz() * m) / 1000;
+}
+
+#endif /* __AXGBE_COMMON_H__ */
diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c
new file mode 100644
index 00000000..707f1ee9
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_dev.c
@@ -0,0 +1,1103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_ethdev.h"
+#include "axgbe_common.h"
+#include "axgbe_phy.h"
+#include "axgbe_rxtx.h"
+
+static inline unsigned int axgbe_get_max_frame(struct axgbe_port *pdata)
+{
+ return pdata->eth_dev->data->mtu + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + VLAN_HLEN;
+}
+
+/* query busy bit */
+static int mdio_complete(struct axgbe_port *pdata)
+{
+ if (!AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, BUSY))
+ return 1;
+
+ return 0;
+}
+
+static int axgbe_write_ext_mii_regs(struct axgbe_port *pdata, int addr,
+ int reg, u16 val)
+{
+ unsigned int mdio_sca, mdio_sccd;
+ uint64_t timeout;
+
+ mdio_sca = 0;
+ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
+ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
+ AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
+
+ mdio_sccd = 0;
+ AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val);
+ AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1);
+ AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
+ AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
+
+ timeout = rte_get_timer_cycles() + rte_get_timer_hz();
+ while (time_before(rte_get_timer_cycles(), timeout)) {
+ rte_delay_us(100);
+ if (mdio_complete(pdata))
+ return 0;
+ }
+
+ PMD_DRV_LOG(ERR, "Mdio write operation timed out\n");
+ return -ETIMEDOUT;
+}
+
+static int axgbe_read_ext_mii_regs(struct axgbe_port *pdata, int addr,
+ int reg)
+{
+ unsigned int mdio_sca, mdio_sccd;
+ uint64_t timeout;
+
+ mdio_sca = 0;
+ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
+ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
+ AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
+
+ mdio_sccd = 0;
+ AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3);
+ AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
+ AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
+
+ timeout = rte_get_timer_cycles() + rte_get_timer_hz();
+
+ while (time_before(rte_get_timer_cycles(), timeout)) {
+ rte_delay_us(100);
+ if (mdio_complete(pdata))
+ goto success;
+ }
+
+ PMD_DRV_LOG(ERR, "Mdio read operation timed out\n");
+ return -ETIMEDOUT;
+
+success:
+ return AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
+}
+
+static int axgbe_set_ext_mii_mode(struct axgbe_port *pdata, unsigned int port,
+ enum axgbe_mdio_mode mode)
+{
+ unsigned int reg_val = 0;
+
+ switch (mode) {
+ case AXGBE_MDIO_MODE_CL22:
+ if (port > AXGMAC_MAX_C22_PORT)
+ return -EINVAL;
+ reg_val |= (1 << port);
+ break;
+ case AXGBE_MDIO_MODE_CL45:
+ break;
+ default:
+ return -EINVAL;
+ }
+ AXGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val);
+
+ return 0;
+}
+
+static int axgbe_read_mmd_regs_v2(struct axgbe_port *pdata,
+ int prtad __rte_unused, int mmd_reg)
+{
+ unsigned int mmd_address, index, offset;
+ int mmd_data;
+
+ if (mmd_reg & MII_ADDR_C45)
+ mmd_address = mmd_reg & ~MII_ADDR_C45;
+ else
+ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+
+ /* The PCS registers are accessed using mmio. The underlying
+ * management interface uses indirect addressing to access the MMD
+ * register sets. This requires accessing of the PCS register in two
+ * phases, an address phase and a data phase.
+ *
+ * The mmio interface is based on 16-bit offsets and values. All
+ * register offsets must therefore be adjusted by left shifting the
+ * offset 1 bit and reading 16 bits of data.
+ */
+ mmd_address <<= 1;
+ index = mmd_address & ~pdata->xpcs_window_mask;
+ offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+
+ pthread_mutex_lock(&pdata->xpcs_mutex);
+
+ XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
+ mmd_data = XPCS16_IOREAD(pdata, offset);
+
+ pthread_mutex_unlock(&pdata->xpcs_mutex);
+
+ return mmd_data;
+}
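(Editorial illustration of the two-phase access described above, using hypothetical window parameters; the real xpcs_window, xpcs_window_mask and xpcs_window_sel_reg are derived from PCS_V2_WINDOW_DEF at probe time:)

	/* Assume xpcs_window_mask = 0x3fff and xpcs_window = 0x4000.
	 * Reading register 0x8001 in MMD 31 (vendor 2) via a C45 address:
	 *   mmd_address = (31 << 16) | 0x8001         -> 0x1f8001
	 *   mmd_address <<= 1                         -> 0x3f0002
	 *   index  = 0x3f0002 & ~0x3fff               -> 0x3f0000 (window select write)
	 *   offset = 0x4000 + (0x3f0002 & 0x3fff)     -> 0x4002   (16-bit data read)
	 */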
+
+static void axgbe_write_mmd_regs_v2(struct axgbe_port *pdata,
+ int prtad __rte_unused,
+ int mmd_reg, int mmd_data)
+{
+ unsigned int mmd_address, index, offset;
+
+ if (mmd_reg & MII_ADDR_C45)
+ mmd_address = mmd_reg & ~MII_ADDR_C45;
+ else
+ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+
+ /* The PCS registers are accessed using mmio. The underlying
+ * management interface uses indirect addressing to access the MMD
+ * register sets. This requires accessing of the PCS register in two
+ * phases, an address phase and a data phase.
+ *
+ * The mmio interface is based on 16-bit offsets and values. All
+ * register offsets must therefore be adjusted by left shifting the
+ * offset 1 bit and writing 16 bits of data.
+ */
+ mmd_address <<= 1;
+ index = mmd_address & ~pdata->xpcs_window_mask;
+ offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+
+ pthread_mutex_lock(&pdata->xpcs_mutex);
+
+ XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
+ XPCS16_IOWRITE(pdata, offset, mmd_data);
+
+ pthread_mutex_unlock(&pdata->xpcs_mutex);
+}
+
+static int axgbe_read_mmd_regs(struct axgbe_port *pdata, int prtad,
+ int mmd_reg)
+{
+ switch (pdata->vdata->xpcs_access) {
+ case AXGBE_XPCS_ACCESS_V1:
+ PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n");
+ return -1;
+ case AXGBE_XPCS_ACCESS_V2:
+ default:
+ return axgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
+ }
+}
+
+static void axgbe_write_mmd_regs(struct axgbe_port *pdata, int prtad,
+ int mmd_reg, int mmd_data)
+{
+ switch (pdata->vdata->xpcs_access) {
+ case AXGBE_XPCS_ACCESS_V1:
+ PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n");
+ return;
+ case AXGBE_XPCS_ACCESS_V2:
+ default:
+ return axgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
+ }
+}
+
+static int axgbe_set_speed(struct axgbe_port *pdata, int speed)
+{
+ unsigned int ss;
+
+ switch (speed) {
+ case SPEED_1000:
+ ss = 0x03;
+ break;
+ case SPEED_2500:
+ ss = 0x02;
+ break;
+ case SPEED_10000:
+ ss = 0x00;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (AXGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
+ AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);
+
+ return 0;
+}
+
+static int axgbe_disable_tx_flow_control(struct axgbe_port *pdata)
+{
+ unsigned int max_q_count, q_count;
+ unsigned int reg, reg_val;
+ unsigned int i;
+
+ /* Clear MTL flow control */
+ for (i = 0; i < pdata->rx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
+
+ /* Clear MAC flow control */
+ max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;
+ q_count = RTE_MIN(pdata->tx_q_count,
+ max_q_count);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ reg_val = AXGMAC_IOREAD(pdata, reg);
+ AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
+ AXGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MAC_QTFCR_INC;
+ }
+
+ return 0;
+}
+
+static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata)
+{
+ unsigned int max_q_count, q_count;
+ unsigned int reg, reg_val;
+ unsigned int i;
+
+ /* Set MTL flow control */
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ unsigned int ehfc = 0;
+
+ /* Flow control thresholds are established */
+ if (pdata->rx_rfd[i])
+ ehfc = 1;
+
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);
+ }
+
+ /* Set MAC flow control */
+ max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;
+ q_count = RTE_MIN(pdata->tx_q_count,
+ max_q_count);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ reg_val = AXGMAC_IOREAD(pdata, reg);
+
+ /* Enable transmit flow control */
+ AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
+ /* Set pause time */
+ AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);
+
+ AXGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MAC_QTFCR_INC;
+ }
+
+ return 0;
+}
+
+static int axgbe_disable_rx_flow_control(struct axgbe_port *pdata)
+{
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);
+
+ return 0;
+}
+
+static int axgbe_enable_rx_flow_control(struct axgbe_port *pdata)
+{
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);
+
+ return 0;
+}
+
+static int axgbe_config_tx_flow_control(struct axgbe_port *pdata)
+{
+ if (pdata->tx_pause)
+ axgbe_enable_tx_flow_control(pdata);
+ else
+ axgbe_disable_tx_flow_control(pdata);
+
+ return 0;
+}
+
+static int axgbe_config_rx_flow_control(struct axgbe_port *pdata)
+{
+ if (pdata->rx_pause)
+ axgbe_enable_rx_flow_control(pdata);
+ else
+ axgbe_disable_rx_flow_control(pdata);
+
+ return 0;
+}
+
+static void axgbe_config_flow_control(struct axgbe_port *pdata)
+{
+ axgbe_config_tx_flow_control(pdata);
+ axgbe_config_rx_flow_control(pdata);
+
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
+}
+
+static void axgbe_queue_flow_control_threshold(struct axgbe_port *pdata,
+ unsigned int queue,
+ unsigned int q_fifo_size)
+{
+ unsigned int frame_fifo_size;
+ unsigned int rfa, rfd;
+
+ frame_fifo_size = AXGMAC_FLOW_CONTROL_ALIGN(axgbe_get_max_frame(pdata));
+
+ /* This path deals with just maximum frame sizes which are
+ * limited to a jumbo frame of 9,000 (plus headers, etc.)
+ * so we can never exceed the maximum allowable RFA/RFD
+ * values.
+ */
+ if (q_fifo_size <= 2048) {
+ /* rx_rfd to zero to signal no flow control */
+ pdata->rx_rfa[queue] = 0;
+ pdata->rx_rfd[queue] = 0;
+ return;
+ }
+
+ if (q_fifo_size <= 4096) {
+ /* Between 2048 and 4096 */
+ pdata->rx_rfa[queue] = 0; /* Full - 1024 bytes */
+ pdata->rx_rfd[queue] = 1; /* Full - 1536 bytes */
+ return;
+ }
+
+ if (q_fifo_size <= frame_fifo_size) {
+ /* Between 4096 and max-frame */
+ pdata->rx_rfa[queue] = 2; /* Full - 2048 bytes */
+ pdata->rx_rfd[queue] = 5; /* Full - 3584 bytes */
+ return;
+ }
+
+ if (q_fifo_size <= (frame_fifo_size * 3)) {
+ /* Between max-frame and 3 max-frames,
+ * trigger if we get just over a frame of data and
+ * resume when we have just under half a frame left.
+ */
+ rfa = q_fifo_size - frame_fifo_size;
+ rfd = rfa + (frame_fifo_size / 2);
+ } else {
+ /* Above 3 max-frames - trigger when just over
+ * 2 frames of space available
+ */
+ rfa = frame_fifo_size * 2;
+ rfa += AXGMAC_FLOW_CONTROL_UNIT;
+ rfd = rfa + frame_fifo_size;
+ }
+
+ pdata->rx_rfa[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfa);
+ pdata->rx_rfd[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfd);
+}
+
+static void axgbe_calculate_flow_control_threshold(struct axgbe_port *pdata)
+{
+ unsigned int q_fifo_size;
+ unsigned int i;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ q_fifo_size = (pdata->fifo + 1) * AXGMAC_FIFO_UNIT;
+
+ axgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
+ }
+}
+
+static void axgbe_config_flow_control_threshold(struct axgbe_port *pdata)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
+ pdata->rx_rfa[i]);
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
+ pdata->rx_rfd[i]);
+ }
+}
+
+static int __axgbe_exit(struct axgbe_port *pdata)
+{
+ unsigned int count = 2000;
+
+ /* Issue a software reset */
+ AXGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
+ rte_delay_us(10);
+
+	/* Poll until the software reset completes */
+ while (--count && AXGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
+ rte_delay_us(500);
+
+ if (!count)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int axgbe_exit(struct axgbe_port *pdata)
+{
+ int ret;
+
+ /* To guard against possible incorrectly generated interrupts,
+ * issue the software reset twice.
+ */
+ ret = __axgbe_exit(pdata);
+ if (ret)
+ return ret;
+
+ return __axgbe_exit(pdata);
+}
+
+static int axgbe_flush_tx_queues(struct axgbe_port *pdata)
+{
+ unsigned int i, count;
+
+ if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
+ return 0;
+
+ for (i = 0; i < pdata->tx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
+
+	/* Poll until each queue flush completes */
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ count = 2000;
+ while (--count && AXGMAC_MTL_IOREAD_BITS(pdata, i,
+ MTL_Q_TQOMR, FTQ))
+ rte_delay_us(500);
+
+ if (!count)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void axgbe_config_dma_bus(struct axgbe_port *pdata)
+{
+ /* Set enhanced addressing mode */
+ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);
+
+	/* Outstanding read/write requests */
+ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, RD_OSR, 0x3f);
+ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, WR_OSR, 0x3f);
+
+ /* Set the System Bus mode */
+ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
+ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_32, 1);
+ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, AAL, 1);
+}
+
+static void axgbe_config_dma_cache(struct axgbe_port *pdata)
+{
+ unsigned int arcache, awcache, arwcache;
+
+ arcache = 0;
+ AXGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, 0x3);
+ AXGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
+
+ awcache = 0;
+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, 0x3);
+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 0x3);
+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, 0x1);
+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, 0x3);
+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, 0x1);
+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDC, 0x3);
+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDD, 0x1);
+ AXGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
+
+ arwcache = 0;
+ AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWD, 0x1);
+ AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWC, 0x3);
+ AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, RDRC, 0x3);
+ AXGMAC_IOWRITE(pdata, DMA_AXIAWRCR, arwcache);
+}
+
+static void axgbe_config_edma_control(struct axgbe_port *pdata)
+{
+ AXGMAC_IOWRITE(pdata, EDMA_TX_CONTROL, 0x5);
+ AXGMAC_IOWRITE(pdata, EDMA_RX_CONTROL, 0x5);
+}
+
+static int axgbe_config_osp_mode(struct axgbe_port *pdata)
+{
+ /* Force DMA to operate on second packet before closing descriptors
+ * of first packet
+ */
+ struct axgbe_tx_queue *txq;
+ unsigned int i;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+ txq = pdata->eth_dev->data->tx_queues[i];
+ AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, OSP,
+ pdata->tx_osp_mode);
+ }
+
+ return 0;
+}
+
+static int axgbe_config_pblx8(struct axgbe_port *pdata)
+{
+ struct axgbe_tx_queue *txq;
+ unsigned int i;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+ txq = pdata->eth_dev->data->tx_queues[i];
+ AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, PBLX8,
+ pdata->pblx8);
+ }
+ return 0;
+}
+
+static int axgbe_config_tx_pbl_val(struct axgbe_port *pdata)
+{
+ struct axgbe_tx_queue *txq;
+ unsigned int i;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+ txq = pdata->eth_dev->data->tx_queues[i];
+ AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, PBL,
+ pdata->tx_pbl);
+ }
+
+ return 0;
+}
+
+static int axgbe_config_rx_pbl_val(struct axgbe_port *pdata)
+{
+ struct axgbe_rx_queue *rxq;
+ unsigned int i;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
+ rxq = pdata->eth_dev->data->rx_queues[i];
+ AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, PBL,
+ pdata->rx_pbl);
+ }
+
+ return 0;
+}
+
+static void axgbe_config_rx_buffer_size(struct axgbe_port *pdata)
+{
+ struct axgbe_rx_queue *rxq;
+ unsigned int i;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
+ rxq = pdata->eth_dev->data->rx_queues[i];
+
+ rxq->buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -
+ RTE_PKTMBUF_HEADROOM;
+ rxq->buf_size = (rxq->buf_size + AXGBE_RX_BUF_ALIGN - 1) &
+ ~(AXGBE_RX_BUF_ALIGN - 1);
+
+ if (rxq->buf_size > pdata->rx_buf_size)
+ pdata->rx_buf_size = rxq->buf_size;
+
+ AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, RBSZ,
+ rxq->buf_size);
+ }
+}
+
+static int axgbe_write_rss_reg(struct axgbe_port *pdata, unsigned int type,
+ unsigned int index, unsigned int val)
+{
+ unsigned int wait;
+
+ if (AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
+ return -EBUSY;
+
+ AXGMAC_IOWRITE(pdata, MAC_RSSDR, val);
+
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);
+
+ wait = 1000;
+ while (wait--) {
+ if (!AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
+ return 0;
+
+ rte_delay_us(1500);
+ }
+
+ return -EBUSY;
+}
+
+static int axgbe_write_rss_hash_key(struct axgbe_port *pdata)
+{
+ struct rte_eth_rss_conf *rss_conf;
+ unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
+ unsigned int *key;
+ int ret;
+
+ rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+
+ if (!rss_conf->rss_key)
+ key = (unsigned int *)&pdata->rss_key;
+ else
+		/* rss_conf->rss_key is already a pointer to the key bytes */
+		key = (unsigned int *)rss_conf->rss_key;
+
+ while (key_regs--) {
+ ret = axgbe_write_rss_reg(pdata, AXGBE_RSS_HASH_KEY_TYPE,
+ key_regs, *key++);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int axgbe_write_rss_lookup_table(struct axgbe_port *pdata)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
+ ret = axgbe_write_rss_reg(pdata,
+ AXGBE_RSS_LOOKUP_TABLE_TYPE, i,
+ pdata->rss_table[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int axgbe_enable_rss(struct axgbe_port *pdata)
+{
+ int ret;
+
+ /* Program the hash key */
+ ret = axgbe_write_rss_hash_key(pdata);
+ if (ret)
+ return ret;
+
+ /* Program the lookup table */
+ ret = axgbe_write_rss_lookup_table(pdata);
+ if (ret)
+ return ret;
+
+ /* Set the RSS options */
+ AXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
+
+ /* Enable RSS */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);
+
+ return 0;
+}
+
+static void axgbe_rss_options(struct axgbe_port *pdata)
+{
+ struct rte_eth_rss_conf *rss_conf;
+ uint64_t rss_hf;
+
+ rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+ rss_hf = rss_conf->rss_hf;
+
+ if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
+ AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
+ if (rss_hf & (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
+ AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
+ if (rss_hf & (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
+ AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
+}
+
+static int axgbe_config_rss(struct axgbe_port *pdata)
+{
+ uint32_t i;
+
+ if (pdata->rss_enable) {
+ /* Initialize RSS hash key and lookup table */
+ uint32_t *key = (uint32_t *)pdata->rss_key;
+
+ for (i = 0; i < sizeof(pdata->rss_key) / 4; i++)
+ *key++ = (uint32_t)rte_rand();
+ for (i = 0; i < AXGBE_RSS_MAX_TABLE_SIZE; i++)
+ AXGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
+ i % pdata->eth_dev->data->nb_rx_queues);
+ axgbe_rss_options(pdata);
+ if (axgbe_enable_rss(pdata)) {
+ PMD_DRV_LOG(ERR, "Error in enabling RSS support\n");
+ return -1;
+ }
+ } else {
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
+ }
+
+ return 0;
+}
+
+static void axgbe_enable_dma_interrupts(struct axgbe_port *pdata)
+{
+ struct axgbe_tx_queue *txq;
+ unsigned int dma_ch_isr, dma_ch_ier;
+ unsigned int i;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+ txq = pdata->eth_dev->data->tx_queues[i];
+
+ /* Clear all the interrupts which are set */
+ dma_ch_isr = AXGMAC_DMA_IOREAD(txq, DMA_CH_SR);
+ AXGMAC_DMA_IOWRITE(txq, DMA_CH_SR, dma_ch_isr);
+
+ /* Clear all interrupt enable bits */
+ dma_ch_ier = 0;
+
+ /* Interrupt summary bits
+ * NIE - Normal Interrupt Summary Enable (kept disabled here)
+ * AIE - Abnormal Interrupt Summary Enable
+ * FBEE - Fatal Bus Error Enable
+ */
+ AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 0);
+ AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
+ AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
+
+ /* Rx interrupt bits
+ * RBUE - Receive Buffer Unavailable Enable (kept disabled here)
+ * RIE - Receive Interrupt Enable (left unset; Rx completions
+ * are handled by polling)
+ */
+ AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);
+
+ AXGMAC_DMA_IOWRITE(txq, DMA_CH_IER, dma_ch_ier);
+ }
+}
+
+static void wrapper_tx_desc_init(struct axgbe_port *pdata)
+{
+ struct axgbe_tx_queue *txq;
+ unsigned int i;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+ txq = pdata->eth_dev->data->tx_queues[i];
+ txq->cur = 0;
+ txq->dirty = 0;
+ /* Update the total number of Tx descriptors */
+ AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDRLR, txq->nb_desc - 1);
+ /* Update the starting address of descriptor ring */
+ AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_HI,
+ high32_value(txq->ring_phys_addr));
+ AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_LO,
+ low32_value(txq->ring_phys_addr));
+ }
+}
+
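+/* Populate every Rx descriptor with a freshly allocated mbuf, program its
+ * buffer address and then hand the descriptor to hardware by setting the
+ * OWN bit; the write barriers order the address write before the
+ * ownership transfer.
+ */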
+static int wrapper_rx_desc_init(struct axgbe_port *pdata)
+{
+ struct axgbe_rx_queue *rxq;
+ struct rte_mbuf *mbuf;
+ volatile union axgbe_rx_desc *desc;
+ unsigned int i, j;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
+ rxq = pdata->eth_dev->data->rx_queues[i];
+
+ /* Initialize software ring entries */
+ rxq->mbuf_alloc = 0;
+ rxq->cur = 0;
+ rxq->dirty = 0;
+ desc = AXGBE_GET_DESC_PT(rxq, 0);
+
+ for (j = 0; j < rxq->nb_desc; j++) {
+ mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (mbuf == NULL) {
+ PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id = %u, idx = %d\n",
+ (unsigned int)rxq->queue_id, j);
+ axgbe_dev_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+ rxq->sw_ring[j] = mbuf;
+ /* Mbuf populate */
+ mbuf->next = NULL;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->port_id;
+ desc->read.baddr =
+ rte_cpu_to_le_64(
+ rte_mbuf_data_iova_default(mbuf));
+ rte_wmb();
+ AXGMAC_SET_BITS_LE(desc->read.desc3,
+ RX_NORMAL_DESC3, OWN, 1);
+ rte_wmb();
+ rxq->mbuf_alloc++;
+ desc++;
+ }
+ /* Update the total number of Rx descriptors */
+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDRLR,
+ rxq->nb_desc - 1);
+ /* Update the starting address of descriptor ring */
+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_HI,
+ high32_value(rxq->ring_phys_addr));
+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_LO,
+ low32_value(rxq->ring_phys_addr));
+ /* Update the Rx Descriptor Tail Pointer */
+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
+ low32_value(rxq->ring_phys_addr +
+ (rxq->nb_desc - 1) *
+ sizeof(union axgbe_rx_desc)));
+ }
+ return 0;
+}
+
+static void axgbe_config_mtl_mode(struct axgbe_port *pdata)
+{
+ unsigned int i;
+
+ /* Set Tx to weighted round robin scheduling algorithm */
+ AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
+
+ /* Set Tx traffic classes to use WRR algorithm with equal weights */
+ for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
+ MTL_TSA_ETS);
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
+ }
+
+ /* Set Rx to strict priority algorithm */
+ AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
+}
+
+static int axgbe_config_tsf_mode(struct axgbe_port *pdata, unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->tx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);
+
+ return 0;
+}
+
+static int axgbe_config_rsf_mode(struct axgbe_port *pdata, unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->rx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);
+
+ return 0;
+}
+
+static int axgbe_config_tx_threshold(struct axgbe_port *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->tx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);
+
+ return 0;
+}
+
+static int axgbe_config_rx_threshold(struct axgbe_port *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->rx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);
+
+ return 0;
+}
+
+/* Distribute the FIFO size across the hardware queues */
+static void axgbe_config_rx_fifo_size(struct axgbe_port *pdata)
+{
+ unsigned int fifo_size;
+ unsigned int q_fifo_size;
+ unsigned int p_fifo, i;
+
+ fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
+ pdata->hw_feat.rx_fifo_size);
+ q_fifo_size = fifo_size / pdata->rx_q_count;
+
+ /* Calculate the fifo setting by dividing the queue's fifo size
+ * by the fifo allocation increment (with 0 representing the
+ * base allocation increment so decrement the result
+ * by 1).
+ */
+ p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
+ if (p_fifo)
+ p_fifo--;
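+ /* Example: a 65536-byte Rx FIFO shared by 4 queues gives 16384 bytes
+ * per queue, i.e. RQS = 16384 / 256 - 1 = 63.
+ */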
+
+ for (i = 0; i < pdata->rx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, p_fifo);
+ pdata->fifo = p_fifo;
+
+ /* Calculate and configure the flow control thresholds */
+ axgbe_calculate_flow_control_threshold(pdata);
+ axgbe_config_flow_control_threshold(pdata);
+}
+
+static void axgbe_config_tx_fifo_size(struct axgbe_port *pdata)
+{
+ unsigned int fifo_size;
+ unsigned int q_fifo_size;
+ unsigned int p_fifo, i;
+
+ fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
+ pdata->hw_feat.tx_fifo_size);
+ q_fifo_size = fifo_size / pdata->tx_q_count;
+
+ /* Calculate the fifo setting by dividing the queue's fifo size
+ * by the fifo allocation increment (with 0 representing the
+ * base allocation increment so decrement the result
+ * by 1).
+ */
+ p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
+ if (p_fifo)
+ p_fifo--;
+
+ for (i = 0; i < pdata->tx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, p_fifo);
+}
+
+static void axgbe_config_queue_mapping(struct axgbe_port *pdata)
+{
+ unsigned int qptc, qptc_extra, queue;
+ unsigned int i, j, reg, reg_val;
+
+ /* Map the MTL Tx Queues to Traffic Classes
+ * Note: Tx Queues >= Traffic Classes
+ */
+ qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
+ qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
+
+ for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
+ for (j = 0; j < qptc; j++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
+ Q2TCMAP, i);
+ if (i < qptc_extra)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
+ Q2TCMAP, i);
+ }
+
+ if (pdata->rss_enable) {
+ /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
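+ /* Each MTL_RQDCM register carries one byte per Rx queue
+ * (MTL_RQDCM_Q_PER_REG queues per register); writing 0x80 into a
+ * queue's byte selects dynamic DMA channel mapping for that queue.
+ */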
+ reg = MTL_RQDCM0R;
+ reg_val = 0;
+ for (i = 0; i < pdata->rx_q_count;) {
+ reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));
+
+ if ((i % MTL_RQDCM_Q_PER_REG) &&
+ (i != pdata->rx_q_count))
+ continue;
+
+ AXGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MTL_RQDCM_INC;
+ reg_val = 0;
+ }
+ }
+}
+
+static void axgbe_enable_mtl_interrupts(struct axgbe_port *pdata)
+{
+ unsigned int mtl_q_isr;
+ unsigned int q_count, i;
+
+ q_count = RTE_MAX(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
+ for (i = 0; i < q_count; i++) {
+ /* Clear all the interrupts which are set */
+ mtl_q_isr = AXGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
+ AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);
+
+ /* No MTL interrupts to be enabled */
+ AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
+ }
+}
+
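+/* The MAC address is packed little-endian into the address registers:
+ * bytes 4-5 go into MAC_MACA0HR and bytes 0-3 into MAC_MACA0LR.
+ */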
+static int axgbe_set_mac_address(struct axgbe_port *pdata, u8 *addr)
+{
+ unsigned int mac_addr_hi, mac_addr_lo;
+
+ mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
+ mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
+ (addr[1] << 8) | (addr[0] << 0);
+
+ AXGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
+ AXGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);
+
+ return 0;
+}
+
+static void axgbe_config_mac_address(struct axgbe_port *pdata)
+{
+ axgbe_set_mac_address(pdata, pdata->mac_addr.addr_bytes);
+}
+
+static void axgbe_config_jumbo_enable(struct axgbe_port *pdata)
+{
+ unsigned int val;
+
+ val = (pdata->rx_buf_size > AXGMAC_STD_PACKET_MTU) ? 1 : 0;
+
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
+}
+
+static void axgbe_config_mac_speed(struct axgbe_port *pdata)
+{
+ axgbe_set_speed(pdata, pdata->phy_speed);
+}
+
+static void axgbe_config_checksum_offload(struct axgbe_port *pdata)
+{
+ if (pdata->rx_csum_enable)
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);
+ else
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);
+}
+
+static int axgbe_init(struct axgbe_port *pdata)
+{
+ int ret;
+
+ /* Flush Tx queues */
+ ret = axgbe_flush_tx_queues(pdata);
+ if (ret)
+ return ret;
+ /* Initialize DMA related features */
+ axgbe_config_dma_bus(pdata);
+ axgbe_config_dma_cache(pdata);
+ axgbe_config_edma_control(pdata);
+ axgbe_config_osp_mode(pdata);
+ axgbe_config_pblx8(pdata);
+ axgbe_config_tx_pbl_val(pdata);
+ axgbe_config_rx_pbl_val(pdata);
+ axgbe_config_rx_buffer_size(pdata);
+ axgbe_config_rss(pdata);
+ wrapper_tx_desc_init(pdata);
+ ret = wrapper_rx_desc_init(pdata);
+ if (ret)
+ return ret;
+ axgbe_enable_dma_interrupts(pdata);
+
+ /* Initialize MTL related features */
+ axgbe_config_mtl_mode(pdata);
+ axgbe_config_queue_mapping(pdata);
+ axgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
+ axgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
+ axgbe_config_tx_threshold(pdata, pdata->tx_threshold);
+ axgbe_config_rx_threshold(pdata, pdata->rx_threshold);
+ axgbe_config_tx_fifo_size(pdata);
+ axgbe_config_rx_fifo_size(pdata);
+
+ axgbe_enable_mtl_interrupts(pdata);
+
+ /* Initialize MAC related features */
+ axgbe_config_mac_address(pdata);
+ axgbe_config_jumbo_enable(pdata);
+ axgbe_config_flow_control(pdata);
+ axgbe_config_mac_speed(pdata);
+ axgbe_config_checksum_offload(pdata);
+
+ return 0;
+}
+
+void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if)
+{
+ hw_if->exit = axgbe_exit;
+ hw_if->config_flow_control = axgbe_config_flow_control;
+
+ hw_if->init = axgbe_init;
+
+ hw_if->read_mmd_regs = axgbe_read_mmd_regs;
+ hw_if->write_mmd_regs = axgbe_write_mmd_regs;
+
+ hw_if->set_speed = axgbe_set_speed;
+
+ hw_if->set_ext_mii_mode = axgbe_set_ext_mii_mode;
+ hw_if->read_ext_mii_regs = axgbe_read_ext_mii_regs;
+ hw_if->write_ext_mii_regs = axgbe_write_ext_mii_regs;
+ /* Flow control */
+ hw_if->config_tx_flow_control = axgbe_config_tx_flow_control;
+ hw_if->config_rx_flow_control = axgbe_config_rx_flow_control;
+}
diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
new file mode 100644
index 00000000..7a3ba2e7
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -0,0 +1,772 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_rxtx.h"
+#include "axgbe_ethdev.h"
+#include "axgbe_common.h"
+#include "axgbe_phy.h"
+
+static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
+static int axgbe_dev_configure(struct rte_eth_dev *dev);
+static int axgbe_dev_start(struct rte_eth_dev *dev);
+static void axgbe_dev_stop(struct rte_eth_dev *dev);
+static void axgbe_dev_interrupt_handler(void *param);
+static void axgbe_dev_close(struct rte_eth_dev *dev);
+static void axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static void axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static void axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int axgbe_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static void axgbe_dev_stats_reset(struct rte_eth_dev *dev);
+static void axgbe_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+
+/* The set of PCI devices this driver supports */
+#define AMD_PCI_VENDOR_ID 0x1022
+#define AMD_PCI_AXGBE_DEVICE_V2A 0x1458
+#define AMD_PCI_AXGBE_DEVICE_V2B 0x1459
+
+int axgbe_logtype_init;
+int axgbe_logtype_driver;
+
+static const struct rte_pci_id pci_id_axgbe_map[] = {
+ {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
+ {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
+ { .vendor_id = 0, },
+};
+
+static struct axgbe_version_data axgbe_v2a = {
+ .init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
+ .xpcs_access = AXGBE_XPCS_ACCESS_V2,
+ .mmc_64bit = 1,
+ .tx_max_fifo_size = 229376,
+ .rx_max_fifo_size = 229376,
+ .tx_tstamp_workaround = 1,
+ .ecc_support = 1,
+ .i2c_support = 1,
+ .an_cdr_workaround = 1,
+};
+
+static struct axgbe_version_data axgbe_v2b = {
+ .init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
+ .xpcs_access = AXGBE_XPCS_ACCESS_V2,
+ .mmc_64bit = 1,
+ .tx_max_fifo_size = 65536,
+ .rx_max_fifo_size = 65536,
+ .tx_tstamp_workaround = 1,
+ .ecc_support = 1,
+ .i2c_support = 1,
+ .an_cdr_workaround = 1,
+};
+
+static const struct rte_eth_desc_lim rx_desc_lim = {
+ .nb_max = AXGBE_MAX_RING_DESC,
+ .nb_min = AXGBE_MIN_RING_DESC,
+ .nb_align = 8,
+};
+
+static const struct rte_eth_desc_lim tx_desc_lim = {
+ .nb_max = AXGBE_MAX_RING_DESC,
+ .nb_min = AXGBE_MIN_RING_DESC,
+ .nb_align = 8,
+};
+
+static const struct eth_dev_ops axgbe_eth_dev_ops = {
+ .dev_configure = axgbe_dev_configure,
+ .dev_start = axgbe_dev_start,
+ .dev_stop = axgbe_dev_stop,
+ .dev_close = axgbe_dev_close,
+ .promiscuous_enable = axgbe_dev_promiscuous_enable,
+ .promiscuous_disable = axgbe_dev_promiscuous_disable,
+ .allmulticast_enable = axgbe_dev_allmulticast_enable,
+ .allmulticast_disable = axgbe_dev_allmulticast_disable,
+ .link_update = axgbe_dev_link_update,
+ .stats_get = axgbe_dev_stats_get,
+ .stats_reset = axgbe_dev_stats_reset,
+ .dev_infos_get = axgbe_dev_info_get,
+ .rx_queue_setup = axgbe_dev_rx_queue_setup,
+ .rx_queue_release = axgbe_dev_rx_queue_release,
+ .tx_queue_setup = axgbe_dev_tx_queue_setup,
+ .tx_queue_release = axgbe_dev_tx_queue_release,
+};
+
+static int axgbe_phy_reset(struct axgbe_port *pdata)
+{
+ pdata->phy_link = -1;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ return pdata->phy_if.phy_reset(pdata);
+}
+
+/*
+ * Interrupt handler triggered by the NIC for handling
+ * device-specific interrupts.
+ *
+ * @param handle
+ * Pointer to interrupt handle.
+ * @param param
+ * The address of the parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ * void
+ */
+static void
+axgbe_dev_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct axgbe_port *pdata = dev->data->dev_private;
+ unsigned int dma_isr, dma_ch_isr;
+
+ pdata->phy_if.an_isr(pdata);
+ /*DMA related interrupts*/
+ dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
+ if (dma_isr) {
+ if (dma_isr & 1) {
+ dma_ch_isr =
+ AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
+ pdata->rx_queues[0],
+ DMA_CH_SR);
+ AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
+ pdata->rx_queues[0],
+ DMA_CH_SR, dma_ch_isr);
+ }
+ }
+ /* Re-enable the interrupt, since it is disabled after generation */
+ rte_intr_enable(&pdata->pci_dev->intr_handle);
+}
+
+/*
+ * Configure device link speed and setup link.
+ * It returns 0 on success.
+ */
+static int
+axgbe_dev_configure(struct rte_eth_dev *dev)
+{
+ struct axgbe_port *pdata = dev->data->dev_private;
+ /* Checksum offload to hardware */
+ pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CHECKSUM;
+ return 0;
+}
+
+static int
+axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
+{
+ struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
+
+ if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+ pdata->rss_enable = 1;
+ else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+ pdata->rss_enable = 0;
+ else
+ return -1;
+ return 0;
+}
+
+static int
+axgbe_dev_start(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
+ int ret;
+
+ /* Multiqueue RSS */
+ ret = axgbe_dev_rx_mq_config(dev);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
+ return ret;
+ }
+ ret = axgbe_phy_reset(pdata);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "phy reset failed\n");
+ return ret;
+ }
+ ret = pdata->hw_if.init(pdata);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "dev_init failed\n");
+ return ret;
+ }
+
+ /* enable uio/vfio intr/eventfd mapping */
+ rte_intr_enable(&pdata->pci_dev->intr_handle);
+
+ /* PHY start */
+ pdata->phy_if.phy_start(pdata);
+ axgbe_dev_enable_tx(dev);
+ axgbe_dev_enable_rx(dev);
+
+ axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
+ axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
+ return 0;
+}
+
+/* Stop device: disable rx and tx functions to allow for reconfiguring. */
+static void
+axgbe_dev_stop(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ rte_intr_disable(&pdata->pci_dev->intr_handle);
+
+ if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
+ return;
+
+ axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
+ axgbe_dev_disable_tx(dev);
+ axgbe_dev_disable_rx(dev);
+
+ pdata->phy_if.phy_stop(pdata);
+ pdata->hw_if.exit(pdata);
+ memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
+ axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
+}
+
+/* Clear all resources like TX/RX queues. */
+static void
+axgbe_dev_close(struct rte_eth_dev *dev)
+{
+ axgbe_dev_clear_queues(dev);
+}
+
+static void
+axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);
+}
+
+static void
+axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);
+}
+
+static void
+axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
+ return;
+ AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);
+}
+
+static void
+axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
+ return;
+ AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);
+}
+
+/* return 0 means link status changed, -1 means not changed */
+static int
+axgbe_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete __rte_unused)
+{
+ struct axgbe_port *pdata = dev->data->dev_private;
+ struct rte_eth_link link;
+ int ret = 0;
+
+ PMD_INIT_FUNC_TRACE();
+ rte_delay_ms(800);
+
+ pdata->phy_if.phy_status(pdata);
+
+ memset(&link, 0, sizeof(struct rte_eth_link));
+ link.link_duplex = pdata->phy.duplex;
+ link.link_status = pdata->phy_link;
+ link.link_speed = pdata->phy_speed;
+ link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+ ETH_LINK_SPEED_FIXED);
+ ret = rte_eth_linkstatus_set(dev, &link);
+ if (ret == -1)
+ PMD_DRV_LOG(ERR, "No change in link status\n");
+
+ return ret;
+}
+
+static int
+axgbe_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats)
+{
+ struct axgbe_rx_queue *rxq;
+ struct axgbe_tx_queue *txq;
+ unsigned int i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ stats->q_ipackets[i] = rxq->pkts;
+ stats->ipackets += rxq->pkts;
+ stats->q_ibytes[i] = rxq->bytes;
+ stats->ibytes += rxq->bytes;
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ stats->q_opackets[i] = txq->pkts;
+ stats->opackets += txq->pkts;
+ stats->q_obytes[i] = txq->bytes;
+ stats->obytes += txq->bytes;
+ }
+
+ return 0;
+}
+
+static void
+axgbe_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ struct axgbe_rx_queue *rxq;
+ struct axgbe_tx_queue *txq;
+ unsigned int i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ rxq->pkts = 0;
+ rxq->bytes = 0;
+ rxq->errors = 0;
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ txq->pkts = 0;
+ txq->bytes = 0;
+ txq->errors = 0;
+ }
+}
+
+static void
+axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ dev_info->max_rx_queues = pdata->rx_ring_count;
+ dev_info->max_tx_queues = pdata->tx_ring_count;
+ dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
+ dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
+ dev_info->max_mac_addrs = AXGBE_MAX_MAC_ADDRS;
+ dev_info->speed_capa = ETH_LINK_SPEED_10G;
+
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM;
+
+ if (pdata->hw_feat.rss) {
+ dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
+ dev_info->reta_size = pdata->hw_feat.hash_table_size;
+ dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE;
+ }
+
+ dev_info->rx_desc_lim = rx_desc_lim;
+ dev_info->tx_desc_lim = tx_desc_lim;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_free_thresh = AXGBE_RX_FREE_THRESH,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_free_thresh = AXGBE_TX_FREE_THRESH,
+ .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
+ ETH_TXQ_FLAGS_NOOFFLOADS,
+ };
+}
+
+static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
+{
+ unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
+ struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
+
+ mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
+ mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
+ mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);
+
+ memset(hw_feat, 0, sizeof(*hw_feat));
+
+ hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);
+
+ /* Hardware feature register 0 */
+ hw_feat->gmii = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
+ hw_feat->vlhash = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
+ hw_feat->sma = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
+ hw_feat->rwk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
+ hw_feat->mgk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
+ hw_feat->mmc = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
+ hw_feat->aoe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
+ hw_feat->ts = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
+ hw_feat->eee = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
+ hw_feat->tx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
+ hw_feat->rx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
+ hw_feat->addn_mac = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
+ ADDMACADRSEL);
+ hw_feat->ts_src = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
+ hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
+
+ /* Hardware feature register 1 */
+ hw_feat->rx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ RXFIFOSIZE);
+ hw_feat->tx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ TXFIFOSIZE);
+ hw_feat->adv_ts_hi = AXGMAC_GET_BITS(mac_hfr1,
+ MAC_HWF1R, ADVTHWORD);
+ hw_feat->dma_width = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
+ hw_feat->dcb = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
+ hw_feat->sph = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
+ hw_feat->tso = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
+ hw_feat->dma_debug = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
+ hw_feat->rss = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
+ hw_feat->tc_cnt = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
+ hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ HASHTBLSZ);
+ hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ L3L4FNUM);
+
+ /* Hardware feature register 2 */
+ hw_feat->rx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
+ hw_feat->tx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
+ hw_feat->rx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
+ hw_feat->tx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
+ hw_feat->pps_out_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
+ hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
+ AUXSNAPNUM);
+
+ /* Translate the Hash Table size into actual number */
+ switch (hw_feat->hash_table_size) {
+ case 0:
+ break;
+ case 1:
+ hw_feat->hash_table_size = 64;
+ break;
+ case 2:
+ hw_feat->hash_table_size = 128;
+ break;
+ case 3:
+ hw_feat->hash_table_size = 256;
+ break;
+ }
+
+ /* Translate the address width setting into actual number */
+ switch (hw_feat->dma_width) {
+ case 0:
+ hw_feat->dma_width = 32;
+ break;
+ case 1:
+ hw_feat->dma_width = 40;
+ break;
+ case 2:
+ hw_feat->dma_width = 48;
+ break;
+ default:
+ hw_feat->dma_width = 32;
+ }
+
+ /* The Queue, Channel and TC counts are zero based so increment them
+ * to get the actual number
+ */
+ hw_feat->rx_q_cnt++;
+ hw_feat->tx_q_cnt++;
+ hw_feat->rx_ch_cnt++;
+ hw_feat->tx_ch_cnt++;
+ hw_feat->tc_cnt++;
+
+ /* Translate the fifo sizes into actual numbers */
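+ /* e.g. an encoded FIFO size of 9 expands to 1 << (9 + 7) = 65536 bytes */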
+ hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
+ hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
+}
+
+static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
+{
+ axgbe_init_function_ptrs_dev(&pdata->hw_if);
+ axgbe_init_function_ptrs_phy(&pdata->phy_if);
+ axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
+ pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
+}
+
+static void axgbe_set_counts(struct axgbe_port *pdata)
+{
+ /* Set all the function pointers */
+ axgbe_init_all_fptrs(pdata);
+
+ /* Populate the hardware features */
+ axgbe_get_all_hw_features(pdata);
+
+ /* Set default max values if not provided */
+ if (!pdata->tx_max_channel_count)
+ pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
+ if (!pdata->rx_max_channel_count)
+ pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;
+
+ if (!pdata->tx_max_q_count)
+ pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
+ if (!pdata->rx_max_q_count)
+ pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;
+
+ /* Calculate the number of Tx and Rx rings to be created
+ * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
+ * the number of Tx queues to the number of Tx channels
+ * enabled
+ * -Rx (DMA) Channels do not map 1-to-1 so use the actual
+ * number of Rx queues or maximum allowed
+ */
+ pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
+ pdata->tx_max_channel_count);
+ pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
+ pdata->tx_max_q_count);
+
+ pdata->tx_q_count = pdata->tx_ring_count;
+
+ pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
+ pdata->rx_max_channel_count);
+
+ pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
+ pdata->rx_max_q_count);
+}
+
+static void axgbe_default_config(struct axgbe_port *pdata)
+{
+ pdata->pblx8 = DMA_PBL_X8_ENABLE;
+ pdata->tx_sf_mode = MTL_TSF_ENABLE;
+ pdata->tx_threshold = MTL_TX_THRESHOLD_64;
+ pdata->tx_pbl = DMA_PBL_32;
+ pdata->tx_osp_mode = DMA_OSP_ENABLE;
+ pdata->rx_sf_mode = MTL_RSF_ENABLE;
+ pdata->rx_threshold = MTL_RX_THRESHOLD_64;
+ pdata->rx_pbl = DMA_PBL_32;
+ pdata->pause_autoneg = 1;
+ pdata->tx_pause = 0;
+ pdata->rx_pause = 0;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ pdata->power_down = 0;
+}
+
+/*
+ * It returns 0 on success.
+ */
+static int
+eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ struct axgbe_port *pdata;
+ struct rte_pci_device *pci_dev;
+ uint32_t reg, mac_lo, mac_hi;
+ int ret;
+
+ eth_dev->dev_ops = &axgbe_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &axgbe_recv_pkts;
+
+ /*
+ * For secondary processes, we don't initialise any further as primary
+ * has already done this work.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ pdata = (struct axgbe_port *)eth_dev->data->dev_private;
+ /* initial state */
+ axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
+ axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
+ pdata->eth_dev = eth_dev;
+
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pdata->pci_dev = pci_dev;
+
+ pdata->xgmac_regs =
+ (void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
+ pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
+ + AXGBE_MAC_PROP_OFFSET);
+ pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
+ + AXGBE_I2C_CTRL_OFFSET);
+ pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;
+
+ /* Version-specific driver data */
+ if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
+ pdata->vdata = &axgbe_v2a;
+ else
+ pdata->vdata = &axgbe_v2b;
+
+ /* Configure the PCS indirect addressing support */
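+ /* The OFFSET field is scaled by 64 and SIZE encodes a power-of-two
+ * window of 1 << (SIZE + 7), from which the window mask is derived.
+ */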
+ reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF);
+ pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
+ pdata->xpcs_window <<= 6;
+ pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
+ pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
+ pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
+ pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
+ PMD_INIT_LOG(DEBUG,
+ "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
+ pdata->xpcs_window_size, pdata->xpcs_window_mask);
+ XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);
+
+ /* Retrieve the MAC address */
+ mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
+ mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
+ pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
+ pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
+ pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
+ pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
+ pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
+ pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;
+
+ eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr",
+ ETHER_ADDR_LEN, 0);
+ if (!eth_dev->data->mac_addrs) {
+ PMD_INIT_LOG(ERR,
+ "Failed to alloc %u bytes needed to store MAC addr tbl",
+ ETHER_ADDR_LEN);
+ return -ENOMEM;
+ }
+
+ if (!is_valid_assigned_ether_addr(&pdata->mac_addr))
+ eth_random_addr(pdata->mac_addr.addr_bytes);
+
+ /* Copy the permanent MAC address */
+ ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);
+
+ /* Clock settings */
+ pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
+ pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;
+
+ /* Set the DMA coherency values */
+ pdata->coherent = 1;
+ pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
+ pdata->arcache = AXGBE_DMA_OS_ARCACHE;
+ pdata->awcache = AXGBE_DMA_OS_AWCACHE;
+
+ /* Set the maximum channels and queues */
+ reg = XP_IOREAD(pdata, XP_PROP_1);
+ pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
+ pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
+ pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
+ pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);
+
+ /* Set the hardware channel and queue counts */
+ axgbe_set_counts(pdata);
+
+ /* Set the maximum fifo amounts */
+ reg = XP_IOREAD(pdata, XP_PROP_2);
+ pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
+ pdata->tx_max_fifo_size *= 16384;
+ pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
+ pdata->vdata->tx_max_fifo_size);
+ pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
+ pdata->rx_max_fifo_size *= 16384;
+ pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
+ pdata->vdata->rx_max_fifo_size);
+ /* Issue software reset to DMA */
+ ret = pdata->hw_if.exit(pdata);
+ if (ret)
+ PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");
+
+ /* Set default configuration data */
+ axgbe_default_config(pdata);
+
+ /* Set default max values if not provided */
+ if (!pdata->tx_max_fifo_size)
+ pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
+ if (!pdata->rx_max_fifo_size)
+ pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;
+
+ pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
+ pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
+ pthread_mutex_init(&pdata->xpcs_mutex, NULL);
+ pthread_mutex_init(&pdata->i2c_mutex, NULL);
+ pthread_mutex_init(&pdata->an_mutex, NULL);
+ pthread_mutex_init(&pdata->phy_mutex, NULL);
+
+ ret = pdata->phy_if.phy_init(pdata);
+ if (ret) {
+ rte_free(eth_dev->data->mac_addrs);
+ return ret;
+ }
+
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ axgbe_dev_interrupt_handler,
+ (void *)eth_dev);
+ PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
+
+ return 0;
+}
+
+static int
+eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ /* Free MAC addresses */
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+ axgbe_dev_clear_queues(eth_dev);
+
+ /* disable uio intr before callback unregister */
+ rte_intr_disable(&pci_dev->intr_handle);
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
+ axgbe_dev_interrupt_handler,
+ (void *)eth_dev);
+
+ return 0;
+}
+
+static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct axgbe_port), eth_axgbe_dev_init);
+}
+
+static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
+}
+
+static struct rte_pci_driver rte_axgbe_pmd = {
+ .id_table = pci_id_axgbe_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_axgbe_pci_probe,
+ .remove = eth_axgbe_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");
+
+RTE_INIT(axgbe_init_log);
+static void
+axgbe_init_log(void)
+{
+ axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
+ if (axgbe_logtype_init >= 0)
+ rte_log_set_level(axgbe_logtype_init, RTE_LOG_NOTICE);
+ axgbe_logtype_driver = rte_log_register("pmd.net.axgbe.driver");
+ if (axgbe_logtype_driver >= 0)
+ rte_log_set_level(axgbe_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
new file mode 100644
index 00000000..b1cd2980
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_ethdev.h
@@ -0,0 +1,586 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#ifndef RTE_ETH_AXGBE_H_
+#define RTE_ETH_AXGBE_H_
+
+#include <rte_mempool.h>
+#include <rte_lcore.h>
+#include "axgbe_common.h"
+
+#define IRQ 0xff
+#define VLAN_HLEN 4
+
+#define AXGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
+#define AXGBE_RX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
+#define AXGBE_RX_MIN_BUF_SIZE (ETHER_MAX_LEN + VLAN_HLEN)
+#define AXGBE_MAX_MAC_ADDRS 1
+
+#define AXGBE_RX_BUF_ALIGN 64
+
+#define AXGBE_MAX_DMA_CHANNELS 16
+#define AXGBE_MAX_QUEUES 16
+#define AXGBE_PRIORITY_QUEUES 8
+#define AXGBE_DMA_STOP_TIMEOUT 1
+
+/* DMA cache settings - Outer sharable, write-back, write-allocate */
+#define AXGBE_DMA_OS_AXDOMAIN 0x2
+#define AXGBE_DMA_OS_ARCACHE 0xb
+#define AXGBE_DMA_OS_AWCACHE 0xf
+
+/* DMA cache settings - System, no caches used */
+#define AXGBE_DMA_SYS_AXDOMAIN 0x3
+#define AXGBE_DMA_SYS_ARCACHE 0x0
+#define AXGBE_DMA_SYS_AWCACHE 0x0
+
+/* DMA channel interrupt modes */
+#define AXGBE_IRQ_MODE_EDGE 0
+#define AXGBE_IRQ_MODE_LEVEL 1
+
+#define AXGBE_DMA_INTERRUPT_MASK 0x31c7
+
+#define AXGMAC_MIN_PACKET 60
+#define AXGMAC_STD_PACKET_MTU 1500
+#define AXGMAC_MAX_STD_PACKET 1518
+#define AXGMAC_JUMBO_PACKET_MTU 9000
+#define AXGMAC_MAX_JUMBO_PACKET 9018
+/* Inter-frame gap + preamble */
+#define AXGMAC_ETH_PREAMBLE (12 + 8)
+
+#define AXGMAC_PFC_DATA_LEN 46
+#define AXGMAC_PFC_DELAYS 14000
+
+/* PCI BAR mapping */
+#define AXGBE_AXGMAC_BAR 0
+#define AXGBE_XPCS_BAR 1
+#define AXGBE_MAC_PROP_OFFSET 0x1d000
+#define AXGBE_I2C_CTRL_OFFSET 0x1e000
+
+/* PCI clock frequencies */
+#define AXGBE_V2_DMA_CLOCK_FREQ 500000000
+#define AXGBE_V2_PTP_CLOCK_FREQ 125000000
+
+#define AXGMAC_FIFO_MIN_ALLOC 2048
+#define AXGMAC_FIFO_UNIT 256
+#define AXGMAC_FIFO_ALIGN(_x) \
+ (((_x) + AXGMAC_FIFO_UNIT - 1) & ~(AXGMAC_FIFO_UNIT - 1))
+#define AXGMAC_FIFO_FC_OFF 2048
+#define AXGMAC_FIFO_FC_MIN 4096
+
+#define AXGBE_TC_MIN_QUANTUM 10
+
+/* Flow control queue count */
+#define AXGMAC_MAX_FLOW_CONTROL_QUEUES 8
+
+/* Flow control threshold units */
+#define AXGMAC_FLOW_CONTROL_UNIT 512
+#define AXGMAC_FLOW_CONTROL_ALIGN(_x) \
+ (((_x) + AXGMAC_FLOW_CONTROL_UNIT - 1) & \
+ ~(AXGMAC_FLOW_CONTROL_UNIT - 1))
+#define AXGMAC_FLOW_CONTROL_VALUE(_x) \
+ (((_x) < 1024) ? 0 : ((_x) / AXGMAC_FLOW_CONTROL_UNIT) - 2)
+#define AXGMAC_FLOW_CONTROL_MAX 33280
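+/* Example: AXGMAC_FLOW_CONTROL_VALUE(4096) = (4096 / 512) - 2 = 6; thresholds
+ * are encoded in 512-byte units with an implicit 1024-byte offset.
+ */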
+
+/* Maximum MAC address hash table size (256 bits = 8 bytes) */
+#define AXGBE_MAC_HASH_TABLE_SIZE 8
+
+/* Receive Side Scaling */
+#define AXGBE_RSS_OFFLOAD ( \
+ ETH_RSS_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_NONFRAG_IPV6_UDP)
+
+#define AXGBE_RSS_HASH_KEY_SIZE 40
+#define AXGBE_RSS_MAX_TABLE_SIZE 256
+#define AXGBE_RSS_LOOKUP_TABLE_TYPE 0
+#define AXGBE_RSS_HASH_KEY_TYPE 1
+
+/* Auto-negotiation */
+#define AXGBE_AN_MS_TIMEOUT 500
+#define AXGBE_LINK_TIMEOUT 5
+
+#define AXGBE_SGMII_AN_LINK_STATUS BIT(1)
+#define AXGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
+#define AXGBE_SGMII_AN_LINK_SPEED_100 0x04
+#define AXGBE_SGMII_AN_LINK_SPEED_1000 0x08
+#define AXGBE_SGMII_AN_LINK_DUPLEX BIT(4)
+
+/* ECC correctable error notification window (seconds) */
+#define AXGBE_ECC_LIMIT 60
+
+/* MDIO port types */
+#define AXGMAC_MAX_C22_PORT 3
+
+/* Helper macro for descriptor handling
+ * Always use AXGBE_GET_DESC_DATA to access the descriptor data
+ * since the index is free-running and needs to be and-ed
+ * with the descriptor count value of the ring to index to
+ * the proper descriptor data.
+ */
+#define AXGBE_GET_DESC_DATA(_ring, _idx) \
+ ((_ring)->rdata + \
+ ((_idx) & ((_ring)->rdesc_count - 1)))
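+/* Example: with rdesc_count = 512, a free-running index of 515 wraps to
+ * ring entry 515 & 511 = 3.
+ */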
+
+struct axgbe_port;
+
+enum axgbe_state {
+ AXGBE_DOWN,
+ AXGBE_LINK_INIT,
+ AXGBE_LINK_ERR,
+ AXGBE_STOPPED,
+};
+
+enum axgbe_int {
+ AXGMAC_INT_DMA_CH_SR_TI,
+ AXGMAC_INT_DMA_CH_SR_TPS,
+ AXGMAC_INT_DMA_CH_SR_TBU,
+ AXGMAC_INT_DMA_CH_SR_RI,
+ AXGMAC_INT_DMA_CH_SR_RBU,
+ AXGMAC_INT_DMA_CH_SR_RPS,
+ AXGMAC_INT_DMA_CH_SR_TI_RI,
+ AXGMAC_INT_DMA_CH_SR_FBE,
+ AXGMAC_INT_DMA_ALL,
+};
+
+enum axgbe_int_state {
+ AXGMAC_INT_STATE_SAVE,
+ AXGMAC_INT_STATE_RESTORE,
+};
+
+enum axgbe_ecc_sec {
+ AXGBE_ECC_SEC_TX,
+ AXGBE_ECC_SEC_RX,
+ AXGBE_ECC_SEC_DESC,
+};
+
+enum axgbe_speed {
+ AXGBE_SPEED_1000 = 0,
+ AXGBE_SPEED_2500,
+ AXGBE_SPEED_10000,
+ AXGBE_SPEEDS,
+};
+
+enum axgbe_xpcs_access {
+ AXGBE_XPCS_ACCESS_V1 = 0,
+ AXGBE_XPCS_ACCESS_V2,
+};
+
+enum axgbe_an_mode {
+ AXGBE_AN_MODE_CL73 = 0,
+ AXGBE_AN_MODE_CL73_REDRV,
+ AXGBE_AN_MODE_CL37,
+ AXGBE_AN_MODE_CL37_SGMII,
+ AXGBE_AN_MODE_NONE,
+};
+
+enum axgbe_an {
+ AXGBE_AN_READY = 0,
+ AXGBE_AN_PAGE_RECEIVED,
+ AXGBE_AN_INCOMPAT_LINK,
+ AXGBE_AN_COMPLETE,
+ AXGBE_AN_NO_LINK,
+ AXGBE_AN_ERROR,
+};
+
+enum axgbe_rx {
+ AXGBE_RX_BPA = 0,
+ AXGBE_RX_XNP,
+ AXGBE_RX_COMPLETE,
+ AXGBE_RX_ERROR,
+};
+
+enum axgbe_mode {
+ AXGBE_MODE_KX_1000 = 0,
+ AXGBE_MODE_KX_2500,
+ AXGBE_MODE_KR,
+ AXGBE_MODE_X,
+ AXGBE_MODE_SGMII_100,
+ AXGBE_MODE_SGMII_1000,
+ AXGBE_MODE_SFI,
+ AXGBE_MODE_UNKNOWN,
+};
+
+enum axgbe_speedset {
+ AXGBE_SPEEDSET_1000_10000 = 0,
+ AXGBE_SPEEDSET_2500_10000,
+};
+
+enum axgbe_mdio_mode {
+ AXGBE_MDIO_MODE_NONE = 0,
+ AXGBE_MDIO_MODE_CL22,
+ AXGBE_MDIO_MODE_CL45,
+};
+
+struct axgbe_phy {
+ uint32_t supported;
+ uint32_t advertising;
+ uint32_t lp_advertising;
+
+ int address;
+
+ int autoneg;
+ int speed;
+ int duplex;
+
+ int link;
+
+ int pause_autoneg;
+ int tx_pause;
+ int rx_pause;
+};
+
+enum axgbe_i2c_cmd {
+ AXGBE_I2C_CMD_READ = 0,
+ AXGBE_I2C_CMD_WRITE,
+};
+
+struct axgbe_i2c_op {
+ enum axgbe_i2c_cmd cmd;
+
+ unsigned int target;
+
+ uint8_t *buf;
+ unsigned int len;
+};
+
+struct axgbe_i2c_op_state {
+ struct axgbe_i2c_op *op;
+
+ unsigned int tx_len;
+ unsigned char *tx_buf;
+
+ unsigned int rx_len;
+ unsigned char *rx_buf;
+
+ unsigned int tx_abort_source;
+
+ int ret;
+};
+
+struct axgbe_i2c {
+ unsigned int started;
+ unsigned int max_speed_mode;
+ unsigned int rx_fifo_size;
+ unsigned int tx_fifo_size;
+
+ struct axgbe_i2c_op_state op_state;
+};
+
+struct axgbe_hw_if {
+ void (*config_flow_control)(struct axgbe_port *);
+ int (*config_rx_mode)(struct axgbe_port *);
+
+ int (*init)(struct axgbe_port *);
+
+ int (*read_mmd_regs)(struct axgbe_port *, int, int);
+ void (*write_mmd_regs)(struct axgbe_port *, int, int, int);
+ int (*set_speed)(struct axgbe_port *, int);
+
+ int (*set_ext_mii_mode)(struct axgbe_port *, unsigned int,
+ enum axgbe_mdio_mode);
+ int (*read_ext_mii_regs)(struct axgbe_port *, int, int);
+ int (*write_ext_mii_regs)(struct axgbe_port *, int, int, uint16_t);
+
+ /* Flow control */
+ int (*config_tx_flow_control)(struct axgbe_port *);
+ int (*config_rx_flow_control)(struct axgbe_port *);
+
+ int (*exit)(struct axgbe_port *);
+};
+
+/* This structure holds the implementation-specific routines for a PHY
+ * implementation. All routines are required unless noted below.
+ * Optional routines:
+ * kr_training_pre, kr_training_post
+ */
+struct axgbe_phy_impl_if {
+ /* Perform Setup/teardown actions */
+ int (*init)(struct axgbe_port *);
+ void (*exit)(struct axgbe_port *);
+
+ /* Perform start/stop specific actions */
+ int (*reset)(struct axgbe_port *);
+ int (*start)(struct axgbe_port *);
+ void (*stop)(struct axgbe_port *);
+
+ /* Return the link status */
+ int (*link_status)(struct axgbe_port *, int *);
+
+ /* Indicate if a particular speed is valid */
+ int (*valid_speed)(struct axgbe_port *, int);
+
+ /* Check if the specified mode can/should be used */
+ bool (*use_mode)(struct axgbe_port *, enum axgbe_mode);
+ /* Switch the PHY into various modes */
+ void (*set_mode)(struct axgbe_port *, enum axgbe_mode);
+ /* Retrieve mode needed for a specific speed */
+ enum axgbe_mode (*get_mode)(struct axgbe_port *, int);
+ /* Retrieve new/next mode when trying to auto-negotiate */
+ enum axgbe_mode (*switch_mode)(struct axgbe_port *);
+ /* Retrieve current mode */
+ enum axgbe_mode (*cur_mode)(struct axgbe_port *);
+
+ /* Retrieve current auto-negotiation mode */
+ enum axgbe_an_mode (*an_mode)(struct axgbe_port *);
+
+ /* Configure auto-negotiation settings */
+ int (*an_config)(struct axgbe_port *);
+
+ /* Set/override auto-negotiation advertisement settings */
+ unsigned int (*an_advertising)(struct axgbe_port *port);
+
+ /* Process results of auto-negotiation */
+ enum axgbe_mode (*an_outcome)(struct axgbe_port *);
+
+ /* Pre/Post auto-negotiation support */
+ void (*an_pre)(struct axgbe_port *port);
+ void (*an_post)(struct axgbe_port *port);
+
+ /* Pre/Post KR training enablement support */
+ void (*kr_training_pre)(struct axgbe_port *);
+ void (*kr_training_post)(struct axgbe_port *);
+};
+
+struct axgbe_phy_if {
+ /* For PHY setup/teardown */
+ int (*phy_init)(struct axgbe_port *);
+ void (*phy_exit)(struct axgbe_port *);
+
+ /* For PHY support when setting device up/down */
+ int (*phy_reset)(struct axgbe_port *);
+ int (*phy_start)(struct axgbe_port *);
+ void (*phy_stop)(struct axgbe_port *);
+
+ /* For PHY support while device is up */
+ void (*phy_status)(struct axgbe_port *);
+ int (*phy_config_aneg)(struct axgbe_port *);
+
+ /* For PHY settings validation */
+ int (*phy_valid_speed)(struct axgbe_port *, int);
+ /* For single interrupt support */
+ void (*an_isr)(struct axgbe_port *);
+ /* PHY implementation specific services */
+ struct axgbe_phy_impl_if phy_impl;
+};
+
+struct axgbe_i2c_if {
+ /* For initial I2C setup */
+ int (*i2c_init)(struct axgbe_port *);
+
+ /* For I2C support when setting device up/down */
+ int (*i2c_start)(struct axgbe_port *);
+ void (*i2c_stop)(struct axgbe_port *);
+
+ /* For performing I2C operations */
+ int (*i2c_xfer)(struct axgbe_port *, struct axgbe_i2c_op *);
+};
+
+/* This structure contains flags that indicate what hardware features
+ * or configurations are present in the device.
+ */
+struct axgbe_hw_features {
+ /* HW Version */
+ unsigned int version;
+
+ /* HW Feature Register0 */
+ unsigned int gmii; /* 1000 Mbps support */
+ unsigned int vlhash; /* VLAN Hash Filter */
+ unsigned int sma; /* SMA(MDIO) Interface */
+ unsigned int rwk; /* PMT remote wake-up packet */
+ unsigned int mgk; /* PMT magic packet */
+ unsigned int mmc; /* RMON module */
+ unsigned int aoe; /* ARP Offload */
+ unsigned int ts; /* IEEE 1588-2008 Advanced Timestamp */
+ unsigned int eee; /* Energy Efficient Ethernet */
+ unsigned int tx_coe; /* Tx Checksum Offload */
+ unsigned int rx_coe; /* Rx Checksum Offload */
+ unsigned int addn_mac; /* Additional MAC Addresses */
+ unsigned int ts_src; /* Timestamp Source */
+ unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */
+
+ /* HW Feature Register1 */
+ unsigned int rx_fifo_size; /* MTL Receive FIFO Size */
+ unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */
+ unsigned int adv_ts_hi; /* Advance Timestamping High Word */
+ unsigned int dma_width; /* DMA width */
+ unsigned int dcb; /* DCB Feature */
+ unsigned int sph; /* Split Header Feature */
+ unsigned int tso; /* TCP Segmentation Offload */
+ unsigned int dma_debug; /* DMA Debug Registers */
+ unsigned int rss; /* Receive Side Scaling */
+ unsigned int tc_cnt; /* Number of Traffic Classes */
+ unsigned int hash_table_size; /* Hash Table Size */
+ unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */
+
+ /* HW Feature Register2 */
+ unsigned int rx_q_cnt; /* Number of MTL Receive Queues */
+ unsigned int tx_q_cnt; /* Number of MTL Transmit Queues */
+ unsigned int rx_ch_cnt; /* Number of DMA Receive Channels */
+ unsigned int tx_ch_cnt; /* Number of DMA Transmit Channels */
+ unsigned int pps_out_num; /* Number of PPS outputs */
+ unsigned int aux_snap_num; /* Number of Aux snapshot inputs */
+};
+
+struct axgbe_version_data {
+ void (*init_function_ptrs_phy_impl)(struct axgbe_phy_if *);
+ enum axgbe_xpcs_access xpcs_access;
+ unsigned int mmc_64bit;
+ unsigned int tx_max_fifo_size;
+ unsigned int rx_max_fifo_size;
+ unsigned int tx_tstamp_workaround;
+ unsigned int ecc_support;
+ unsigned int i2c_support;
+ unsigned int an_cdr_workaround;
+};
+
+/*
+ * Structure to store private data for each port.
+ */
+struct axgbe_port {
+ /* Ethdev to which this port belongs */
+ struct rte_eth_dev *eth_dev;
+ /* PCI device info */
+ const struct rte_pci_device *pci_dev;
+ /* Version related data */
+ struct axgbe_version_data *vdata;
+
+ /* AXGMAC/XPCS related mmio registers */
+ void *xgmac_regs; /* AXGMAC CSRs */
+ void *xpcs_regs; /* XPCS MMD registers */
+ void *xprop_regs; /* AXGBE property registers */
+ void *xi2c_regs; /* AXGBE I2C CSRs */
+
+ bool cdr_track_early;
+ /* XPCS indirect addressing window settings */
+ unsigned int xpcs_window_def_reg;
+ unsigned int xpcs_window_sel_reg;
+ unsigned int xpcs_window;
+ unsigned int xpcs_window_size;
+ unsigned int xpcs_window_mask;
+
+ /* Flags representing axgbe_state */
+ unsigned long dev_state;
+
+ struct axgbe_hw_if hw_if;
+ struct axgbe_phy_if phy_if;
+ struct axgbe_i2c_if i2c_if;
+
+ /* AXI DMA settings */
+ unsigned int coherent;
+ unsigned int axdomain;
+ unsigned int arcache;
+ unsigned int awcache;
+
+ unsigned int tx_max_channel_count;
+ unsigned int rx_max_channel_count;
+ unsigned int channel_count;
+ unsigned int tx_ring_count;
+ unsigned int tx_desc_count;
+ unsigned int rx_ring_count;
+ unsigned int rx_desc_count;
+
+ unsigned int tx_max_q_count;
+ unsigned int rx_max_q_count;
+ unsigned int tx_q_count;
+ unsigned int rx_q_count;
+
+ /* Tx/Rx common settings */
+ unsigned int pblx8;
+
+ /* Tx settings */
+ unsigned int tx_sf_mode;
+ unsigned int tx_threshold;
+ unsigned int tx_pbl;
+ unsigned int tx_osp_mode;
+ unsigned int tx_max_fifo_size;
+
+ /* Rx settings */
+ unsigned int rx_sf_mode;
+ unsigned int rx_threshold;
+ unsigned int rx_pbl;
+ unsigned int rx_max_fifo_size;
+ unsigned int rx_buf_size;
+
+ /* Device clocks */
+ unsigned long sysclk_rate;
+ unsigned long ptpclk_rate;
+
+ /* Keeps track of power mode */
+ unsigned int power_down;
+
+ /* Current PHY settings */
+ int phy_link;
+ int phy_speed;
+
+ pthread_mutex_t xpcs_mutex;
+ pthread_mutex_t i2c_mutex;
+ pthread_mutex_t an_mutex;
+ pthread_mutex_t phy_mutex;
+
+ /* Flow control settings */
+ unsigned int pause_autoneg;
+ unsigned int tx_pause;
+ unsigned int rx_pause;
+ unsigned int rx_rfa[AXGBE_MAX_QUEUES];
+ unsigned int rx_rfd[AXGBE_MAX_QUEUES];
+ unsigned int fifo;
+
+ /* Receive Side Scaling settings */
+ u8 rss_key[AXGBE_RSS_HASH_KEY_SIZE];
+ uint32_t rss_table[AXGBE_RSS_MAX_TABLE_SIZE];
+ uint32_t rss_options;
+ int rss_enable;
+
+ /* Hardware features of the device */
+ struct axgbe_hw_features hw_feat;
+
+ struct ether_addr mac_addr;
+
+ /* Software Tx/Rx structure pointers*/
+ void **rx_queues;
+ void **tx_queues;
+
+ /* MDIO/PHY related settings */
+ unsigned int phy_started;
+ void *phy_data;
+ struct axgbe_phy phy;
+ int mdio_mmd;
+ unsigned long link_check;
+ volatile int mdio_completion;
+
+ unsigned int kr_redrv;
+
+ /* Auto-negotiation state machine support */
+ unsigned int an_int;
+ unsigned int an_status;
+ enum axgbe_an an_result;
+ enum axgbe_an an_state;
+ enum axgbe_rx kr_state;
+ enum axgbe_rx kx_state;
+ unsigned int an_supported;
+ unsigned int parallel_detect;
+ unsigned int fec_ability;
+ unsigned long an_start;
+ enum axgbe_an_mode an_mode;
+
+ /* I2C support */
+ struct axgbe_i2c i2c;
+ volatile int i2c_complete;
+
+ /* CRC stripping by hardware for Rx packets */
+ int crc_strip_enable;
+ /* Rx checksum offload to hardware */
+ uint32_t rx_csum_enable;
+};
+
+void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if);
+void axgbe_init_function_ptrs_phy(struct axgbe_phy_if *phy_if);
+void axgbe_init_function_ptrs_phy_v2(struct axgbe_phy_if *phy_if);
+void axgbe_init_function_ptrs_i2c(struct axgbe_i2c_if *i2c_if);
+
+#endif /* RTE_ETH_AXGBE_H_ */
diff --git a/drivers/net/axgbe/axgbe_i2c.c b/drivers/net/axgbe/axgbe_i2c.c
new file mode 100644
index 00000000..204ec367
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_i2c.c
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_ethdev.h"
+#include "axgbe_common.h"
+
+#define AXGBE_ABORT_COUNT 500
+#define AXGBE_DISABLE_COUNT 1000
+
+#define AXGBE_STD_SPEED 1
+
+#define AXGBE_INTR_RX_FULL BIT(IC_RAW_INTR_STAT_RX_FULL_INDEX)
+#define AXGBE_INTR_TX_EMPTY BIT(IC_RAW_INTR_STAT_TX_EMPTY_INDEX)
+#define AXGBE_INTR_TX_ABRT BIT(IC_RAW_INTR_STAT_TX_ABRT_INDEX)
+#define AXGBE_INTR_STOP_DET BIT(IC_RAW_INTR_STAT_STOP_DET_INDEX)
+#define AXGBE_DEFAULT_INT_MASK (AXGBE_INTR_RX_FULL | \
+ AXGBE_INTR_TX_EMPTY | \
+ AXGBE_INTR_TX_ABRT | \
+ AXGBE_INTR_STOP_DET)
+
+#define AXGBE_I2C_READ BIT(8)
+#define AXGBE_I2C_STOP BIT(9)
+
+static int axgbe_i2c_abort(struct axgbe_port *pdata)
+{
+ unsigned int wait = AXGBE_ABORT_COUNT;
+
+ /* Must be enabled to recognize the abort request */
+ XI2C_IOWRITE_BITS(pdata, IC_ENABLE, EN, 1);
+
+ /* Issue the abort */
+ XI2C_IOWRITE_BITS(pdata, IC_ENABLE, ABORT, 1);
+
+ while (wait--) {
+ if (!XI2C_IOREAD_BITS(pdata, IC_ENABLE, ABORT))
+ return 0;
+ rte_delay_us(500);
+ }
+
+ return -EBUSY;
+}
+
+static int axgbe_i2c_set_enable(struct axgbe_port *pdata, bool enable)
+{
+ unsigned int wait = AXGBE_DISABLE_COUNT;
+ unsigned int mode = enable ? 1 : 0;
+
+ while (wait--) {
+ XI2C_IOWRITE_BITS(pdata, IC_ENABLE, EN, mode);
+ if (XI2C_IOREAD_BITS(pdata, IC_ENABLE_STATUS, EN) == mode)
+ return 0;
+
+ rte_delay_us(100);
+ }
+
+ return -EBUSY;
+}
+
+static int axgbe_i2c_disable(struct axgbe_port *pdata)
+{
+ unsigned int ret;
+
+ ret = axgbe_i2c_set_enable(pdata, false);
+ if (ret) {
+ /* Disable failed, try an abort */
+ ret = axgbe_i2c_abort(pdata);
+ if (ret)
+ return ret;
+
+ /* Abort succeeded, try to disable again */
+ ret = axgbe_i2c_set_enable(pdata, false);
+ }
+
+ return ret;
+}
+
+static int axgbe_i2c_enable(struct axgbe_port *pdata)
+{
+ return axgbe_i2c_set_enable(pdata, true);
+}
+
+static void axgbe_i2c_clear_all_interrupts(struct axgbe_port *pdata)
+{
+ XI2C_IOREAD(pdata, IC_CLR_INTR);
+}
+
+static void axgbe_i2c_disable_interrupts(struct axgbe_port *pdata)
+{
+ XI2C_IOWRITE(pdata, IC_INTR_MASK, 0);
+}
+
+static void axgbe_i2c_enable_interrupts(struct axgbe_port *pdata)
+{
+ XI2C_IOWRITE(pdata, IC_INTR_MASK, AXGBE_DEFAULT_INT_MASK);
+}
+
+static void axgbe_i2c_write(struct axgbe_port *pdata)
+{
+ struct axgbe_i2c_op_state *state = &pdata->i2c.op_state;
+ unsigned int tx_slots;
+ unsigned int cmd;
+
+ /* Configured to never receive Rx overflows, so fill up Tx fifo */
+ tx_slots = pdata->i2c.tx_fifo_size - XI2C_IOREAD(pdata, IC_TXFLR);
+ while (tx_slots && state->tx_len) {
+ if (state->op->cmd == AXGBE_I2C_CMD_READ)
+ cmd = AXGBE_I2C_READ;
+ else
+ cmd = *state->tx_buf++;
+
+ if (state->tx_len == 1)
+ XI2C_SET_BITS(cmd, IC_DATA_CMD, STOP, 1);
+
+ XI2C_IOWRITE(pdata, IC_DATA_CMD, cmd);
+
+ tx_slots--;
+ state->tx_len--;
+ }
+
+ /* No more Tx operations, so ignore TX_EMPTY and return */
+ if (!state->tx_len)
+ XI2C_IOWRITE_BITS(pdata, IC_INTR_MASK, TX_EMPTY, 0);
+}
+
+static void axgbe_i2c_read(struct axgbe_port *pdata)
+{
+ struct axgbe_i2c_op_state *state = &pdata->i2c.op_state;
+ unsigned int rx_slots;
+
+ /* Anything to be read? */
+ if (state->op->cmd != AXGBE_I2C_CMD_READ)
+ return;
+
+ rx_slots = XI2C_IOREAD(pdata, IC_RXFLR);
+ while (rx_slots && state->rx_len) {
+ *state->rx_buf++ = XI2C_IOREAD(pdata, IC_DATA_CMD);
+ state->rx_len--;
+ rx_slots--;
+ }
+}
+
+static void axgbe_i2c_clear_isr_interrupts(struct axgbe_port *pdata,
+ unsigned int isr)
+{
+ struct axgbe_i2c_op_state *state = &pdata->i2c.op_state;
+
+ if (isr & AXGBE_INTR_TX_ABRT) {
+ state->tx_abort_source = XI2C_IOREAD(pdata, IC_TX_ABRT_SOURCE);
+ XI2C_IOREAD(pdata, IC_CLR_TX_ABRT);
+ }
+
+ if (isr & AXGBE_INTR_STOP_DET)
+ XI2C_IOREAD(pdata, IC_CLR_STOP_DET);
+}
+
+static int axgbe_i2c_isr(struct axgbe_port *pdata)
+{
+ struct axgbe_i2c_op_state *state = &pdata->i2c.op_state;
+ unsigned int isr;
+
+ isr = XI2C_IOREAD(pdata, IC_RAW_INTR_STAT);
+
+ axgbe_i2c_clear_isr_interrupts(pdata, isr);
+
+ if (isr & AXGBE_INTR_TX_ABRT) {
+ axgbe_i2c_disable_interrupts(pdata);
+
+ state->ret = -EIO;
+ goto out;
+ }
+
+ /* Check for data in the Rx fifo */
+ axgbe_i2c_read(pdata);
+
+ /* Fill up the Tx fifo next */
+ axgbe_i2c_write(pdata);
+
+out:
+ /* Complete on an error or STOP condition */
+ if (state->ret || XI2C_GET_BITS(isr, IC_RAW_INTR_STAT, STOP_DET))
+ return 1;
+
+ return 0;
+}
+
+static void axgbe_i2c_set_mode(struct axgbe_port *pdata)
+{
+ unsigned int reg;
+
+ reg = XI2C_IOREAD(pdata, IC_CON);
+ XI2C_SET_BITS(reg, IC_CON, MASTER_MODE, 1);
+ XI2C_SET_BITS(reg, IC_CON, SLAVE_DISABLE, 1);
+ XI2C_SET_BITS(reg, IC_CON, RESTART_EN, 1);
+ XI2C_SET_BITS(reg, IC_CON, SPEED, AXGBE_STD_SPEED);
+ XI2C_SET_BITS(reg, IC_CON, RX_FIFO_FULL_HOLD, 1);
+ XI2C_IOWRITE(pdata, IC_CON, reg);
+}
+
+static void axgbe_i2c_get_features(struct axgbe_port *pdata)
+{
+ struct axgbe_i2c *i2c = &pdata->i2c;
+ unsigned int reg;
+
+ reg = XI2C_IOREAD(pdata, IC_COMP_PARAM_1);
+ i2c->max_speed_mode = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
+ MAX_SPEED_MODE);
+ i2c->rx_fifo_size = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
+ RX_BUFFER_DEPTH);
+ i2c->tx_fifo_size = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
+ TX_BUFFER_DEPTH);
+}
+
+static void axgbe_i2c_set_target(struct axgbe_port *pdata, unsigned int addr)
+{
+ XI2C_IOWRITE(pdata, IC_TAR, addr);
+}
+
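+/* A transfer takes the I2C mutex, disables the controller, programs the
+ * target address and operation state, re-enables the controller with
+ * interrupts unmasked, and then polls IC_RAW_INTR_STAT for up to one
+ * second, driving the ISR by hand until completion or timeout.
+ */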
+static int axgbe_i2c_xfer(struct axgbe_port *pdata, struct axgbe_i2c_op *op)
+{
+ struct axgbe_i2c_op_state *state = &pdata->i2c.op_state;
+ int ret;
+ uint64_t timeout;
+
+ pthread_mutex_lock(&pdata->i2c_mutex);
+ ret = axgbe_i2c_disable(pdata);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "failed to disable i2c master\n");
+ return ret;
+ }
+
+ axgbe_i2c_set_target(pdata, op->target);
+
+ memset(state, 0, sizeof(*state));
+ state->op = op;
+ state->tx_len = op->len;
+ state->tx_buf = (unsigned char *)op->buf;
+ state->rx_len = op->len;
+ state->rx_buf = (unsigned char *)op->buf;
+
+ axgbe_i2c_clear_all_interrupts(pdata);
+ ret = axgbe_i2c_enable(pdata);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "failed to enable i2c master\n");
+ goto unlock;
+ }
+
+ /* Enabling the interrupts will cause the TX FIFO empty interrupt to
+ * fire and begin to process the command via the ISR.
+ */
+ axgbe_i2c_enable_interrupts(pdata);
+ timeout = rte_get_timer_cycles() + rte_get_timer_hz();
+
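+ /* Poll the raw interrupt status for up to roughly one second,
+ * running the ISR whenever an interrupt source is pending.
+ */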
+ while (time_before(rte_get_timer_cycles(), timeout)) {
+ rte_delay_us(100);
+ if (XI2C_IOREAD(pdata, IC_RAW_INTR_STAT)) {
+ if (axgbe_i2c_isr(pdata))
+ goto success;
+ }
+ }
+
+ PMD_DRV_LOG(ERR, "i2c operation timed out\n");
+ axgbe_i2c_disable_interrupts(pdata);
+ axgbe_i2c_disable(pdata);
+ ret = -ETIMEDOUT;
+ goto unlock;
+
+success:
+ ret = state->ret;
+ if (ret) {
+ if (state->tx_abort_source & IC_TX_ABRT_7B_ADDR_NOACK)
+ ret = -ENOTCONN;
+ else if (state->tx_abort_source & IC_TX_ABRT_ARB_LOST)
+ ret = -EAGAIN;
+ }
+
+unlock:
+ pthread_mutex_unlock(&pdata->i2c_mutex);
+ return ret;
+}
+
+static void axgbe_i2c_stop(struct axgbe_port *pdata)
+{
+ if (!pdata->i2c.started)
+ return;
+
+ pdata->i2c.started = 0;
+ axgbe_i2c_disable_interrupts(pdata);
+ axgbe_i2c_disable(pdata);
+ axgbe_i2c_clear_all_interrupts(pdata);
+}
+
+static int axgbe_i2c_start(struct axgbe_port *pdata)
+{
+ if (pdata->i2c.started)
+ return 0;
+
+ pdata->i2c.started = 1;
+
+ return 0;
+}
+
+static int axgbe_i2c_init(struct axgbe_port *pdata)
+{
+ int ret;
+
+ axgbe_i2c_disable_interrupts(pdata);
+
+ ret = axgbe_i2c_disable(pdata);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "failed to disable i2c master\n");
+ return ret;
+ }
+
+ axgbe_i2c_get_features(pdata);
+
+ axgbe_i2c_set_mode(pdata);
+
+ axgbe_i2c_clear_all_interrupts(pdata);
+
+ return 0;
+}
+
+void axgbe_init_function_ptrs_i2c(struct axgbe_i2c_if *i2c_if)
+{
+ i2c_if->i2c_init = axgbe_i2c_init;
+ i2c_if->i2c_start = axgbe_i2c_start;
+ i2c_if->i2c_stop = axgbe_i2c_stop;
+ i2c_if->i2c_xfer = axgbe_i2c_xfer;
+}
diff --git a/drivers/net/axgbe/axgbe_logs.h b/drivers/net/axgbe/axgbe_logs.h
new file mode 100644
index 00000000..d1487017
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_logs.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _AXGBE_LOGS_H_
+#define _AXGBE_LOGS_H_
+
+#include <stdio.h>
+
+#include <rte_log.h>
+
+extern int axgbe_logtype_init;
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, axgbe_logtype_init, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#ifdef RTE_LIBRTE_AXGBE_PMD_DEBUG
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#else
+#define PMD_INIT_FUNC_TRACE() do { } while (0)
+#endif
+
+extern int axgbe_logtype_driver;
+#define PMD_DRV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, axgbe_logtype_driver, "%s(): " fmt, \
+ __func__, ## args)
+
+#endif /* _AXGBE_LOGS_H_ */
diff --git a/drivers/net/axgbe/axgbe_mdio.c b/drivers/net/axgbe/axgbe_mdio.c
new file mode 100644
index 00000000..2721e5cc
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_mdio.c
@@ -0,0 +1,1066 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_ethdev.h"
+#include "axgbe_common.h"
+#include "axgbe_phy.h"
+
+static void axgbe_an37_clear_interrupts(struct axgbe_port *pdata)
+{
+ int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT);
+ reg &= ~AXGBE_AN_CL37_INT_MASK;
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT, reg);
+}
+
+static void axgbe_an37_disable_interrupts(struct axgbe_port *pdata)
+{
+ int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL);
+ reg &= ~AXGBE_AN_CL37_INT_MASK;
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL);
+ reg &= ~AXGBE_PCS_CL37_BP;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL, reg);
+}
+
+static void axgbe_an73_clear_interrupts(struct axgbe_port *pdata)
+{
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+}
+
+static void axgbe_an73_disable_interrupts(struct axgbe_port *pdata)
+{
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
+}
+
+static void axgbe_an73_enable_interrupts(struct axgbe_port *pdata)
+{
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK,
+ AXGBE_AN_CL73_INT_MASK);
+}
+
+static void axgbe_an_enable_interrupts(struct axgbe_port *pdata)
+{
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+ axgbe_an73_enable_interrupts(pdata);
+ break;
+ case AXGBE_AN_MODE_CL37:
+ case AXGBE_AN_MODE_CL37_SGMII:
+ PMD_DRV_LOG(ERR, "Unsupported AN_MODE_CL37\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void axgbe_an_clear_interrupts_all(struct axgbe_port *pdata)
+{
+ axgbe_an73_clear_interrupts(pdata);
+ axgbe_an37_clear_interrupts(pdata);
+}
+
+static void axgbe_an73_enable_kr_training(struct axgbe_port *pdata)
+{
+ unsigned int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+
+ reg |= AXGBE_KR_TRAINING_ENABLE;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+}
+
+static void axgbe_an73_disable_kr_training(struct axgbe_port *pdata)
+{
+ unsigned int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+
+ reg &= ~AXGBE_KR_TRAINING_ENABLE;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+}
+
+static void axgbe_kr_mode(struct axgbe_port *pdata)
+{
+ /* Enable KR training */
+ axgbe_an73_enable_kr_training(pdata);
+
+ /* Set MAC to 10G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_10000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_KR);
+}
+
+static void axgbe_kx_2500_mode(struct axgbe_port *pdata)
+{
+ /* Disable KR training */
+ axgbe_an73_disable_kr_training(pdata);
+
+ /* Set MAC to 2.5G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_2500);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_KX_2500);
+}
+
+static void axgbe_kx_1000_mode(struct axgbe_port *pdata)
+{
+ /* Disable KR training */
+ axgbe_an73_disable_kr_training(pdata);
+
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_KX_1000);
+}
+
+static void axgbe_sfi_mode(struct axgbe_port *pdata)
+{
+ /* If a KR re-driver is present, change to KR mode instead */
+ if (pdata->kr_redrv)
+ return axgbe_kr_mode(pdata);
+
+ /* Disable KR training */
+ axgbe_an73_disable_kr_training(pdata);
+
+ /* Set MAC to 10G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_10000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_SFI);
+}
+
+static void axgbe_x_mode(struct axgbe_port *pdata)
+{
+ /* Disable KR training */
+ axgbe_an73_disable_kr_training(pdata);
+
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_X);
+}
+
+static void axgbe_sgmii_1000_mode(struct axgbe_port *pdata)
+{
+ /* Disable KR training */
+ axgbe_an73_disable_kr_training(pdata);
+
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_SGMII_1000);
+}
+
+static void axgbe_sgmii_100_mode(struct axgbe_port *pdata)
+{
+ /* Disable KR training */
+ axgbe_an73_disable_kr_training(pdata);
+
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_SGMII_100);
+}
+
+static enum axgbe_mode axgbe_cur_mode(struct axgbe_port *pdata)
+{
+ return pdata->phy_if.phy_impl.cur_mode(pdata);
+}
+
+static bool axgbe_in_kr_mode(struct axgbe_port *pdata)
+{
+ return axgbe_cur_mode(pdata) == AXGBE_MODE_KR;
+}
+
+static void axgbe_change_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ switch (mode) {
+ case AXGBE_MODE_KX_1000:
+ axgbe_kx_1000_mode(pdata);
+ break;
+ case AXGBE_MODE_KX_2500:
+ axgbe_kx_2500_mode(pdata);
+ break;
+ case AXGBE_MODE_KR:
+ axgbe_kr_mode(pdata);
+ break;
+ case AXGBE_MODE_SGMII_100:
+ axgbe_sgmii_100_mode(pdata);
+ break;
+ case AXGBE_MODE_SGMII_1000:
+ axgbe_sgmii_1000_mode(pdata);
+ break;
+ case AXGBE_MODE_X:
+ axgbe_x_mode(pdata);
+ break;
+ case AXGBE_MODE_SFI:
+ axgbe_sfi_mode(pdata);
+ break;
+ case AXGBE_MODE_UNKNOWN:
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid operation mode requested (%u)\n", mode);
+ }
+}
+
+static void axgbe_switch_mode(struct axgbe_port *pdata)
+{
+ axgbe_change_mode(pdata, pdata->phy_if.phy_impl.switch_mode(pdata));
+}
+
+static void axgbe_set_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ if (mode == axgbe_cur_mode(pdata))
+ return;
+
+ axgbe_change_mode(pdata, mode);
+}
+
+static bool axgbe_use_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ return pdata->phy_if.phy_impl.use_mode(pdata, mode);
+}
+
+static void axgbe_an37_set(struct axgbe_port *pdata, bool enable,
+ bool restart)
+{
+ unsigned int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_CTRL1);
+ reg &= ~MDIO_VEND2_CTRL1_AN_ENABLE;
+
+ if (enable)
+ reg |= MDIO_VEND2_CTRL1_AN_ENABLE;
+
+ if (restart)
+ reg |= MDIO_VEND2_CTRL1_AN_RESTART;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_CTRL1, reg);
+}
+
+static void axgbe_an37_disable(struct axgbe_port *pdata)
+{
+ axgbe_an37_set(pdata, false, false);
+ axgbe_an37_disable_interrupts(pdata);
+}
+
+static void axgbe_an73_set(struct axgbe_port *pdata, bool enable,
+ bool restart)
+{
+ unsigned int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1);
+ reg &= ~MDIO_AN_CTRL1_ENABLE;
+
+ if (enable)
+ reg |= MDIO_AN_CTRL1_ENABLE;
+
+ if (restart)
+ reg |= MDIO_AN_CTRL1_RESTART;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg);
+}
+
+static void axgbe_an73_restart(struct axgbe_port *pdata)
+{
+ axgbe_an73_enable_interrupts(pdata);
+ axgbe_an73_set(pdata, true, true);
+}
+
+static void axgbe_an73_disable(struct axgbe_port *pdata)
+{
+ axgbe_an73_set(pdata, false, false);
+ axgbe_an73_disable_interrupts(pdata);
+ pdata->an_start = 0;
+}
+
+static void axgbe_an_restart(struct axgbe_port *pdata)
+{
+ if (pdata->phy_if.phy_impl.an_pre)
+ pdata->phy_if.phy_impl.an_pre(pdata);
+
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+ axgbe_an73_restart(pdata);
+ break;
+ case AXGBE_AN_MODE_CL37:
+ case AXGBE_AN_MODE_CL37_SGMII:
+ PMD_DRV_LOG(ERR, "Unsupported AN_MODE_CL37\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void axgbe_an_disable(struct axgbe_port *pdata)
+{
+ if (pdata->phy_if.phy_impl.an_post)
+ pdata->phy_if.phy_impl.an_post(pdata);
+
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+ axgbe_an73_disable(pdata);
+ break;
+ case AXGBE_AN_MODE_CL37:
+ case AXGBE_AN_MODE_CL37_SGMII:
+ PMD_DRV_LOG(ERR, "Unsupported AN_MODE_CL37\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void axgbe_an_disable_all(struct axgbe_port *pdata)
+{
+ axgbe_an73_disable(pdata);
+ axgbe_an37_disable(pdata);
+}
+
+static enum axgbe_an axgbe_an73_tx_training(struct axgbe_port *pdata,
+ enum axgbe_rx *state)
+{
+ unsigned int ad_reg, lp_reg, reg;
+
+ *state = AXGBE_RX_COMPLETE;
+
+ /* If we're not in KR mode then we're done */
+ if (!axgbe_in_kr_mode(pdata))
+ return AXGBE_AN_PAGE_RECEIVED;
+
+ /* Enable/Disable FEC */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+
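+ /* Advertise FEC only when both the local and link partner
+ * advertisement registers indicate FEC capability.
+ */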
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL);
+ reg &= ~(MDIO_PMA_10GBR_FECABLE_ABLE | MDIO_PMA_10GBR_FECABLE_ERRABLE);
+ if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
+ reg |= pdata->fec_ability;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL, reg);
+
+ /* Start KR training */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+ if (reg & AXGBE_KR_TRAINING_ENABLE) {
+ if (pdata->phy_if.phy_impl.kr_training_pre)
+ pdata->phy_if.phy_impl.kr_training_pre(pdata);
+
+ reg |= AXGBE_KR_TRAINING_START;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
+ reg);
+
+ if (pdata->phy_if.phy_impl.kr_training_post)
+ pdata->phy_if.phy_impl.kr_training_post(pdata);
+ }
+
+ return AXGBE_AN_PAGE_RECEIVED;
+}
+
+static enum axgbe_an axgbe_an73_tx_xnp(struct axgbe_port *pdata,
+ enum axgbe_rx *state)
+{
+ u16 msg;
+
+ *state = AXGBE_RX_XNP;
+
+ msg = AXGBE_XNP_MCF_NULL_MESSAGE;
+ msg |= AXGBE_XNP_MP_FORMATTED;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0);
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP, msg);
+
+ return AXGBE_AN_PAGE_RECEIVED;
+}
+
+static enum axgbe_an axgbe_an73_rx_bpa(struct axgbe_port *pdata,
+ enum axgbe_rx *state)
+{
+ unsigned int link_support;
+ unsigned int reg, ad_reg, lp_reg;
+
+ /* Read Base Ability register 2 first */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+
+ /* Check for a supported mode, otherwise restart in a different one */
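+ /* The expected ability bit differs depending on whether we are
+ * currently negotiating in KR or KX mode.
+ */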
+ link_support = axgbe_in_kr_mode(pdata) ? 0x80 : 0x20;
+ if (!(reg & link_support))
+ return AXGBE_AN_INCOMPAT_LINK;
+
+ /* Check Extended Next Page support */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
+
+ return ((ad_reg & AXGBE_XNP_NP_EXCHANGE) ||
+ (lp_reg & AXGBE_XNP_NP_EXCHANGE))
+ ? axgbe_an73_tx_xnp(pdata, state)
+ : axgbe_an73_tx_training(pdata, state);
+}
+
+static enum axgbe_an axgbe_an73_rx_xnp(struct axgbe_port *pdata,
+ enum axgbe_rx *state)
+{
+ unsigned int ad_reg, lp_reg;
+
+ /* Check Extended Next Page support */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_XNP);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPX);
+
+ return ((ad_reg & AXGBE_XNP_NP_EXCHANGE) ||
+ (lp_reg & AXGBE_XNP_NP_EXCHANGE))
+ ? axgbe_an73_tx_xnp(pdata, state)
+ : axgbe_an73_tx_training(pdata, state);
+}
+
+static enum axgbe_an axgbe_an73_page_received(struct axgbe_port *pdata)
+{
+ enum axgbe_rx *state;
+ unsigned long an_timeout;
+ enum axgbe_an ret;
+ unsigned long ticks;
+
+ if (!pdata->an_start) {
+ pdata->an_start = rte_get_timer_cycles();
+ } else {
+ an_timeout = pdata->an_start +
+ msecs_to_timer_cycles(AXGBE_AN_MS_TIMEOUT);
+ ticks = rte_get_timer_cycles();
+ if (time_after(ticks, an_timeout)) {
+ /* Auto-negotiation timed out, reset state */
+ pdata->kr_state = AXGBE_RX_BPA;
+ pdata->kx_state = AXGBE_RX_BPA;
+
+ pdata->an_start = rte_get_timer_cycles();
+ }
+ }
+
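+ /* Track page exchange using the KR or KX state, depending on the
+ * current operating mode.
+ */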
+ state = axgbe_in_kr_mode(pdata) ? &pdata->kr_state
+ : &pdata->kx_state;
+
+ switch (*state) {
+ case AXGBE_RX_BPA:
+ ret = axgbe_an73_rx_bpa(pdata, state);
+ break;
+ case AXGBE_RX_XNP:
+ ret = axgbe_an73_rx_xnp(pdata, state);
+ break;
+ default:
+ ret = AXGBE_AN_ERROR;
+ }
+
+ return ret;
+}
+
+static enum axgbe_an axgbe_an73_incompat_link(struct axgbe_port *pdata)
+{
+ /* Be sure we aren't looping trying to negotiate */
+ if (axgbe_in_kr_mode(pdata)) {
+ pdata->kr_state = AXGBE_RX_ERROR;
+
+ if (!(pdata->phy.advertising & ADVERTISED_1000baseKX_Full) &&
+ !(pdata->phy.advertising & ADVERTISED_2500baseX_Full))
+ return AXGBE_AN_NO_LINK;
+
+ if (pdata->kx_state != AXGBE_RX_BPA)
+ return AXGBE_AN_NO_LINK;
+ } else {
+ pdata->kx_state = AXGBE_RX_ERROR;
+
+ if (!(pdata->phy.advertising & ADVERTISED_10000baseKR_Full))
+ return AXGBE_AN_NO_LINK;
+
+ if (pdata->kr_state != AXGBE_RX_BPA)
+ return AXGBE_AN_NO_LINK;
+ }
+
+ axgbe_an_disable(pdata);
+ axgbe_switch_mode(pdata);
+ axgbe_an_restart(pdata);
+
+ return AXGBE_AN_INCOMPAT_LINK;
+}
+
+static void axgbe_an73_state_machine(struct axgbe_port *pdata)
+{
+ enum axgbe_an cur_state = pdata->an_state;
+
+ if (!pdata->an_int)
+ return;
+
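+ /* Consume the latched interrupts one at a time, running the state
+ * machine until the state stops changing before moving on to the
+ * next pending interrupt.
+ */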
+next_int:
+ if (pdata->an_int & AXGBE_AN_CL73_PG_RCV) {
+ pdata->an_state = AXGBE_AN_PAGE_RECEIVED;
+ pdata->an_int &= ~AXGBE_AN_CL73_PG_RCV;
+ } else if (pdata->an_int & AXGBE_AN_CL73_INC_LINK) {
+ pdata->an_state = AXGBE_AN_INCOMPAT_LINK;
+ pdata->an_int &= ~AXGBE_AN_CL73_INC_LINK;
+ } else if (pdata->an_int & AXGBE_AN_CL73_INT_CMPLT) {
+ pdata->an_state = AXGBE_AN_COMPLETE;
+ pdata->an_int &= ~AXGBE_AN_CL73_INT_CMPLT;
+ } else {
+ pdata->an_state = AXGBE_AN_ERROR;
+ }
+
+again:
+ cur_state = pdata->an_state;
+
+ switch (pdata->an_state) {
+ case AXGBE_AN_READY:
+ pdata->an_supported = 0;
+ break;
+ case AXGBE_AN_PAGE_RECEIVED:
+ pdata->an_state = axgbe_an73_page_received(pdata);
+ pdata->an_supported++;
+ break;
+ case AXGBE_AN_INCOMPAT_LINK:
+ pdata->an_supported = 0;
+ pdata->parallel_detect = 0;
+ pdata->an_state = axgbe_an73_incompat_link(pdata);
+ break;
+ case AXGBE_AN_COMPLETE:
+ pdata->parallel_detect = pdata->an_supported ? 0 : 1;
+ break;
+ case AXGBE_AN_NO_LINK:
+ break;
+ default:
+ pdata->an_state = AXGBE_AN_ERROR;
+ }
+
+ if (pdata->an_state == AXGBE_AN_NO_LINK) {
+ pdata->an_int = 0;
+ axgbe_an73_clear_interrupts(pdata);
+ pdata->eth_dev->data->dev_link.link_status =
+ ETH_LINK_DOWN;
+ } else if (pdata->an_state == AXGBE_AN_ERROR) {
+ PMD_DRV_LOG(ERR, "error during auto-negotiation, state=%u\n",
+ cur_state);
+ pdata->an_int = 0;
+ axgbe_an73_clear_interrupts(pdata);
+ }
+
+ if (pdata->an_state >= AXGBE_AN_COMPLETE) {
+ pdata->an_result = pdata->an_state;
+ pdata->an_state = AXGBE_AN_READY;
+ pdata->kr_state = AXGBE_RX_BPA;
+ pdata->kx_state = AXGBE_RX_BPA;
+ pdata->an_start = 0;
+ if (pdata->phy_if.phy_impl.an_post)
+ pdata->phy_if.phy_impl.an_post(pdata);
+ }
+
+ if (cur_state != pdata->an_state)
+ goto again;
+
+ if (pdata->an_int)
+ goto next_int;
+
+ axgbe_an73_enable_interrupts(pdata);
+}
+
+static void axgbe_an73_isr(struct axgbe_port *pdata)
+{
+ /* Disable AN interrupts */
+ axgbe_an73_disable_interrupts(pdata);
+
+ /* Save the interrupt(s) that fired */
+ pdata->an_int = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT);
+
+ if (pdata->an_int) {
+ /* Clear the interrupt(s) that fired and process them */
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, ~pdata->an_int);
+ pthread_mutex_lock(&pdata->an_mutex);
+ axgbe_an73_state_machine(pdata);
+ pthread_mutex_unlock(&pdata->an_mutex);
+ } else {
+ /* Enable AN interrupts */
+ axgbe_an73_enable_interrupts(pdata);
+ }
+}
+
+static void axgbe_an_isr(struct axgbe_port *pdata)
+{
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+ axgbe_an73_isr(pdata);
+ break;
+ case AXGBE_AN_MODE_CL37:
+ case AXGBE_AN_MODE_CL37_SGMII:
+ PMD_DRV_LOG(ERR, "Unsupported AN_MODE_CL37\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void axgbe_an_combined_isr(struct axgbe_port *pdata)
+{
+ axgbe_an_isr(pdata);
+}
+
+static void axgbe_an73_init(struct axgbe_port *pdata)
+{
+ unsigned int advertising, reg;
+
+ advertising = pdata->phy_if.phy_impl.an_advertising(pdata);
+
+ /* Set up Advertisement register 3 first */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ if (advertising & ADVERTISED_10000baseR_FEC)
+ reg |= 0xc000;
+ else
+ reg &= ~0xc000;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, reg);
+
+ /* Set up Advertisement register 2 next */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ if (advertising & ADVERTISED_10000baseKR_Full)
+ reg |= 0x80;
+ else
+ reg &= ~0x80;
+
+ if ((advertising & ADVERTISED_1000baseKX_Full) ||
+ (advertising & ADVERTISED_2500baseX_Full))
+ reg |= 0x20;
+ else
+ reg &= ~0x20;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, reg);
+
+ /* Set up Advertisement register 1 last */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ if (advertising & ADVERTISED_Pause)
+ reg |= 0x400;
+ else
+ reg &= ~0x400;
+
+ if (advertising & ADVERTISED_Asym_Pause)
+ reg |= 0x800;
+ else
+ reg &= ~0x800;
+
+ /* We don't intend to perform XNP */
+ reg &= ~AXGBE_XNP_NP_EXCHANGE;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
+}
+
+static void axgbe_an_init(struct axgbe_port *pdata)
+{
+ /* Set up advertisement registers based on current settings */
+ pdata->an_mode = pdata->phy_if.phy_impl.an_mode(pdata);
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+ axgbe_an73_init(pdata);
+ break;
+ case AXGBE_AN_MODE_CL37:
+ case AXGBE_AN_MODE_CL37_SGMII:
+ PMD_DRV_LOG(ERR, "Unsupported AN_MODE_CL37\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void axgbe_phy_adjust_link(struct axgbe_port *pdata)
+{
+ if (pdata->phy.link) {
+ /* Flow control support */
+ pdata->pause_autoneg = pdata->phy.pause_autoneg;
+
+ if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause) {
+ pdata->hw_if.config_tx_flow_control(pdata);
+ pdata->tx_pause = pdata->phy.tx_pause;
+ }
+
+ if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause) {
+ pdata->hw_if.config_rx_flow_control(pdata);
+ pdata->rx_pause = pdata->phy.rx_pause;
+ }
+
+ /* Speed support */
+ if (pdata->phy_speed != pdata->phy.speed)
+ pdata->phy_speed = pdata->phy.speed;
+ if (pdata->phy_link != pdata->phy.link)
+ pdata->phy_link = pdata->phy.link;
+ } else if (pdata->phy_link) {
+ pdata->phy_link = 0;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ }
+}
+
+static int axgbe_phy_config_fixed(struct axgbe_port *pdata)
+{
+ enum axgbe_mode mode;
+
+ /* Disable auto-negotiation */
+ axgbe_an_disable(pdata);
+
+ /* Set specified mode for specified speed */
+ mode = pdata->phy_if.phy_impl.get_mode(pdata, pdata->phy.speed);
+ switch (mode) {
+ case AXGBE_MODE_KX_1000:
+ case AXGBE_MODE_KX_2500:
+ case AXGBE_MODE_KR:
+ case AXGBE_MODE_SGMII_100:
+ case AXGBE_MODE_SGMII_1000:
+ case AXGBE_MODE_X:
+ case AXGBE_MODE_SFI:
+ break;
+ case AXGBE_MODE_UNKNOWN:
+ default:
+ return -EINVAL;
+ }
+
+ /* Validate duplex mode */
+ if (pdata->phy.duplex != DUPLEX_FULL)
+ return -EINVAL;
+
+ axgbe_set_mode(pdata, mode);
+
+ return 0;
+}
+
+static int __axgbe_phy_config_aneg(struct axgbe_port *pdata)
+{
+ int ret;
+
+ axgbe_set_bit(AXGBE_LINK_INIT, &pdata->dev_state);
+ pdata->link_check = rte_get_timer_cycles();
+
+ ret = pdata->phy_if.phy_impl.an_config(pdata);
+ if (ret)
+ return ret;
+
+ if (pdata->phy.autoneg != AUTONEG_ENABLE) {
+ ret = axgbe_phy_config_fixed(pdata);
+ if (ret || !pdata->kr_redrv)
+ return ret;
+ }
+
+ /* Disable auto-negotiation interrupt */
+ rte_intr_disable(&pdata->pci_dev->intr_handle);
+
+ /* Start auto-negotiation in a supported mode */
+ if (axgbe_use_mode(pdata, AXGBE_MODE_KR)) {
+ axgbe_set_mode(pdata, AXGBE_MODE_KR);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_2500)) {
+ axgbe_set_mode(pdata, AXGBE_MODE_KX_2500);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_1000)) {
+ axgbe_set_mode(pdata, AXGBE_MODE_KX_1000);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SFI)) {
+ axgbe_set_mode(pdata, AXGBE_MODE_SFI);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_X)) {
+ axgbe_set_mode(pdata, AXGBE_MODE_X);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_1000)) {
+ axgbe_set_mode(pdata, AXGBE_MODE_SGMII_1000);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_100)) {
+ axgbe_set_mode(pdata, AXGBE_MODE_SGMII_100);
+ } else {
+ rte_intr_enable(&pdata->pci_dev->intr_handle);
+ return -EINVAL;
+ }
+
+ /* Disable and stop any in progress auto-negotiation */
+ axgbe_an_disable_all(pdata);
+
+ /* Clear any auto-negotiation interrupts */
+ axgbe_an_clear_interrupts_all(pdata);
+
+ pdata->an_result = AXGBE_AN_READY;
+ pdata->an_state = AXGBE_AN_READY;
+ pdata->kr_state = AXGBE_RX_BPA;
+ pdata->kx_state = AXGBE_RX_BPA;
+
+ /* Re-enable auto-negotiation interrupt */
+ rte_intr_enable(&pdata->pci_dev->intr_handle);
+
+ axgbe_an_init(pdata);
+ axgbe_an_restart(pdata);
+
+ return 0;
+}
+
+static int axgbe_phy_config_aneg(struct axgbe_port *pdata)
+{
+ int ret;
+
+ pthread_mutex_lock(&pdata->an_mutex);
+
+ ret = __axgbe_phy_config_aneg(pdata);
+ if (ret)
+ axgbe_set_bit(AXGBE_LINK_ERR, &pdata->dev_state);
+ else
+ axgbe_clear_bit(AXGBE_LINK_ERR, &pdata->dev_state);
+
+ pthread_mutex_unlock(&pdata->an_mutex);
+
+ return ret;
+}
+
+static bool axgbe_phy_aneg_done(struct axgbe_port *pdata)
+{
+ return pdata->an_result == AXGBE_AN_COMPLETE;
+}
+
+static void axgbe_check_link_timeout(struct axgbe_port *pdata)
+{
+ unsigned long link_timeout;
+ unsigned long ticks;
+
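+ /* Restart auto-negotiation if the link has not come up within the
+ * allotted time since the last link check.
+ */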
+ link_timeout = pdata->link_check + (AXGBE_LINK_TIMEOUT *
+ 2 * rte_get_timer_hz());
+ ticks = rte_get_timer_cycles();
+ if (time_after(ticks, link_timeout))
+ axgbe_phy_config_aneg(pdata);
+}
+
+static enum axgbe_mode axgbe_phy_status_aneg(struct axgbe_port *pdata)
+{
+ return pdata->phy_if.phy_impl.an_outcome(pdata);
+}
+
+static void axgbe_phy_status_result(struct axgbe_port *pdata)
+{
+ enum axgbe_mode mode;
+
+ pdata->phy.lp_advertising = 0;
+
+ if ((pdata->phy.autoneg != AUTONEG_ENABLE) || pdata->parallel_detect)
+ mode = axgbe_cur_mode(pdata);
+ else
+ mode = axgbe_phy_status_aneg(pdata);
+
+ switch (mode) {
+ case AXGBE_MODE_SGMII_100:
+ pdata->phy.speed = SPEED_100;
+ break;
+ case AXGBE_MODE_X:
+ case AXGBE_MODE_KX_1000:
+ case AXGBE_MODE_SGMII_1000:
+ pdata->phy.speed = SPEED_1000;
+ break;
+ case AXGBE_MODE_KX_2500:
+ pdata->phy.speed = SPEED_2500;
+ break;
+ case AXGBE_MODE_KR:
+ case AXGBE_MODE_SFI:
+ pdata->phy.speed = SPEED_10000;
+ break;
+ case AXGBE_MODE_UNKNOWN:
+ default:
+ pdata->phy.speed = SPEED_UNKNOWN;
+ }
+
+ pdata->phy.duplex = DUPLEX_FULL;
+
+ axgbe_set_mode(pdata, mode);
+}
+
+static void axgbe_phy_status(struct axgbe_port *pdata)
+{
+ unsigned int link_aneg;
+ int an_restart;
+
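+ /* On a latched link error report the link as down; otherwise query
+ * the PHY implementation, restarting auto-negotiation if requested,
+ * and resolve the final speed/duplex once negotiation has finished.
+ */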
+ if (axgbe_test_bit(AXGBE_LINK_ERR, &pdata->dev_state)) {
+ pdata->phy.link = 0;
+ goto adjust_link;
+ }
+
+ link_aneg = (pdata->phy.autoneg == AUTONEG_ENABLE);
+
+ pdata->phy.link = pdata->phy_if.phy_impl.link_status(pdata,
+ &an_restart);
+ if (an_restart) {
+ axgbe_phy_config_aneg(pdata);
+ return;
+ }
+
+ if (pdata->phy.link) {
+ if (link_aneg && !axgbe_phy_aneg_done(pdata)) {
+ axgbe_check_link_timeout(pdata);
+ return;
+ }
+ axgbe_phy_status_result(pdata);
+ if (axgbe_test_bit(AXGBE_LINK_INIT, &pdata->dev_state))
+ axgbe_clear_bit(AXGBE_LINK_INIT, &pdata->dev_state);
+ } else {
+ if (axgbe_test_bit(AXGBE_LINK_INIT, &pdata->dev_state)) {
+ axgbe_check_link_timeout(pdata);
+
+ if (link_aneg)
+ return;
+ }
+ axgbe_phy_status_result(pdata);
+ }
+
+adjust_link:
+ axgbe_phy_adjust_link(pdata);
+}
+
+static void axgbe_phy_stop(struct axgbe_port *pdata)
+{
+ if (!pdata->phy_started)
+ return;
+ /* Indicate the PHY is down */
+ pdata->phy_started = 0;
+ /* Disable auto-negotiation */
+ axgbe_an_disable_all(pdata);
+ pdata->phy_if.phy_impl.stop(pdata);
+ pdata->phy.link = 0;
+ axgbe_phy_adjust_link(pdata);
+}
+
+static int axgbe_phy_start(struct axgbe_port *pdata)
+{
+ int ret;
+
+ ret = pdata->phy_if.phy_impl.start(pdata);
+ if (ret)
+ return ret;
+ /* Set initial mode - call the mode setting routines
+ * directly to ensure we are properly configured
+ */
+ if (axgbe_use_mode(pdata, AXGBE_MODE_KR)) {
+ axgbe_kr_mode(pdata);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_2500)) {
+ axgbe_kx_2500_mode(pdata);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_1000)) {
+ axgbe_kx_1000_mode(pdata);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SFI)) {
+ axgbe_sfi_mode(pdata);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_X)) {
+ axgbe_x_mode(pdata);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_1000)) {
+ axgbe_sgmii_1000_mode(pdata);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_100)) {
+ axgbe_sgmii_100_mode(pdata);
+ } else {
+ ret = -EINVAL;
+ goto err_stop;
+ }
+ /* Indicate the PHY is up and running */
+ pdata->phy_started = 1;
+ axgbe_an_init(pdata);
+ axgbe_an_enable_interrupts(pdata);
+ return axgbe_phy_config_aneg(pdata);
+
+err_stop:
+ pdata->phy_if.phy_impl.stop(pdata);
+
+ return ret;
+}
+
+static int axgbe_phy_reset(struct axgbe_port *pdata)
+{
+ int ret;
+
+ ret = pdata->phy_if.phy_impl.reset(pdata);
+ if (ret)
+ return ret;
+
+ /* Disable auto-negotiation for now */
+ axgbe_an_disable_all(pdata);
+
+ /* Clear auto-negotiation interrupts */
+ axgbe_an_clear_interrupts_all(pdata);
+
+ return 0;
+}
+
+static int axgbe_phy_best_advertised_speed(struct axgbe_port *pdata)
+{
+ if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full)
+ return SPEED_10000;
+ else if (pdata->phy.advertising & ADVERTISED_10000baseT_Full)
+ return SPEED_10000;
+ else if (pdata->phy.advertising & ADVERTISED_2500baseX_Full)
+ return SPEED_2500;
+ else if (pdata->phy.advertising & ADVERTISED_1000baseKX_Full)
+ return SPEED_1000;
+ else if (pdata->phy.advertising & ADVERTISED_1000baseT_Full)
+ return SPEED_1000;
+ else if (pdata->phy.advertising & ADVERTISED_100baseT_Full)
+ return SPEED_100;
+
+ return SPEED_UNKNOWN;
+}
+
+static int axgbe_phy_init(struct axgbe_port *pdata)
+{
+ int ret;
+
+ pdata->mdio_mmd = MDIO_MMD_PCS;
+
+ /* Check for FEC support */
+ pdata->fec_ability = XMDIO_READ(pdata, MDIO_MMD_PMAPMD,
+ MDIO_PMA_10GBR_FECABLE);
+ pdata->fec_ability &= (MDIO_PMA_10GBR_FECABLE_ABLE |
+ MDIO_PMA_10GBR_FECABLE_ERRABLE);
+
+ /* Setup the phy (including supported features) */
+ ret = pdata->phy_if.phy_impl.init(pdata);
+ if (ret)
+ return ret;
+ pdata->phy.advertising = pdata->phy.supported;
+
+ pdata->phy.address = 0;
+
+ if (pdata->phy.advertising & ADVERTISED_Autoneg) {
+ pdata->phy.autoneg = AUTONEG_ENABLE;
+ pdata->phy.speed = SPEED_UNKNOWN;
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+ } else {
+ pdata->phy.autoneg = AUTONEG_DISABLE;
+ pdata->phy.speed = axgbe_phy_best_advertised_speed(pdata);
+ pdata->phy.duplex = DUPLEX_FULL;
+ }
+
+ pdata->phy.link = 0;
+
+ pdata->phy.pause_autoneg = pdata->pause_autoneg;
+ pdata->phy.tx_pause = pdata->tx_pause;
+ pdata->phy.rx_pause = pdata->rx_pause;
+
+ /* Fix up Flow Control advertising */
+ pdata->phy.advertising &= ~ADVERTISED_Pause;
+ pdata->phy.advertising &= ~ADVERTISED_Asym_Pause;
+
+ if (pdata->rx_pause) {
+ pdata->phy.advertising |= ADVERTISED_Pause;
+ pdata->phy.advertising |= ADVERTISED_Asym_Pause;
+ }
+
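+ /* Toggling the asymmetric bit here gives the usual pause
+ * advertisement mapping: Rx+Tx -> Pause, Tx only -> Asym_Pause,
+ * Rx only -> Pause | Asym_Pause.
+ */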
+ if (pdata->tx_pause)
+ pdata->phy.advertising ^= ADVERTISED_Asym_Pause;
+ return 0;
+}
+
+void axgbe_init_function_ptrs_phy(struct axgbe_phy_if *phy_if)
+{
+ phy_if->phy_init = axgbe_phy_init;
+ phy_if->phy_reset = axgbe_phy_reset;
+ phy_if->phy_start = axgbe_phy_start;
+ phy_if->phy_stop = axgbe_phy_stop;
+ phy_if->phy_status = axgbe_phy_status;
+ phy_if->phy_config_aneg = axgbe_phy_config_aneg;
+ phy_if->an_isr = axgbe_an_combined_isr;
+}
diff --git a/drivers/net/axgbe/axgbe_phy.h b/drivers/net/axgbe/axgbe_phy.h
new file mode 100644
index 00000000..77ee20a3
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_phy.h
@@ -0,0 +1,192 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#ifndef __AXGBE_PHY_H__
+#define __AXGBE_PHY_H__
+
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define SPEED_2500 2500
+#define SPEED_10000 10000
+
+
+/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit
+ * IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips.
+ */
+#define MII_ADDR_C45 (1 << 30)
+
+/* Basic mode status register. */
+#define BMSR_LSTATUS 0x0004 /* Link status */
+
+/* Status register 1. */
+#define MDIO_STAT1_LSTATUS BMSR_LSTATUS
+
+/* Generic MII registers. */
+#define MII_BMCR 0x00 /* Basic mode control register */
+#define MII_BMSR 0x01 /* Basic mode status register */
+#define MII_PHYSID1 0x02 /* PHYS ID 1 */
+#define MII_PHYSID2 0x03 /* PHYS ID 2 */
+#define MII_ADVERTISE 0x04 /* Advertisement control reg */
+#define MII_LPA 0x05 /* Link partner ability reg */
+#define MII_EXPANSION 0x06 /* Expansion register */
+#define MII_CTRL1000 0x09 /* 1000BASE-T control */
+#define MII_STAT1000 0x0a /* 1000BASE-T status */
+#define MII_MMD_CTRL 0x0d /* MMD Access Control Register */
+#define MII_MMD_DATA 0x0e /* MMD Access Data Register */
+#define MII_ESTATUS 0x0f /* Extended Status */
+#define MII_DCOUNTER 0x12 /* Disconnect counter */
+#define MII_FCSCOUNTER 0x13 /* False carrier counter */
+#define MII_NWAYTEST 0x14 /* N-way auto-neg test reg */
+#define MII_RERRCOUNTER 0x15 /* Receive error counter */
+#define MII_SREVISION 0x16 /* Silicon revision */
+#define MII_RESV1 0x17 /* Reserved... */
+#define MII_LBRERROR 0x18 /* Lpback, rx, bypass error */
+#define MII_PHYADDR 0x19 /* PHY address */
+#define MII_RESV2 0x1a /* Reserved... */
+#define MII_TPISTATUS 0x1b /* TPI status for 10mbps */
+#define MII_NCONFIG 0x1c /* Network interface config */
+
+/* Basic mode control register. */
+#define BMCR_RESV 0x003f /* Unused... */
+#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */
+#define BMCR_CTST 0x0080 /* Collision test */
+#define BMCR_FULLDPLX 0x0100 /* Full duplex */
+#define BMCR_ANRESTART 0x0200 /* Auto negotiation restart */
+#define BMCR_ISOLATE 0x0400 /* Isolate data paths from MII */
+#define BMCR_PDOWN 0x0800 /* Enable low power state */
+#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */
+#define BMCR_SPEED100 0x2000 /* Select 100Mbps */
+#define BMCR_LOOPBACK 0x4000 /* TXD loopback bits */
+#define BMCR_RESET 0x8000 /* Reset to default state */
+#define BMCR_SPEED10 0x0000 /* Select 10Mbps */
+
+
+/* MDIO Manageable Devices (MMDs). */
+#define MDIO_MMD_PMAPMD 1 /* Physical Medium Attachment
+ * Physical Medium Dependent
+ */
+#define MDIO_MMD_WIS 2 /* WAN Interface Sublayer */
+#define MDIO_MMD_PCS 3 /* Physical Coding Sublayer */
+#define MDIO_MMD_PHYXS 4 /* PHY Extender Sublayer */
+#define MDIO_MMD_DTEXS 5 /* DTE Extender Sublayer */
+#define MDIO_MMD_TC 6 /* Transmission Convergence */
+#define MDIO_MMD_AN 7 /* Auto-Negotiation */
+#define MDIO_MMD_C22EXT 29 /* Clause 22 extension */
+#define MDIO_MMD_VEND1 30 /* Vendor specific 1 */
+#define MDIO_MMD_VEND2 31 /* Vendor specific 2 */
+
+/* Generic MDIO registers. */
+#define MDIO_CTRL1 MII_BMCR
+#define MDIO_STAT1 MII_BMSR
+#define MDIO_DEVID1 MII_PHYSID1
+#define MDIO_DEVID2 MII_PHYSID2
+#define MDIO_SPEED 4 /* Speed ability */
+#define MDIO_DEVS1 5 /* Devices in package */
+#define MDIO_DEVS2 6
+#define MDIO_CTRL2 7 /* 10G control 2 */
+#define MDIO_STAT2 8 /* 10G status 2 */
+#define MDIO_PMA_TXDIS 9 /* 10G PMA/PMD transmit disable */
+#define MDIO_PMA_RXDET 10 /* 10G PMA/PMD receive signal detect */
+#define MDIO_PMA_EXTABLE 11 /* 10G PMA/PMD extended ability */
+#define MDIO_PKGID1 14 /* Package identifier */
+#define MDIO_PKGID2 15
+#define MDIO_AN_ADVERTISE 16 /* AN advertising (base page) */
+#define MDIO_AN_LPA 19 /* AN LP abilities (base page) */
+#define MDIO_PCS_EEE_ABLE 20 /* EEE Capability register */
+#define MDIO_PCS_EEE_WK_ERR 22 /* EEE wake error counter */
+#define MDIO_PHYXS_LNSTAT 24 /* PHY XGXS lane state */
+#define MDIO_AN_EEE_ADV 60 /* EEE advertisement */
+#define MDIO_AN_EEE_LPABLE 61 /* EEE link partner ability */
+
+/* Media-dependent registers. */
+#define MDIO_PMA_10GBT_SWAPPOL 130 /* 10GBASE-T pair swap & polarity */
+#define MDIO_PMA_10GBT_TXPWR 131 /* 10GBASE-T TX power control */
+#define MDIO_PMA_10GBT_SNR 133 /* 10GBASE-T SNR margin, lane A.
+ * Lanes B-D are numbered 134-136.
+ */
+#define MDIO_PMA_10GBR_FECABLE 170 /* 10GBASE-R FEC ability */
+#define MDIO_PCS_10GBX_STAT1 24 /* 10GBASE-X PCS status 1 */
+#define MDIO_PCS_10GBRT_STAT1 32 /* 10GBASE-R/-T PCS status 1 */
+#define MDIO_PCS_10GBRT_STAT2 33 /* 10GBASE-R/-T PCS status 2 */
+#define MDIO_AN_10GBT_CTRL 32 /* 10GBASE-T auto-negotiation control */
+#define MDIO_AN_10GBT_STAT 33 /* 10GBASE-T auto-negotiation status */
+
+/* Control register 1. */
+/* Enable extended speed selection */
+#define MDIO_CTRL1_SPEEDSELEXT (BMCR_SPEED1000 | BMCR_SPEED100)
+/* All speed selection bits */
+#define MDIO_CTRL1_SPEEDSEL (MDIO_CTRL1_SPEEDSELEXT | 0x003c)
+#define MDIO_CTRL1_FULLDPLX BMCR_FULLDPLX
+#define MDIO_CTRL1_LPOWER BMCR_PDOWN
+#define MDIO_CTRL1_RESET BMCR_RESET
+#define MDIO_PMA_CTRL1_LOOPBACK 0x0001
+#define MDIO_PMA_CTRL1_SPEED1000 BMCR_SPEED1000
+#define MDIO_PMA_CTRL1_SPEED100 BMCR_SPEED100
+#define MDIO_PCS_CTRL1_LOOPBACK BMCR_LOOPBACK
+#define MDIO_PHYXS_CTRL1_LOOPBACK BMCR_LOOPBACK
+#define MDIO_AN_CTRL1_RESTART BMCR_ANRESTART
+#define MDIO_AN_CTRL1_ENABLE BMCR_ANENABLE
+#define MDIO_AN_CTRL1_XNP 0x2000 /* Enable extended next page */
+#define MDIO_PCS_CTRL1_CLKSTOP_EN 0x400 /* Stop the clock during LPI */
+
+/* PMA 10GBASE-R FEC ability register. */
+#define MDIO_PMA_10GBR_FECABLE_ABLE 0x0001 /* FEC ability */
+#define MDIO_PMA_10GBR_FECABLE_ERRABLE 0x0002 /* FEC error indic. ability */
+
+
+/* Autoneg related */
+#define ADVERTISED_Autoneg (1 << 6)
+#define SUPPORTED_Autoneg (1 << 6)
+#define AUTONEG_DISABLE 0x00
+#define AUTONEG_ENABLE 0x01
+
+#define ADVERTISED_Pause (1 << 13)
+#define ADVERTISED_Asym_Pause (1 << 14)
+
+#define SUPPORTED_Pause (1 << 13)
+#define SUPPORTED_Asym_Pause (1 << 14)
+
+#define SUPPORTED_Backplane (1 << 16)
+#define SUPPORTED_TP (1 << 7)
+
+#define ADVERTISED_10000baseR_FEC (1 << 20)
+
+#define SUPPORTED_10000baseR_FEC (1 << 20)
+
+#define SUPPORTED_FIBRE (1 << 10)
+
+#define ADVERTISED_10000baseKR_Full (1 << 19)
+#define ADVERTISED_10000baseT_Full (1 << 12)
+#define ADVERTISED_2500baseX_Full (1 << 15)
+#define ADVERTISED_1000baseKX_Full (1 << 17)
+#define ADVERTISED_1000baseT_Full (1 << 5)
+#define ADVERTISED_100baseT_Full (1 << 3)
+#define ADVERTISED_TP (1 << 7)
+#define ADVERTISED_FIBRE (1 << 10)
+#define ADVERTISED_Backplane (1 << 16)
+
+#define SUPPORTED_1000baseKX_Full (1 << 17)
+#define SUPPORTED_10000baseKR_Full (1 << 19)
+#define SUPPORTED_2500baseX_Full (1 << 15)
+#define SUPPORTED_100baseT_Full (1 << 2)
+#define SUPPORTED_1000baseT_Full (1 << 5)
+#define SUPPORTED_10000baseT_Full (1 << 12)
+
+
+#define SPEED_UNKNOWN -1
+
+/* Duplex, half or full. */
+#define DUPLEX_HALF 0x00
+#define DUPLEX_FULL 0x01
+#define DUPLEX_UNKNOWN 0xff
+
+#endif
+/* PHY */
diff --git a/drivers/net/axgbe/axgbe_phy_impl.c b/drivers/net/axgbe/axgbe_phy_impl.c
new file mode 100644
index 00000000..973177f6
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_phy_impl.c
@@ -0,0 +1,2191 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_ethdev.h"
+#include "axgbe_common.h"
+#include "axgbe_phy.h"
+
+#define AXGBE_PHY_PORT_SPEED_100 BIT(0)
+#define AXGBE_PHY_PORT_SPEED_1000 BIT(1)
+#define AXGBE_PHY_PORT_SPEED_2500 BIT(2)
+#define AXGBE_PHY_PORT_SPEED_10000 BIT(3)
+
+#define AXGBE_MUTEX_RELEASE 0x80000000
+
+#define AXGBE_SFP_DIRECT 7
+
+/* I2C target addresses */
+#define AXGBE_SFP_SERIAL_ID_ADDRESS 0x50
+#define AXGBE_SFP_DIAG_INFO_ADDRESS 0x51
+#define AXGBE_SFP_PHY_ADDRESS 0x56
+#define AXGBE_GPIO_ADDRESS_PCA9555 0x20
+
+/* SFP sideband signal indicators */
+#define AXGBE_GPIO_NO_TX_FAULT BIT(0)
+#define AXGBE_GPIO_NO_RATE_SELECT BIT(1)
+#define AXGBE_GPIO_NO_MOD_ABSENT BIT(2)
+#define AXGBE_GPIO_NO_RX_LOS BIT(3)
+
+/* Rate-change complete wait/retry count */
+#define AXGBE_RATECHANGE_COUNT 500
+
+/* CDR delay values for KR support (in usec) */
+#define AXGBE_CDR_DELAY_INIT 10000
+#define AXGBE_CDR_DELAY_INC 10000
+#define AXGBE_CDR_DELAY_MAX 100000
+
+enum axgbe_port_mode {
+ AXGBE_PORT_MODE_RSVD = 0,
+ AXGBE_PORT_MODE_BACKPLANE,
+ AXGBE_PORT_MODE_BACKPLANE_2500,
+ AXGBE_PORT_MODE_1000BASE_T,
+ AXGBE_PORT_MODE_1000BASE_X,
+ AXGBE_PORT_MODE_NBASE_T,
+ AXGBE_PORT_MODE_10GBASE_T,
+ AXGBE_PORT_MODE_10GBASE_R,
+ AXGBE_PORT_MODE_SFP,
+ AXGBE_PORT_MODE_MAX,
+};
+
+enum axgbe_conn_type {
+ AXGBE_CONN_TYPE_NONE = 0,
+ AXGBE_CONN_TYPE_SFP,
+ AXGBE_CONN_TYPE_MDIO,
+ AXGBE_CONN_TYPE_RSVD1,
+ AXGBE_CONN_TYPE_BACKPLANE,
+ AXGBE_CONN_TYPE_MAX,
+};
+
+/* SFP/SFP+ related definitions */
+enum axgbe_sfp_comm {
+ AXGBE_SFP_COMM_DIRECT = 0,
+ AXGBE_SFP_COMM_PCA9545,
+};
+
+enum axgbe_sfp_cable {
+ AXGBE_SFP_CABLE_UNKNOWN = 0,
+ AXGBE_SFP_CABLE_ACTIVE,
+ AXGBE_SFP_CABLE_PASSIVE,
+};
+
+enum axgbe_sfp_base {
+ AXGBE_SFP_BASE_UNKNOWN = 0,
+ AXGBE_SFP_BASE_1000_T,
+ AXGBE_SFP_BASE_1000_SX,
+ AXGBE_SFP_BASE_1000_LX,
+ AXGBE_SFP_BASE_1000_CX,
+ AXGBE_SFP_BASE_10000_SR,
+ AXGBE_SFP_BASE_10000_LR,
+ AXGBE_SFP_BASE_10000_LRM,
+ AXGBE_SFP_BASE_10000_ER,
+ AXGBE_SFP_BASE_10000_CR,
+};
+
+enum axgbe_sfp_speed {
+ AXGBE_SFP_SPEED_UNKNOWN = 0,
+ AXGBE_SFP_SPEED_100_1000,
+ AXGBE_SFP_SPEED_1000,
+ AXGBE_SFP_SPEED_10000,
+};
+
+/* SFP Serial ID Base ID values relative to an offset of 0 */
+#define AXGBE_SFP_BASE_ID 0
+#define AXGBE_SFP_ID_SFP 0x03
+
+#define AXGBE_SFP_BASE_EXT_ID 1
+#define AXGBE_SFP_EXT_ID_SFP 0x04
+
+#define AXGBE_SFP_BASE_10GBE_CC 3
+#define AXGBE_SFP_BASE_10GBE_CC_SR BIT(4)
+#define AXGBE_SFP_BASE_10GBE_CC_LR BIT(5)
+#define AXGBE_SFP_BASE_10GBE_CC_LRM BIT(6)
+#define AXGBE_SFP_BASE_10GBE_CC_ER BIT(7)
+
+#define AXGBE_SFP_BASE_1GBE_CC 6
+#define AXGBE_SFP_BASE_1GBE_CC_SX BIT(0)
+#define AXGBE_SFP_BASE_1GBE_CC_LX BIT(1)
+#define AXGBE_SFP_BASE_1GBE_CC_CX BIT(2)
+#define AXGBE_SFP_BASE_1GBE_CC_T BIT(3)
+
+#define AXGBE_SFP_BASE_CABLE 8
+#define AXGBE_SFP_BASE_CABLE_PASSIVE BIT(2)
+#define AXGBE_SFP_BASE_CABLE_ACTIVE BIT(3)
+
+#define AXGBE_SFP_BASE_BR 12
+#define AXGBE_SFP_BASE_BR_1GBE_MIN 0x0a
+#define AXGBE_SFP_BASE_BR_1GBE_MAX 0x0d
+#define AXGBE_SFP_BASE_BR_10GBE_MIN 0x64
+#define AXGBE_SFP_BASE_BR_10GBE_MAX 0x68
+
+#define AXGBE_SFP_BASE_CU_CABLE_LEN 18
+
+#define AXGBE_SFP_BASE_VENDOR_NAME 20
+#define AXGBE_SFP_BASE_VENDOR_NAME_LEN 16
+#define AXGBE_SFP_BASE_VENDOR_PN 40
+#define AXGBE_SFP_BASE_VENDOR_PN_LEN 16
+#define AXGBE_SFP_BASE_VENDOR_REV 56
+#define AXGBE_SFP_BASE_VENDOR_REV_LEN 4
+
+#define AXGBE_SFP_BASE_CC 63
+
+/* SFP Serial ID Extended ID values relative to an offset of 64 */
+#define AXGBE_SFP_BASE_VENDOR_SN 4
+#define AXGBE_SFP_BASE_VENDOR_SN_LEN 16
+
+#define AXGBE_SFP_EXTD_DIAG 28
+#define AXGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2)
+
+#define AXGBE_SFP_EXTD_SFF_8472 30
+
+#define AXGBE_SFP_EXTD_CC 31
+
+struct axgbe_sfp_eeprom {
+ u8 base[64];
+ u8 extd[32];
+ u8 vendor[32];
+};
+
+#define AXGBE_BEL_FUSE_VENDOR "BEL-FUSE"
+#define AXGBE_BEL_FUSE_PARTNO "1GBT-SFP06"
+
+struct axgbe_sfp_ascii {
+ union {
+ char vendor[AXGBE_SFP_BASE_VENDOR_NAME_LEN + 1];
+ char partno[AXGBE_SFP_BASE_VENDOR_PN_LEN + 1];
+ char rev[AXGBE_SFP_BASE_VENDOR_REV_LEN + 1];
+ char serno[AXGBE_SFP_BASE_VENDOR_SN_LEN + 1];
+ } u;
+};
+
+/* MDIO PHY reset types */
+enum axgbe_mdio_reset {
+ AXGBE_MDIO_RESET_NONE = 0,
+ AXGBE_MDIO_RESET_I2C_GPIO,
+ AXGBE_MDIO_RESET_INT_GPIO,
+ AXGBE_MDIO_RESET_MAX,
+};
+
+/* Re-driver related definitions */
+enum axgbe_phy_redrv_if {
+ AXGBE_PHY_REDRV_IF_MDIO = 0,
+ AXGBE_PHY_REDRV_IF_I2C,
+ AXGBE_PHY_REDRV_IF_MAX,
+};
+
+enum axgbe_phy_redrv_model {
+ AXGBE_PHY_REDRV_MODEL_4223 = 0,
+ AXGBE_PHY_REDRV_MODEL_4227,
+ AXGBE_PHY_REDRV_MODEL_MAX,
+};
+
+enum axgbe_phy_redrv_mode {
+ AXGBE_PHY_REDRV_MODE_CX = 5,
+ AXGBE_PHY_REDRV_MODE_SR = 9,
+};
+
+#define AXGBE_PHY_REDRV_MODE_REG 0x12b0
+
+/* PHY related configuration information */
+struct axgbe_phy_data {
+ enum axgbe_port_mode port_mode;
+
+ unsigned int port_id;
+
+ unsigned int port_speeds;
+
+ enum axgbe_conn_type conn_type;
+
+ enum axgbe_mode cur_mode;
+ enum axgbe_mode start_mode;
+
+ unsigned int rrc_count;
+
+ unsigned int mdio_addr;
+
+ unsigned int comm_owned;
+
+ /* SFP Support */
+ enum axgbe_sfp_comm sfp_comm;
+ unsigned int sfp_mux_address;
+ unsigned int sfp_mux_channel;
+
+ unsigned int sfp_gpio_address;
+ unsigned int sfp_gpio_mask;
+ unsigned int sfp_gpio_rx_los;
+ unsigned int sfp_gpio_tx_fault;
+ unsigned int sfp_gpio_mod_absent;
+ unsigned int sfp_gpio_rate_select;
+
+ unsigned int sfp_rx_los;
+ unsigned int sfp_tx_fault;
+ unsigned int sfp_mod_absent;
+ unsigned int sfp_diags;
+ unsigned int sfp_changed;
+ unsigned int sfp_phy_avail;
+ unsigned int sfp_cable_len;
+ enum axgbe_sfp_base sfp_base;
+ enum axgbe_sfp_cable sfp_cable;
+ enum axgbe_sfp_speed sfp_speed;
+ struct axgbe_sfp_eeprom sfp_eeprom;
+
+ /* External PHY support */
+ enum axgbe_mdio_mode phydev_mode;
+ enum axgbe_mdio_reset mdio_reset;
+ unsigned int mdio_reset_addr;
+ unsigned int mdio_reset_gpio;
+
+ /* Re-driver support */
+ unsigned int redrv;
+ unsigned int redrv_if;
+ unsigned int redrv_addr;
+ unsigned int redrv_lane;
+ unsigned int redrv_model;
+
+ /* KR AN support */
+ unsigned int phy_cdr_notrack;
+ unsigned int phy_cdr_delay;
+};
+
+static enum axgbe_an_mode axgbe_phy_an_mode(struct axgbe_port *pdata);
+
+static int axgbe_phy_i2c_xfer(struct axgbe_port *pdata,
+ struct axgbe_i2c_op *i2c_op)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* Be sure we own the bus */
+ if (!phy_data->comm_owned)
+ return -EIO;
+
+ return pdata->i2c_if.i2c_xfer(pdata, i2c_op);
+}
+
+static int axgbe_phy_redrv_write(struct axgbe_port *pdata, unsigned int reg,
+ unsigned int val)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ struct axgbe_i2c_op i2c_op;
+ uint16_t *redrv_val;
+ u8 redrv_data[5], csum;
+ unsigned int i, retry;
+ int ret;
+
+ /* High byte of register contains read/write indicator */
+ redrv_data[0] = ((reg >> 8) & 0xff) << 1;
+ redrv_data[1] = reg & 0xff;
+ redrv_val = (uint16_t *)&redrv_data[2];
+ *redrv_val = rte_cpu_to_be_16(val);
+
+ /* Calculate 1 byte checksum */
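+ /* Sum the bytes with end-around carry and store the inverted result
+ * as the checksum byte.
+ */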
+ csum = 0;
+ for (i = 0; i < 4; i++) {
+ csum += redrv_data[i];
+ if (redrv_data[i] > csum)
+ csum++;
+ }
+ redrv_data[4] = ~csum;
+
+ retry = 1;
+again1:
+ i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
+ i2c_op.target = phy_data->redrv_addr;
+ i2c_op.len = sizeof(redrv_data);
+ i2c_op.buf = redrv_data;
+ ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if (ret) {
+ if ((ret == -EAGAIN) && retry--)
+ goto again1;
+
+ return ret;
+ }
+
+ retry = 1;
+again2:
+ i2c_op.cmd = AXGBE_I2C_CMD_READ;
+ i2c_op.target = phy_data->redrv_addr;
+ i2c_op.len = 1;
+ i2c_op.buf = redrv_data;
+ ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if (ret) {
+ if ((ret == -EAGAIN) && retry--)
+ goto again2;
+
+ return ret;
+ }
+
+ if (redrv_data[0] != 0xff) {
+ PMD_DRV_LOG(ERR, "Redriver write checksum error\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static int axgbe_phy_i2c_read(struct axgbe_port *pdata, unsigned int target,
+ void *reg, unsigned int reg_len,
+ void *val, unsigned int val_len)
+{
+ struct axgbe_i2c_op i2c_op;
+ int retry, ret;
+
+ retry = 1;
+again1:
+ /* Set the specified register to read */
+ i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
+ i2c_op.target = target;
+ i2c_op.len = reg_len;
+ i2c_op.buf = reg;
+ ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if (ret) {
+ if ((ret == -EAGAIN) && retry--)
+ goto again1;
+
+ return ret;
+ }
+
+ retry = 1;
+again2:
+ /* Read the specified register */
+ i2c_op.cmd = AXGBE_I2C_CMD_READ;
+ i2c_op.target = target;
+ i2c_op.len = val_len;
+ i2c_op.buf = val;
+ ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if ((ret == -EAGAIN) && retry--)
+ goto again2;
+
+ return ret;
+}
+
+static int axgbe_phy_sfp_put_mux(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ struct axgbe_i2c_op i2c_op;
+ uint8_t mux_channel;
+
+ if (phy_data->sfp_comm == AXGBE_SFP_COMM_DIRECT)
+ return 0;
+
+ /* Select no mux channels */
+ mux_channel = 0;
+ i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
+ i2c_op.target = phy_data->sfp_mux_address;
+ i2c_op.len = sizeof(mux_channel);
+ i2c_op.buf = &mux_channel;
+
+ return axgbe_phy_i2c_xfer(pdata, &i2c_op);
+}
+
+static int axgbe_phy_sfp_get_mux(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ struct axgbe_i2c_op i2c_op;
+ u8 mux_channel;
+
+ if (phy_data->sfp_comm == AXGBE_SFP_COMM_DIRECT)
+ return 0;
+
+ /* Select desired mux channel */
+ mux_channel = 1 << phy_data->sfp_mux_channel;
+ i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
+ i2c_op.target = phy_data->sfp_mux_address;
+ i2c_op.len = sizeof(mux_channel);
+ i2c_op.buf = &mux_channel;
+
+ return axgbe_phy_i2c_xfer(pdata, &i2c_op);
+}
+
+static void axgbe_phy_put_comm_ownership(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ phy_data->comm_owned = 0;
+
+ pthread_mutex_unlock(&pdata->phy_mutex);
+}
+
+static int axgbe_phy_get_comm_ownership(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ uint64_t timeout;
+ unsigned int mutex_id;
+
+ if (phy_data->comm_owned)
+ return 0;
+
+ /* The I2C and MDIO/GPIO buses are multiplexed between multiple
+ * devices, so the driver needs to take the software mutex and then
+ * the hardware mutexes before it can use the buses.
+ */
+ pthread_mutex_lock(&pdata->phy_mutex);
+
+ /* Clear the mutexes */
+ XP_IOWRITE(pdata, XP_I2C_MUTEX, AXGBE_MUTEX_RELEASE);
+ XP_IOWRITE(pdata, XP_MDIO_MUTEX, AXGBE_MUTEX_RELEASE);
+
+ /* Mutex formats are the same for I2C and MDIO/GPIO */
+ mutex_id = 0;
+ XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ID, phy_data->port_id);
+ XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ACTIVE, 1);
+
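+ /* Poll for up to 5 seconds for both hardware mutexes to read back as
+ * zero, then claim them by writing this port's mutex id.
+ */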
+ timeout = rte_get_timer_cycles() + (rte_get_timer_hz() * 5);
+ while (time_before(rte_get_timer_cycles(), timeout)) {
+ /* Must be all zeroes in order to obtain the mutex */
+ if (XP_IOREAD(pdata, XP_I2C_MUTEX) ||
+ XP_IOREAD(pdata, XP_MDIO_MUTEX)) {
+ rte_delay_us(100);
+ continue;
+ }
+
+ /* Obtain the mutex */
+ XP_IOWRITE(pdata, XP_I2C_MUTEX, mutex_id);
+ XP_IOWRITE(pdata, XP_MDIO_MUTEX, mutex_id);
+
+ phy_data->comm_owned = 1;
+ return 0;
+ }
+
+ pthread_mutex_unlock(&pdata->phy_mutex);
+
+ PMD_DRV_LOG(ERR, "unable to obtain hardware mutexes\n");
+
+ return -ETIMEDOUT;
+}
+
+static void axgbe_phy_sfp_phy_settings(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (phy_data->sfp_mod_absent) {
+ pdata->phy.speed = SPEED_UNKNOWN;
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+ pdata->phy.autoneg = AUTONEG_ENABLE;
+ pdata->phy.advertising = pdata->phy.supported;
+ }
+
+ pdata->phy.advertising &= ~ADVERTISED_Autoneg;
+ pdata->phy.advertising &= ~ADVERTISED_TP;
+ pdata->phy.advertising &= ~ADVERTISED_FIBRE;
+ pdata->phy.advertising &= ~ADVERTISED_100baseT_Full;
+ pdata->phy.advertising &= ~ADVERTISED_1000baseT_Full;
+ pdata->phy.advertising &= ~ADVERTISED_10000baseT_Full;
+ pdata->phy.advertising &= ~ADVERTISED_10000baseR_FEC;
+
+ switch (phy_data->sfp_base) {
+ case AXGBE_SFP_BASE_1000_T:
+ case AXGBE_SFP_BASE_1000_SX:
+ case AXGBE_SFP_BASE_1000_LX:
+ case AXGBE_SFP_BASE_1000_CX:
+ pdata->phy.speed = SPEED_UNKNOWN;
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+ pdata->phy.autoneg = AUTONEG_ENABLE;
+ pdata->phy.advertising |= ADVERTISED_Autoneg;
+ break;
+ case AXGBE_SFP_BASE_10000_SR:
+ case AXGBE_SFP_BASE_10000_LR:
+ case AXGBE_SFP_BASE_10000_LRM:
+ case AXGBE_SFP_BASE_10000_ER:
+ case AXGBE_SFP_BASE_10000_CR:
+ default:
+ pdata->phy.speed = SPEED_10000;
+ pdata->phy.duplex = DUPLEX_FULL;
+ pdata->phy.autoneg = AUTONEG_DISABLE;
+ break;
+ }
+
+ switch (phy_data->sfp_base) {
+ case AXGBE_SFP_BASE_1000_T:
+ case AXGBE_SFP_BASE_1000_CX:
+ case AXGBE_SFP_BASE_10000_CR:
+ pdata->phy.advertising |= ADVERTISED_TP;
+ break;
+ default:
+ pdata->phy.advertising |= ADVERTISED_FIBRE;
+ }
+
+ switch (phy_data->sfp_speed) {
+ case AXGBE_SFP_SPEED_100_1000:
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100)
+ pdata->phy.advertising |= ADVERTISED_100baseT_Full;
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000)
+ pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
+ break;
+ case AXGBE_SFP_SPEED_1000:
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000)
+ pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
+ break;
+ case AXGBE_SFP_SPEED_10000:
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000)
+ pdata->phy.advertising |= ADVERTISED_10000baseT_Full;
+ break;
+ default:
+ /* Choose the fastest supported speed */
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000)
+ pdata->phy.advertising |= ADVERTISED_10000baseT_Full;
+ else if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000)
+ pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
+ else if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100)
+ pdata->phy.advertising |= ADVERTISED_100baseT_Full;
+ }
+}
+
+static bool axgbe_phy_sfp_bit_rate(struct axgbe_sfp_eeprom *sfp_eeprom,
+ enum axgbe_sfp_speed sfp_speed)
+{
+ u8 *sfp_base, min, max;
+
+ sfp_base = sfp_eeprom->base;
+
+ switch (sfp_speed) {
+ case AXGBE_SFP_SPEED_1000:
+ min = AXGBE_SFP_BASE_BR_1GBE_MIN;
+ max = AXGBE_SFP_BASE_BR_1GBE_MAX;
+ break;
+ case AXGBE_SFP_SPEED_10000:
+ min = AXGBE_SFP_BASE_BR_10GBE_MIN;
+ max = AXGBE_SFP_BASE_BR_10GBE_MAX;
+ break;
+ default:
+ return false;
+ }
+
+ return ((sfp_base[AXGBE_SFP_BASE_BR] >= min) &&
+ (sfp_base[AXGBE_SFP_BASE_BR] <= max));
+}
+
+static void axgbe_phy_sfp_external_phy(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (!phy_data->sfp_changed)
+ return;
+
+ phy_data->sfp_phy_avail = 0;
+
+ if (phy_data->sfp_base != AXGBE_SFP_BASE_1000_T)
+ return;
+}
+
+static bool axgbe_phy_belfuse_parse_quirks(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ struct axgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
+
+ if (memcmp(&sfp_eeprom->base[AXGBE_SFP_BASE_VENDOR_NAME],
+ AXGBE_BEL_FUSE_VENDOR, strlen(AXGBE_BEL_FUSE_VENDOR)))
+ return false;
+
+ if (!memcmp(&sfp_eeprom->base[AXGBE_SFP_BASE_VENDOR_PN],
+ AXGBE_BEL_FUSE_PARTNO, strlen(AXGBE_BEL_FUSE_PARTNO))) {
+ phy_data->sfp_base = AXGBE_SFP_BASE_1000_SX;
+ phy_data->sfp_cable = AXGBE_SFP_CABLE_ACTIVE;
+ phy_data->sfp_speed = AXGBE_SFP_SPEED_1000;
+ return true;
+ }
+
+ return false;
+}
+
+static bool axgbe_phy_sfp_parse_quirks(struct axgbe_port *pdata)
+{
+ if (axgbe_phy_belfuse_parse_quirks(pdata))
+ return true;
+
+ return false;
+}
+
+static void axgbe_phy_sfp_parse_eeprom(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ struct axgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
+ uint8_t *sfp_base;
+
+ sfp_base = sfp_eeprom->base;
+
+ if (sfp_base[AXGBE_SFP_BASE_ID] != AXGBE_SFP_ID_SFP)
+ return;
+
+ if (sfp_base[AXGBE_SFP_BASE_EXT_ID] != AXGBE_SFP_EXT_ID_SFP)
+ return;
+
+ if (axgbe_phy_sfp_parse_quirks(pdata))
+ return;
+
+ /* Assume ACTIVE cable unless told it is PASSIVE */
+ if (sfp_base[AXGBE_SFP_BASE_CABLE] & AXGBE_SFP_BASE_CABLE_PASSIVE) {
+ phy_data->sfp_cable = AXGBE_SFP_CABLE_PASSIVE;
+ phy_data->sfp_cable_len = sfp_base[AXGBE_SFP_BASE_CU_CABLE_LEN];
+ } else {
+ phy_data->sfp_cable = AXGBE_SFP_CABLE_ACTIVE;
+ }
+
+ /* Determine the type of SFP */
+ if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_SR)
+ phy_data->sfp_base = AXGBE_SFP_BASE_10000_SR;
+ else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_LR)
+ phy_data->sfp_base = AXGBE_SFP_BASE_10000_LR;
+ else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] &
+ AXGBE_SFP_BASE_10GBE_CC_LRM)
+ phy_data->sfp_base = AXGBE_SFP_BASE_10000_LRM;
+ else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_ER)
+ phy_data->sfp_base = AXGBE_SFP_BASE_10000_ER;
+ else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_SX)
+ phy_data->sfp_base = AXGBE_SFP_BASE_1000_SX;
+ else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_LX)
+ phy_data->sfp_base = AXGBE_SFP_BASE_1000_LX;
+ else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_CX)
+ phy_data->sfp_base = AXGBE_SFP_BASE_1000_CX;
+ else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_T)
+ phy_data->sfp_base = AXGBE_SFP_BASE_1000_T;
+ else if ((phy_data->sfp_cable == AXGBE_SFP_CABLE_PASSIVE) &&
+ axgbe_phy_sfp_bit_rate(sfp_eeprom, AXGBE_SFP_SPEED_10000))
+ phy_data->sfp_base = AXGBE_SFP_BASE_10000_CR;
+
+ switch (phy_data->sfp_base) {
+ case AXGBE_SFP_BASE_1000_T:
+ phy_data->sfp_speed = AXGBE_SFP_SPEED_100_1000;
+ break;
+ case AXGBE_SFP_BASE_1000_SX:
+ case AXGBE_SFP_BASE_1000_LX:
+ case AXGBE_SFP_BASE_1000_CX:
+ phy_data->sfp_speed = AXGBE_SFP_SPEED_1000;
+ break;
+ case AXGBE_SFP_BASE_10000_SR:
+ case AXGBE_SFP_BASE_10000_LR:
+ case AXGBE_SFP_BASE_10000_LRM:
+ case AXGBE_SFP_BASE_10000_ER:
+ case AXGBE_SFP_BASE_10000_CR:
+ phy_data->sfp_speed = AXGBE_SFP_SPEED_10000;
+ break;
+ default:
+ break;
+ }
+}
+
+static bool axgbe_phy_sfp_verify_eeprom(uint8_t cc_in, uint8_t *buf,
+ unsigned int len)
+{
+ uint8_t cc;
+
+ for (cc = 0; len; buf++, len--)
+ cc += *buf;
+
+ return (cc == cc_in) ? true : false;
+}
+
+static int axgbe_phy_sfp_read_eeprom(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ struct axgbe_sfp_eeprom sfp_eeprom;
+ uint8_t eeprom_addr;
+ int ret;
+
+ ret = axgbe_phy_sfp_get_mux(pdata);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "I2C error setting SFP MUX\n");
+ return ret;
+ }
+
+ /* Read the SFP serial ID eeprom */
+ eeprom_addr = 0;
+ ret = axgbe_phy_i2c_read(pdata, AXGBE_SFP_SERIAL_ID_ADDRESS,
+ &eeprom_addr, sizeof(eeprom_addr),
+ &sfp_eeprom, sizeof(sfp_eeprom));
+ if (ret) {
+ PMD_DRV_LOG(ERR, "I2C error reading SFP EEPROM\n");
+ goto put;
+ }
+
+ /* Validate the contents read */
+ if (!axgbe_phy_sfp_verify_eeprom(sfp_eeprom.base[AXGBE_SFP_BASE_CC],
+ sfp_eeprom.base,
+ sizeof(sfp_eeprom.base) - 1)) {
+ ret = -EINVAL;
+ goto put;
+ }
+
+ if (!axgbe_phy_sfp_verify_eeprom(sfp_eeprom.extd[AXGBE_SFP_EXTD_CC],
+ sfp_eeprom.extd,
+ sizeof(sfp_eeprom.extd) - 1)) {
+ ret = -EINVAL;
+ goto put;
+ }
+
+ /* Check for an added or changed SFP */
+ if (memcmp(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom))) {
+ phy_data->sfp_changed = 1;
+ memcpy(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom));
+
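+ /* Diagnostics are only flagged as usable when the module implements
+ * SFF-8472 monitoring and does not require an address-change
+ * sequence to access the diagnostic page.
+ */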
+ if (sfp_eeprom.extd[AXGBE_SFP_EXTD_SFF_8472]) {
+ uint8_t diag_type;
+ diag_type = sfp_eeprom.extd[AXGBE_SFP_EXTD_DIAG];
+
+ if (!(diag_type & AXGBE_SFP_EXTD_DIAG_ADDR_CHANGE))
+ phy_data->sfp_diags = 1;
+ }
+ } else {
+ phy_data->sfp_changed = 0;
+ }
+
+put:
+ axgbe_phy_sfp_put_mux(pdata);
+
+ return ret;
+}
+
+static void axgbe_phy_sfp_signals(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int gpio_input;
+ u8 gpio_reg, gpio_ports[2];
+ int ret;
+
+ /* Read the input port registers */
+ gpio_reg = 0;
+ ret = axgbe_phy_i2c_read(pdata, phy_data->sfp_gpio_address,
+ &gpio_reg, sizeof(gpio_reg),
+ gpio_ports, sizeof(gpio_ports));
+ if (ret) {
+ PMD_DRV_LOG(ERR, "I2C error reading SFP GPIOs\n");
+ return;
+ }
+
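+ /* The SFP GPIO signals come from an I2C I/O expander; merge its two
+ * 8-bit input port registers into one 16-bit value so each signal
+ * can be tested by its configured bit position.
+ */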
+ gpio_input = (gpio_ports[1] << 8) | gpio_ports[0];
+
+ if (phy_data->sfp_gpio_mask & AXGBE_GPIO_NO_MOD_ABSENT) {
+ /* No GPIO, just assume the module is present for now */
+ phy_data->sfp_mod_absent = 0;
+ } else {
+ if (!(gpio_input & (1 << phy_data->sfp_gpio_mod_absent)))
+ phy_data->sfp_mod_absent = 0;
+ }
+
+ if (!(phy_data->sfp_gpio_mask & AXGBE_GPIO_NO_RX_LOS) &&
+ (gpio_input & (1 << phy_data->sfp_gpio_rx_los)))
+ phy_data->sfp_rx_los = 1;
+
+ if (!(phy_data->sfp_gpio_mask & AXGBE_GPIO_NO_TX_FAULT) &&
+ (gpio_input & (1 << phy_data->sfp_gpio_tx_fault)))
+ phy_data->sfp_tx_fault = 1;
+}
+
+static void axgbe_phy_sfp_mod_absent(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ phy_data->sfp_mod_absent = 1;
+ phy_data->sfp_phy_avail = 0;
+ memset(&phy_data->sfp_eeprom, 0, sizeof(phy_data->sfp_eeprom));
+}
+
+static void axgbe_phy_sfp_reset(struct axgbe_phy_data *phy_data)
+{
+ phy_data->sfp_rx_los = 0;
+ phy_data->sfp_tx_fault = 0;
+ phy_data->sfp_mod_absent = 1;
+ phy_data->sfp_diags = 0;
+ phy_data->sfp_base = AXGBE_SFP_BASE_UNKNOWN;
+ phy_data->sfp_cable = AXGBE_SFP_CABLE_UNKNOWN;
+ phy_data->sfp_speed = AXGBE_SFP_SPEED_UNKNOWN;
+}
+
+static void axgbe_phy_sfp_detect(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ /* Reset the SFP signals and info */
+ axgbe_phy_sfp_reset(phy_data);
+
+ ret = axgbe_phy_get_comm_ownership(pdata);
+ if (ret)
+ return;
+
+ /* Read the SFP signals and check for module presence */
+ axgbe_phy_sfp_signals(pdata);
+ if (phy_data->sfp_mod_absent) {
+ axgbe_phy_sfp_mod_absent(pdata);
+ goto put;
+ }
+
+ ret = axgbe_phy_sfp_read_eeprom(pdata);
+ if (ret) {
+ /* Treat any error as if there isn't an SFP plugged in */
+ axgbe_phy_sfp_reset(phy_data);
+ axgbe_phy_sfp_mod_absent(pdata);
+ goto put;
+ }
+
+ axgbe_phy_sfp_parse_eeprom(pdata);
+ axgbe_phy_sfp_external_phy(pdata);
+
+put:
+ axgbe_phy_sfp_phy_settings(pdata);
+ axgbe_phy_put_comm_ownership(pdata);
+}
+
+static void axgbe_phy_phydev_flowctrl(struct axgbe_port *pdata)
+{
+ pdata->phy.tx_pause = 0;
+ pdata->phy.rx_pause = 0;
+}
+
+static enum axgbe_mode axgbe_phy_an73_redrv_outcome(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ enum axgbe_mode mode;
+ unsigned int ad_reg, lp_reg;
+
+ pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
+ pdata->phy.lp_advertising |= ADVERTISED_Backplane;
+
+ /* Use external PHY to determine flow control */
+ if (pdata->phy.pause_autoneg)
+ axgbe_phy_phydev_flowctrl(pdata);
+
+ /* Compare Advertisement and Link Partner register 2 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+ if (lp_reg & 0x80)
+ pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
+ if (lp_reg & 0x20)
+ pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
+
+ ad_reg &= lp_reg;
+ if (ad_reg & 0x80) {
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ mode = AXGBE_MODE_KR;
+ break;
+ default:
+ mode = AXGBE_MODE_SFI;
+ break;
+ }
+ } else if (ad_reg & 0x20) {
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ mode = AXGBE_MODE_KX_1000;
+ break;
+ case AXGBE_PORT_MODE_1000BASE_X:
+ mode = AXGBE_MODE_X;
+ break;
+ case AXGBE_PORT_MODE_SFP:
+ switch (phy_data->sfp_base) {
+ case AXGBE_SFP_BASE_1000_T:
+ mode = AXGBE_MODE_SGMII_1000;
+ break;
+ case AXGBE_SFP_BASE_1000_SX:
+ case AXGBE_SFP_BASE_1000_LX:
+ case AXGBE_SFP_BASE_1000_CX:
+ default:
+ mode = AXGBE_MODE_X;
+ break;
+ }
+ break;
+ default:
+ mode = AXGBE_MODE_SGMII_1000;
+ break;
+ }
+ } else {
+ mode = AXGBE_MODE_UNKNOWN;
+ }
+
+ /* Compare Advertisement and Link Partner register 3 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+ if (lp_reg & 0xc000)
+ pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
+
+ return mode;
+}
+
+static enum axgbe_mode axgbe_phy_an73_outcome(struct axgbe_port *pdata)
+{
+ enum axgbe_mode mode;
+ unsigned int ad_reg, lp_reg;
+
+ pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
+ pdata->phy.lp_advertising |= ADVERTISED_Backplane;
+
+ /* Compare Advertisement and Link Partner register 1 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
+ if (lp_reg & 0x400)
+ pdata->phy.lp_advertising |= ADVERTISED_Pause;
+ if (lp_reg & 0x800)
+ pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause;
+
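+ /* Pause resolution: symmetric pause when both sides advertise
+ * Pause; otherwise the Asym Pause bits decide which direction,
+ * if any, is enabled.
+ */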
+ if (pdata->phy.pause_autoneg) {
+ /* Set flow control based on auto-negotiation result */
+ pdata->phy.tx_pause = 0;
+ pdata->phy.rx_pause = 0;
+
+ if (ad_reg & lp_reg & 0x400) {
+ pdata->phy.tx_pause = 1;
+ pdata->phy.rx_pause = 1;
+ } else if (ad_reg & lp_reg & 0x800) {
+ if (ad_reg & 0x400)
+ pdata->phy.rx_pause = 1;
+ else if (lp_reg & 0x400)
+ pdata->phy.tx_pause = 1;
+ }
+ }
+
+ /* Compare Advertisement and Link Partner register 2 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+ if (lp_reg & 0x80)
+ pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
+ if (lp_reg & 0x20)
+ pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
+
+ ad_reg &= lp_reg;
+ if (ad_reg & 0x80)
+ mode = AXGBE_MODE_KR;
+ else if (ad_reg & 0x20)
+ mode = AXGBE_MODE_KX_1000;
+ else
+ mode = AXGBE_MODE_UNKNOWN;
+
+ /* Compare Advertisement and Link Partner register 3 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+ if (lp_reg & 0xc000)
+ pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
+
+ return mode;
+}
+
+static enum axgbe_mode axgbe_phy_an_outcome(struct axgbe_port *pdata)
+{
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ return axgbe_phy_an73_outcome(pdata);
+ case AXGBE_AN_MODE_CL73_REDRV:
+ return axgbe_phy_an73_redrv_outcome(pdata);
+ case AXGBE_AN_MODE_CL37:
+ case AXGBE_AN_MODE_CL37_SGMII:
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static unsigned int axgbe_phy_an_advertising(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int advertising;
+
+ /* Without a re-driver, just return current advertising */
+ if (!phy_data->redrv)
+ return pdata->phy.advertising;
+
+ /* With the KR re-driver we need to advertise a single speed */
+ advertising = pdata->phy.advertising;
+ advertising &= ~ADVERTISED_1000baseKX_Full;
+ advertising &= ~ADVERTISED_10000baseKR_Full;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ advertising |= ADVERTISED_10000baseKR_Full;
+ break;
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ advertising |= ADVERTISED_1000baseKX_Full;
+ break;
+ case AXGBE_PORT_MODE_1000BASE_T:
+ case AXGBE_PORT_MODE_1000BASE_X:
+ case AXGBE_PORT_MODE_NBASE_T:
+ advertising |= ADVERTISED_1000baseKX_Full;
+ break;
+ case AXGBE_PORT_MODE_10GBASE_T:
+ PMD_DRV_LOG(ERR, "10GBASE_T mode is not supported\n");
+ break;
+ case AXGBE_PORT_MODE_10GBASE_R:
+ advertising |= ADVERTISED_10000baseKR_Full;
+ break;
+ case AXGBE_PORT_MODE_SFP:
+ switch (phy_data->sfp_base) {
+ case AXGBE_SFP_BASE_1000_T:
+ case AXGBE_SFP_BASE_1000_SX:
+ case AXGBE_SFP_BASE_1000_LX:
+ case AXGBE_SFP_BASE_1000_CX:
+ advertising |= ADVERTISED_1000baseKX_Full;
+ break;
+ default:
+ advertising |= ADVERTISED_10000baseKR_Full;
+ break;
+ }
+ break;
+ default:
+ advertising |= ADVERTISED_10000baseKR_Full;
+ break;
+ }
+
+ return advertising;
+}
+
+static int axgbe_phy_an_config(struct axgbe_port *pdata __rte_unused)
+{
+ /* Dummy implementation: there is no case to support external PHY
+ * devices registered through kernel APIs
+ */
+ return 0;
+}
+
+static enum axgbe_an_mode axgbe_phy_an_sfp_mode(struct axgbe_phy_data *phy_data)
+{
+ switch (phy_data->sfp_base) {
+ case AXGBE_SFP_BASE_1000_T:
+ return AXGBE_AN_MODE_CL37_SGMII;
+ case AXGBE_SFP_BASE_1000_SX:
+ case AXGBE_SFP_BASE_1000_LX:
+ case AXGBE_SFP_BASE_1000_CX:
+ return AXGBE_AN_MODE_CL37;
+ default:
+ return AXGBE_AN_MODE_NONE;
+ }
+}
+
+static enum axgbe_an_mode axgbe_phy_an_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* A KR re-driver will always require CL73 AN */
+ if (phy_data->redrv)
+ return AXGBE_AN_MODE_CL73_REDRV;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ return AXGBE_AN_MODE_CL73;
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ return AXGBE_AN_MODE_NONE;
+ case AXGBE_PORT_MODE_1000BASE_T:
+ return AXGBE_AN_MODE_CL37_SGMII;
+ case AXGBE_PORT_MODE_1000BASE_X:
+ return AXGBE_AN_MODE_CL37;
+ case AXGBE_PORT_MODE_NBASE_T:
+ return AXGBE_AN_MODE_CL37_SGMII;
+ case AXGBE_PORT_MODE_10GBASE_T:
+ return AXGBE_AN_MODE_CL73;
+ case AXGBE_PORT_MODE_10GBASE_R:
+ return AXGBE_AN_MODE_NONE;
+ case AXGBE_PORT_MODE_SFP:
+ return axgbe_phy_an_sfp_mode(phy_data);
+ default:
+ return AXGBE_AN_MODE_NONE;
+ }
+}
+
+static int axgbe_phy_set_redrv_mode_mdio(struct axgbe_port *pdata,
+ enum axgbe_phy_redrv_mode mode)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ u16 redrv_reg, redrv_val;
+
+ redrv_reg = AXGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000);
+ redrv_val = (u16)mode;
+
+ return pdata->hw_if.write_ext_mii_regs(pdata, phy_data->redrv_addr,
+ redrv_reg, redrv_val);
+}
+
+static int axgbe_phy_set_redrv_mode_i2c(struct axgbe_port *pdata,
+ enum axgbe_phy_redrv_mode mode)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int redrv_reg;
+ int ret;
+
+ /* Calculate the register to write */
+ redrv_reg = AXGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000);
+
+ ret = axgbe_phy_redrv_write(pdata, redrv_reg, mode);
+
+ return ret;
+}
+
+static void axgbe_phy_set_redrv_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ enum axgbe_phy_redrv_mode mode;
+ int ret;
+
+ if (!phy_data->redrv)
+ return;
+
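+ /* Default the re-driver to CX mode; SFP ports that are not using a
+ * direct-attach copper module are switched to SR mode instead.
+ */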
+ mode = AXGBE_PHY_REDRV_MODE_CX;
+ if ((phy_data->port_mode == AXGBE_PORT_MODE_SFP) &&
+ (phy_data->sfp_base != AXGBE_SFP_BASE_1000_CX) &&
+ (phy_data->sfp_base != AXGBE_SFP_BASE_10000_CR))
+ mode = AXGBE_PHY_REDRV_MODE_SR;
+
+ ret = axgbe_phy_get_comm_ownership(pdata);
+ if (ret)
+ return;
+
+ if (phy_data->redrv_if)
+ axgbe_phy_set_redrv_mode_i2c(pdata, mode);
+ else
+ axgbe_phy_set_redrv_mode_mdio(pdata, mode);
+
+ axgbe_phy_put_comm_ownership(pdata);
+}
+
+static void axgbe_phy_start_ratechange(struct axgbe_port *pdata)
+{
+ if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
+ return;
+}
+
+static void axgbe_phy_complete_ratechange(struct axgbe_port *pdata)
+{
+ unsigned int wait;
+
+ /* Wait for command to complete */
+ wait = AXGBE_RATECHANGE_COUNT;
+ while (wait--) {
+ if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
+ return;
+
+ rte_delay_us(1500);
+ }
+}
+
+static void axgbe_phy_rrc(struct axgbe_port *pdata)
+{
+ unsigned int s0;
+
+ axgbe_phy_start_ratechange(pdata);
+
+ /* Receiver Reset Cycle */
+ s0 = 0;
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 5);
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
+
+ /* Call FW to make the change */
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+
+ axgbe_phy_complete_ratechange(pdata);
+}
+
+static void axgbe_phy_power_off(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ axgbe_phy_start_ratechange(pdata);
+
+ /* Call FW to make the change */
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, 0);
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+ axgbe_phy_complete_ratechange(pdata);
+ phy_data->cur_mode = AXGBE_MODE_UNKNOWN;
+}
+
+static void axgbe_phy_sfi_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int s0;
+
+ axgbe_phy_set_redrv_mode(pdata);
+
+ axgbe_phy_start_ratechange(pdata);
+
+ /* 10G/SFI */
+ s0 = 0;
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 3);
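+ /* For passive copper cables the firmware sub-command encodes the
+ * cable length bucket (<=1m, <=3m, >3m); active cables and optical
+ * modules use sub-command 0.
+ */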
+ if (phy_data->sfp_cable != AXGBE_SFP_CABLE_PASSIVE) {
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
+ } else {
+ if (phy_data->sfp_cable_len <= 1)
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 1);
+ else if (phy_data->sfp_cable_len <= 3)
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 2);
+ else
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 3);
+ }
+
+ /* Call FW to make the change */
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+ axgbe_phy_complete_ratechange(pdata);
+ phy_data->cur_mode = AXGBE_MODE_SFI;
+}
+
+static void axgbe_phy_kr_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int s0;
+
+ axgbe_phy_set_redrv_mode(pdata);
+
+ axgbe_phy_start_ratechange(pdata);
+
+ /* 10G/KR */
+ s0 = 0;
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 4);
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
+
+ /* Call FW to make the change */
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+ axgbe_phy_complete_ratechange(pdata);
+ phy_data->cur_mode = AXGBE_MODE_KR;
+}
+
+static enum axgbe_mode axgbe_phy_cur_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ return phy_data->cur_mode;
+}
+
+static enum axgbe_mode axgbe_phy_switch_baset_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* No switching if not 10GBase-T */
+ if (phy_data->port_mode != AXGBE_PORT_MODE_10GBASE_T)
+ return axgbe_phy_cur_mode(pdata);
+
+ switch (axgbe_phy_cur_mode(pdata)) {
+ case AXGBE_MODE_SGMII_100:
+ case AXGBE_MODE_SGMII_1000:
+ return AXGBE_MODE_KR;
+ case AXGBE_MODE_KR:
+ default:
+ return AXGBE_MODE_SGMII_1000;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_switch_bp_2500_mode(struct axgbe_port *pdata
+ __rte_unused)
+{
+ return AXGBE_MODE_KX_2500;
+}
+
+static enum axgbe_mode axgbe_phy_switch_bp_mode(struct axgbe_port *pdata)
+{
+ /* If we are in KR switch to KX, and vice-versa */
+ switch (axgbe_phy_cur_mode(pdata)) {
+ case AXGBE_MODE_KX_1000:
+ return AXGBE_MODE_KR;
+ case AXGBE_MODE_KR:
+ default:
+ return AXGBE_MODE_KX_1000;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_switch_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ return axgbe_phy_switch_bp_mode(pdata);
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ return axgbe_phy_switch_bp_2500_mode(pdata);
+ case AXGBE_PORT_MODE_1000BASE_T:
+ case AXGBE_PORT_MODE_NBASE_T:
+ case AXGBE_PORT_MODE_10GBASE_T:
+ return axgbe_phy_switch_baset_mode(pdata);
+ case AXGBE_PORT_MODE_1000BASE_X:
+ case AXGBE_PORT_MODE_10GBASE_R:
+ case AXGBE_PORT_MODE_SFP:
+ /* No switching, so just return current mode */
+ return axgbe_phy_cur_mode(pdata);
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_get_basex_mode(struct axgbe_phy_data *phy_data
+ __rte_unused,
+ int speed)
+{
+ switch (speed) {
+ case SPEED_1000:
+ return AXGBE_MODE_X;
+ case SPEED_10000:
+ return AXGBE_MODE_KR;
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_get_baset_mode(struct axgbe_phy_data *phy_data
+ __rte_unused,
+ int speed)
+{
+ switch (speed) {
+ case SPEED_100:
+ return AXGBE_MODE_SGMII_100;
+ case SPEED_1000:
+ return AXGBE_MODE_SGMII_1000;
+ case SPEED_10000:
+ return AXGBE_MODE_KR;
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_get_sfp_mode(struct axgbe_phy_data *phy_data,
+ int speed)
+{
+ switch (speed) {
+ case SPEED_100:
+ return AXGBE_MODE_SGMII_100;
+ case SPEED_1000:
+ if (phy_data->sfp_base == AXGBE_SFP_BASE_1000_T)
+ return AXGBE_MODE_SGMII_1000;
+ else
+ return AXGBE_MODE_X;
+ case SPEED_10000:
+ case SPEED_UNKNOWN:
+ return AXGBE_MODE_SFI;
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_get_bp_2500_mode(int speed)
+{
+ switch (speed) {
+ case SPEED_2500:
+ return AXGBE_MODE_KX_2500;
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_get_bp_mode(int speed)
+{
+ switch (speed) {
+ case SPEED_1000:
+ return AXGBE_MODE_KX_1000;
+ case SPEED_10000:
+ return AXGBE_MODE_KR;
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_get_mode(struct axgbe_port *pdata,
+ int speed)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ return axgbe_phy_get_bp_mode(speed);
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ return axgbe_phy_get_bp_2500_mode(speed);
+ case AXGBE_PORT_MODE_1000BASE_T:
+ case AXGBE_PORT_MODE_NBASE_T:
+ case AXGBE_PORT_MODE_10GBASE_T:
+ return axgbe_phy_get_baset_mode(phy_data, speed);
+ case AXGBE_PORT_MODE_1000BASE_X:
+ case AXGBE_PORT_MODE_10GBASE_R:
+ return axgbe_phy_get_basex_mode(phy_data, speed);
+ case AXGBE_PORT_MODE_SFP:
+ return axgbe_phy_get_sfp_mode(phy_data, speed);
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static void axgbe_phy_set_mode(struct axgbe_port *pdata, enum axgbe_mode mode)
+{
+ switch (mode) {
+ case AXGBE_MODE_KR:
+ axgbe_phy_kr_mode(pdata);
+ break;
+ case AXGBE_MODE_SFI:
+ axgbe_phy_sfi_mode(pdata);
+ break;
+ default:
+ break;
+ }
+}
+
+static bool axgbe_phy_check_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode, u32 advert)
+{
+ if (pdata->phy.autoneg == AUTONEG_ENABLE) {
+ if (pdata->phy.advertising & advert)
+ return true;
+ } else {
+ enum axgbe_mode cur_mode;
+
+ cur_mode = axgbe_phy_get_mode(pdata, pdata->phy.speed);
+ if (cur_mode == mode)
+ return true;
+ }
+
+ return false;
+}
+
+static bool axgbe_phy_use_basex_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ switch (mode) {
+ case AXGBE_MODE_X:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_1000baseT_Full);
+ case AXGBE_MODE_KR:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_10000baseT_Full);
+ default:
+ return false;
+ }
+}
+
+static bool axgbe_phy_use_baset_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ switch (mode) {
+ case AXGBE_MODE_SGMII_100:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_100baseT_Full);
+ case AXGBE_MODE_SGMII_1000:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_1000baseT_Full);
+ case AXGBE_MODE_KR:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_10000baseT_Full);
+ default:
+ return false;
+ }
+}
+
+static bool axgbe_phy_use_sfp_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (mode) {
+ case AXGBE_MODE_X:
+ if (phy_data->sfp_base == AXGBE_SFP_BASE_1000_T)
+ return false;
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_1000baseT_Full);
+ case AXGBE_MODE_SGMII_100:
+ if (phy_data->sfp_base != AXGBE_SFP_BASE_1000_T)
+ return false;
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_100baseT_Full);
+ case AXGBE_MODE_SGMII_1000:
+ if (phy_data->sfp_base != AXGBE_SFP_BASE_1000_T)
+ return false;
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_1000baseT_Full);
+ case AXGBE_MODE_SFI:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_10000baseT_Full);
+ default:
+ return false;
+ }
+}
+
+static bool axgbe_phy_use_bp_2500_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ switch (mode) {
+ case AXGBE_MODE_KX_2500:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_2500baseX_Full);
+ default:
+ return false;
+ }
+}
+
+static bool axgbe_phy_use_bp_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ switch (mode) {
+ case AXGBE_MODE_KX_1000:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_1000baseKX_Full);
+ case AXGBE_MODE_KR:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_10000baseKR_Full);
+ default:
+ return false;
+ }
+}
+
+static bool axgbe_phy_use_mode(struct axgbe_port *pdata, enum axgbe_mode mode)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ return axgbe_phy_use_bp_mode(pdata, mode);
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ return axgbe_phy_use_bp_2500_mode(pdata, mode);
+ case AXGBE_PORT_MODE_1000BASE_T:
+ case AXGBE_PORT_MODE_NBASE_T:
+ case AXGBE_PORT_MODE_10GBASE_T:
+ return axgbe_phy_use_baset_mode(pdata, mode);
+ case AXGBE_PORT_MODE_1000BASE_X:
+ case AXGBE_PORT_MODE_10GBASE_R:
+ return axgbe_phy_use_basex_mode(pdata, mode);
+ case AXGBE_PORT_MODE_SFP:
+ return axgbe_phy_use_sfp_mode(pdata, mode);
+ default:
+ return false;
+ }
+}
+
+static int axgbe_phy_link_status(struct axgbe_port *pdata, int *an_restart)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+
+ *an_restart = 0;
+
+ if (phy_data->port_mode == AXGBE_PORT_MODE_SFP) {
+ /* Check SFP signals */
+ axgbe_phy_sfp_detect(pdata);
+
+ if (phy_data->sfp_changed) {
+ *an_restart = 1;
+ return 0;
+ }
+
+ if (phy_data->sfp_mod_absent || phy_data->sfp_rx_los)
+ return 0;
+ }
+
+ /* Link status is latched low, so read once to clear
+ * and then read again to get current state
+ */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ if (reg & MDIO_STAT1_LSTATUS)
+ return 1;
+
+ /* No link, attempt a receiver reset cycle */
+ if (phy_data->rrc_count++) {
+ phy_data->rrc_count = 0;
+ axgbe_phy_rrc(pdata);
+ }
+
+ return 0;
+}
+
+static void axgbe_phy_sfp_gpio_setup(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+
+ reg = XP_IOREAD(pdata, XP_PROP_3);
+
+ phy_data->sfp_gpio_address = AXGBE_GPIO_ADDRESS_PCA9555 +
+ XP_GET_BITS(reg, XP_PROP_3, GPIO_ADDR);
+
+ phy_data->sfp_gpio_mask = XP_GET_BITS(reg, XP_PROP_3, GPIO_MASK);
+
+ phy_data->sfp_gpio_rx_los = XP_GET_BITS(reg, XP_PROP_3,
+ GPIO_RX_LOS);
+ phy_data->sfp_gpio_tx_fault = XP_GET_BITS(reg, XP_PROP_3,
+ GPIO_TX_FAULT);
+ phy_data->sfp_gpio_mod_absent = XP_GET_BITS(reg, XP_PROP_3,
+ GPIO_MOD_ABS);
+ phy_data->sfp_gpio_rate_select = XP_GET_BITS(reg, XP_PROP_3,
+ GPIO_RATE_SELECT);
+}
+
+static void axgbe_phy_sfp_comm_setup(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg, mux_addr_hi, mux_addr_lo;
+
+ reg = XP_IOREAD(pdata, XP_PROP_4);
+
+ mux_addr_hi = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_HI);
+ mux_addr_lo = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_LO);
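+ /* A low address of AXGBE_SFP_DIRECT means the SFP sits directly on
+ * the I2C bus; otherwise it is reached through a PCA9545 mux at the
+ * address composed below.
+ */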
+ if (mux_addr_lo == AXGBE_SFP_DIRECT)
+ return;
+
+ phy_data->sfp_comm = AXGBE_SFP_COMM_PCA9545;
+ phy_data->sfp_mux_address = (mux_addr_hi << 2) + mux_addr_lo;
+ phy_data->sfp_mux_channel = XP_GET_BITS(reg, XP_PROP_4, MUX_CHAN);
+}
+
+static void axgbe_phy_sfp_setup(struct axgbe_port *pdata)
+{
+ axgbe_phy_sfp_comm_setup(pdata);
+ axgbe_phy_sfp_gpio_setup(pdata);
+}
+
+static bool axgbe_phy_redrv_error(struct axgbe_phy_data *phy_data)
+{
+ if (!phy_data->redrv)
+ return false;
+
+ if (phy_data->redrv_if >= AXGBE_PHY_REDRV_IF_MAX)
+ return true;
+
+ switch (phy_data->redrv_model) {
+ case AXGBE_PHY_REDRV_MODEL_4223:
+ if (phy_data->redrv_lane > 3)
+ return true;
+ break;
+ case AXGBE_PHY_REDRV_MODEL_4227:
+ if (phy_data->redrv_lane > 1)
+ return true;
+ break;
+ default:
+ return true;
+ }
+
+ return false;
+}
+
+static int axgbe_phy_mdio_reset_setup(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+
+ if (phy_data->conn_type != AXGBE_CONN_TYPE_MDIO)
+ return 0;
+ reg = XP_IOREAD(pdata, XP_PROP_3);
+ phy_data->mdio_reset = XP_GET_BITS(reg, XP_PROP_3, MDIO_RESET);
+ switch (phy_data->mdio_reset) {
+ case AXGBE_MDIO_RESET_NONE:
+ case AXGBE_MDIO_RESET_I2C_GPIO:
+ case AXGBE_MDIO_RESET_INT_GPIO:
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported MDIO reset (%#x)\n",
+ phy_data->mdio_reset);
+ return -EINVAL;
+ }
+ if (phy_data->mdio_reset == AXGBE_MDIO_RESET_I2C_GPIO) {
+ phy_data->mdio_reset_addr = AXGBE_GPIO_ADDRESS_PCA9555 +
+ XP_GET_BITS(reg, XP_PROP_3,
+ MDIO_RESET_I2C_ADDR);
+ phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3,
+ MDIO_RESET_I2C_GPIO);
+ } else if (phy_data->mdio_reset == AXGBE_MDIO_RESET_INT_GPIO) {
+ phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3,
+ MDIO_RESET_INT_GPIO);
+ }
+
+ return 0;
+}
+
+static bool axgbe_phy_port_mode_mismatch(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000))
+ return false;
+ break;
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_2500)
+ return false;
+ break;
+ case AXGBE_PORT_MODE_1000BASE_T:
+ if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000))
+ return false;
+ break;
+ case AXGBE_PORT_MODE_1000BASE_X:
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000)
+ return false;
+ break;
+ case AXGBE_PORT_MODE_NBASE_T:
+ if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_2500))
+ return false;
+ break;
+ case AXGBE_PORT_MODE_10GBASE_T:
+ if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000))
+ return false;
+ break;
+ case AXGBE_PORT_MODE_10GBASE_R:
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000)
+ return false;
+ break;
+ case AXGBE_PORT_MODE_SFP:
+ if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000))
+ return false;
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static bool axgbe_phy_conn_type_mismatch(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ if (phy_data->conn_type == AXGBE_CONN_TYPE_BACKPLANE)
+ return false;
+ break;
+ case AXGBE_PORT_MODE_1000BASE_T:
+ case AXGBE_PORT_MODE_1000BASE_X:
+ case AXGBE_PORT_MODE_NBASE_T:
+ case AXGBE_PORT_MODE_10GBASE_T:
+ case AXGBE_PORT_MODE_10GBASE_R:
+ if (phy_data->conn_type == AXGBE_CONN_TYPE_MDIO)
+ return false;
+ break;
+ case AXGBE_PORT_MODE_SFP:
+ if (phy_data->conn_type == AXGBE_CONN_TYPE_SFP)
+ return false;
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static bool axgbe_phy_port_enabled(struct axgbe_port *pdata)
+{
+ unsigned int reg;
+
+ reg = XP_IOREAD(pdata, XP_PROP_0);
+ if (!XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS))
+ return false;
+ if (!XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE))
+ return false;
+
+ return true;
+}
+
+static void axgbe_phy_cdr_track(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (!pdata->vdata->an_cdr_workaround)
+ return;
+
+ if (!phy_data->phy_cdr_notrack)
+ return;
+
+ rte_delay_us(phy_data->phy_cdr_delay + 400);
+
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
+ AXGBE_PMA_CDR_TRACK_EN_MASK,
+ AXGBE_PMA_CDR_TRACK_EN_ON);
+
+ phy_data->phy_cdr_notrack = 0;
+}
+
+static void axgbe_phy_cdr_notrack(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
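+ /* AN/CDR workaround: switch off CDR tracking for the duration of
+ * clause 73 auto-negotiation and issue a receiver reset cycle; the
+ * matching axgbe_phy_cdr_track() call re-enables tracking afterwards.
+ */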
+ if (!pdata->vdata->an_cdr_workaround)
+ return;
+
+ if (phy_data->phy_cdr_notrack)
+ return;
+
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
+ AXGBE_PMA_CDR_TRACK_EN_MASK,
+ AXGBE_PMA_CDR_TRACK_EN_OFF);
+
+ axgbe_phy_rrc(pdata);
+
+ phy_data->phy_cdr_notrack = 1;
+}
+
+static void axgbe_phy_kr_training_post(struct axgbe_port *pdata)
+{
+ if (!pdata->cdr_track_early)
+ axgbe_phy_cdr_track(pdata);
+}
+
+static void axgbe_phy_kr_training_pre(struct axgbe_port *pdata)
+{
+ if (pdata->cdr_track_early)
+ axgbe_phy_cdr_track(pdata);
+}
+
+static void axgbe_phy_an_post(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+ if (phy_data->cur_mode != AXGBE_MODE_KR)
+ break;
+
+ axgbe_phy_cdr_track(pdata);
+
+ switch (pdata->an_result) {
+ case AXGBE_AN_READY:
+ case AXGBE_AN_COMPLETE:
+ break;
+ default:
+ if (phy_data->phy_cdr_delay < AXGBE_CDR_DELAY_MAX)
+ phy_data->phy_cdr_delay += AXGBE_CDR_DELAY_INC;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void axgbe_phy_an_pre(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+ if (phy_data->cur_mode != AXGBE_MODE_KR)
+ break;
+
+ axgbe_phy_cdr_notrack(pdata);
+ break;
+ default:
+ break;
+ }
+}
+
+static void axgbe_phy_stop(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* Reset SFP data */
+ axgbe_phy_sfp_reset(phy_data);
+ axgbe_phy_sfp_mod_absent(pdata);
+
+ /* Reset CDR support */
+ axgbe_phy_cdr_track(pdata);
+
+ /* Power off the PHY */
+ axgbe_phy_power_off(pdata);
+
+ /* Stop the I2C controller */
+ pdata->i2c_if.i2c_stop(pdata);
+}
+
+static int axgbe_phy_start(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ /* Start the I2C controller */
+ ret = pdata->i2c_if.i2c_start(pdata);
+ if (ret)
+ return ret;
+
+ /* Start in highest supported mode */
+ axgbe_phy_set_mode(pdata, phy_data->start_mode);
+
+ /* Reset CDR support */
+ axgbe_phy_cdr_track(pdata);
+
+ /* After starting the I2C controller, we can check for an SFP */
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_SFP:
+ axgbe_phy_sfp_detect(pdata);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int axgbe_phy_reset(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ enum axgbe_mode cur_mode;
+
+ /* Reset by power cycling the PHY */
+ cur_mode = phy_data->cur_mode;
+ axgbe_phy_power_off(pdata);
+ /* The first reset is performed while the mode is still unknown */
+ axgbe_phy_set_mode(pdata, cur_mode);
+ return 0;
+}
+
+static int axgbe_phy_init(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data;
+ unsigned int reg;
+ int ret;
+
+ /* Check if enabled */
+ if (!axgbe_phy_port_enabled(pdata)) {
+ PMD_DRV_LOG(ERR, "device is not enabled\n");
+ return -ENODEV;
+ }
+
+ /* Initialize the I2C controller */
+ ret = pdata->i2c_if.i2c_init(pdata);
+ if (ret)
+ return ret;
+
+ phy_data = rte_zmalloc("phy_data memory", sizeof(*phy_data), 0);
+ if (!phy_data) {
+ PMD_DRV_LOG(ERR, "phy_data allocation failed\n");
+ return -ENOMEM;
+ }
+ pdata->phy_data = phy_data;
+
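+ /* Port configuration (mode, supported speeds, connection type and
+ * device addresses) is read from the XP property registers, which
+ * are expected to be populated by platform firmware.
+ */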
+ reg = XP_IOREAD(pdata, XP_PROP_0);
+ phy_data->port_mode = XP_GET_BITS(reg, XP_PROP_0, PORT_MODE);
+ phy_data->port_id = XP_GET_BITS(reg, XP_PROP_0, PORT_ID);
+ phy_data->port_speeds = XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS);
+ phy_data->conn_type = XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE);
+ phy_data->mdio_addr = XP_GET_BITS(reg, XP_PROP_0, MDIO_ADDR);
+
+ reg = XP_IOREAD(pdata, XP_PROP_4);
+ phy_data->redrv = XP_GET_BITS(reg, XP_PROP_4, REDRV_PRESENT);
+ phy_data->redrv_if = XP_GET_BITS(reg, XP_PROP_4, REDRV_IF);
+ phy_data->redrv_addr = XP_GET_BITS(reg, XP_PROP_4, REDRV_ADDR);
+ phy_data->redrv_lane = XP_GET_BITS(reg, XP_PROP_4, REDRV_LANE);
+ phy_data->redrv_model = XP_GET_BITS(reg, XP_PROP_4, REDRV_MODEL);
+
+ /* Validate the connection requested */
+ if (axgbe_phy_conn_type_mismatch(pdata)) {
+ PMD_DRV_LOG(ERR, "phy mode/connection mismatch (%#x/%#x)\n",
+ phy_data->port_mode, phy_data->conn_type);
+ return -EINVAL;
+ }
+
+ /* Validate the mode requested */
+ if (axgbe_phy_port_mode_mismatch(pdata)) {
+ PMD_DRV_LOG(ERR, "phy mode/speed mismatch (%#x/%#x)\n",
+ phy_data->port_mode, phy_data->port_speeds);
+ return -EINVAL;
+ }
+
+ /* Check for and validate MDIO reset support */
+ ret = axgbe_phy_mdio_reset_setup(pdata);
+ if (ret)
+ return ret;
+
+ /* Validate the re-driver information */
+ if (axgbe_phy_redrv_error(phy_data)) {
+ PMD_DRV_LOG(ERR, "phy re-driver settings error\n");
+ return -EINVAL;
+ }
+ pdata->kr_redrv = phy_data->redrv;
+
+ /* Indicate current mode is unknown */
+ phy_data->cur_mode = AXGBE_MODE_UNKNOWN;
+
+ /* Initialize supported features */
+ pdata->phy.supported = 0;
+
+ switch (phy_data->port_mode) {
+ /* Backplane support */
+ case AXGBE_PORT_MODE_BACKPLANE:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_Backplane;
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) {
+ pdata->phy.supported |= SUPPORTED_1000baseKX_Full;
+ phy_data->start_mode = AXGBE_MODE_KX_1000;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) {
+ pdata->phy.supported |= SUPPORTED_10000baseKR_Full;
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+ pdata->phy.supported |=
+ SUPPORTED_10000baseR_FEC;
+ phy_data->start_mode = AXGBE_MODE_KR;
+ }
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE;
+ break;
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_Backplane;
+ pdata->phy.supported |= SUPPORTED_2500baseX_Full;
+ phy_data->start_mode = AXGBE_MODE_KX_2500;
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE;
+ break;
+
+ /* MDIO 1GBase-T support */
+ case AXGBE_PORT_MODE_1000BASE_T:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_TP;
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) {
+ pdata->phy.supported |= SUPPORTED_100baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_100;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) {
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_1000;
+ }
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_CL22;
+ break;
+
+ /* MDIO Base-X support */
+ case AXGBE_PORT_MODE_1000BASE_X:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_FIBRE;
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_X;
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_CL22;
+ break;
+
+ /* MDIO NBase-T support */
+ case AXGBE_PORT_MODE_NBASE_T:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_TP;
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) {
+ pdata->phy.supported |= SUPPORTED_100baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_100;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) {
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_1000;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_2500) {
+ pdata->phy.supported |= SUPPORTED_2500baseX_Full;
+ phy_data->start_mode = AXGBE_MODE_KX_2500;
+ }
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_CL45;
+ break;
+
+ /* 10GBase-T support */
+ case AXGBE_PORT_MODE_10GBASE_T:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_TP;
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) {
+ pdata->phy.supported |= SUPPORTED_100baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_100;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) {
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_1000;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) {
+ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_KR;
+ }
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE;
+ break;
+
+ /* 10GBase-R support */
+ case AXGBE_PORT_MODE_10GBASE_R:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_TP;
+ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+ pdata->phy.supported |= SUPPORTED_10000baseR_FEC;
+ phy_data->start_mode = AXGBE_MODE_SFI;
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE;
+ break;
+
+ /* SFP support */
+ case AXGBE_PORT_MODE_SFP:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_TP;
+ pdata->phy.supported |= SUPPORTED_FIBRE;
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) {
+ pdata->phy.supported |= SUPPORTED_100baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_100;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) {
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_1000;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) {
+ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SFI;
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+ pdata->phy.supported |=
+ SUPPORTED_10000baseR_FEC;
+ }
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_CL22;
+
+ axgbe_phy_sfp_setup(pdata);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if ((phy_data->conn_type & AXGBE_CONN_TYPE_MDIO) &&
+ (phy_data->phydev_mode != AXGBE_MDIO_MODE_NONE)) {
+ ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->mdio_addr,
+ phy_data->phydev_mode);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "mdio port/clause not compatible (%d/%u)\n",
+ phy_data->mdio_addr, phy_data->phydev_mode);
+ return -EINVAL;
+ }
+ }
+
+ if (phy_data->redrv && !phy_data->redrv_if) {
+ ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->redrv_addr,
+ AXGBE_MDIO_MODE_CL22);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "redriver mdio port not compatible (%u)\n",
+ phy_data->redrv_addr);
+ return -EINVAL;
+ }
+ }
+
+ phy_data->phy_cdr_delay = AXGBE_CDR_DELAY_INIT;
+ return 0;
+}
+void axgbe_init_function_ptrs_phy_v2(struct axgbe_phy_if *phy_if)
+{
+ struct axgbe_phy_impl_if *phy_impl = &phy_if->phy_impl;
+
+ phy_impl->init = axgbe_phy_init;
+ phy_impl->reset = axgbe_phy_reset;
+ phy_impl->start = axgbe_phy_start;
+ phy_impl->stop = axgbe_phy_stop;
+ phy_impl->link_status = axgbe_phy_link_status;
+ phy_impl->use_mode = axgbe_phy_use_mode;
+ phy_impl->set_mode = axgbe_phy_set_mode;
+ phy_impl->get_mode = axgbe_phy_get_mode;
+ phy_impl->switch_mode = axgbe_phy_switch_mode;
+ phy_impl->cur_mode = axgbe_phy_cur_mode;
+ phy_impl->an_mode = axgbe_phy_an_mode;
+ phy_impl->an_config = axgbe_phy_an_config;
+ phy_impl->an_advertising = axgbe_phy_an_advertising;
+ phy_impl->an_outcome = axgbe_phy_an_outcome;
+
+ phy_impl->an_pre = axgbe_phy_an_pre;
+ phy_impl->an_post = axgbe_phy_an_post;
+
+ phy_impl->kr_training_pre = axgbe_phy_kr_training_pre;
+ phy_impl->kr_training_post = axgbe_phy_kr_training_post;
+}
diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
new file mode 100644
index 00000000..b302bdd1
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -0,0 +1,674 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_ethdev.h"
+#include "axgbe_rxtx.h"
+#include "axgbe_phy.h"
+
+#include <rte_time.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+
+static void
+axgbe_rx_queue_release(struct axgbe_rx_queue *rx_queue)
+{
+ uint16_t i;
+ struct rte_mbuf **sw_ring;
+
+ if (rx_queue) {
+ sw_ring = rx_queue->sw_ring;
+ if (sw_ring) {
+ for (i = 0; i < rx_queue->nb_desc; i++) {
+ if (sw_ring[i])
+ rte_pktmbuf_free(sw_ring[i]);
+ }
+ rte_free(sw_ring);
+ }
+ rte_free(rx_queue);
+ }
+}
+
+void axgbe_dev_rx_queue_release(void *rxq)
+{
+ axgbe_rx_queue_release(rxq);
+}
+
+int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ PMD_INIT_FUNC_TRACE();
+ uint32_t size;
+ const struct rte_memzone *dma;
+ struct axgbe_rx_queue *rxq;
+ uint32_t rx_desc = nb_desc;
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ /*
+ * Validate the Rx descriptor count: it must be a power of 2 and
+ * must not exceed the maximum supported by the hardware
+ */
+ if ((!rte_is_power_of_2(rx_desc)) ||
+ rx_desc > pdata->rx_desc_count)
+ return -EINVAL;
+ /* First allocate the rx queue data structure */
+ rxq = rte_zmalloc_socket("ethdev RX queue",
+ sizeof(struct axgbe_rx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!rxq) {
+ PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
+ return -ENOMEM;
+ }
+
+ rxq->cur = 0;
+ rxq->dirty = 0;
+ rxq->pdata = pdata;
+ rxq->mb_pool = mp;
+ rxq->queue_id = queue_idx;
+ rxq->port_id = dev->data->port_id;
+ rxq->nb_desc = rx_desc;
+ rxq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
+ (DMA_CH_INC * rxq->queue_id));
+ rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
+ DMA_CH_RDTR_LO);
+ rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
+
+ /* CRC stripping in AXGBE is a per-port setting, not per-queue */
+ pdata->crc_strip_enable = (rxq->crc_len == 0) ? 1 : 0;
+ rxq->free_thresh = rx_conf->rx_free_thresh ?
+ rx_conf->rx_free_thresh : AXGBE_RX_FREE_THRESH;
+ if (rxq->free_thresh > rxq->nb_desc)
+ rxq->free_thresh = rxq->nb_desc >> 3;
+
+ /* Allocate RX ring hardware descriptors */
+ size = rxq->nb_desc * sizeof(union axgbe_rx_desc);
+ dma = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size, 128,
+ socket_id);
+ if (!dma) {
+ PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed\n");
+ axgbe_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+ rxq->ring_phys_addr = (uint64_t)dma->phys_addr;
+ rxq->desc = (volatile union axgbe_rx_desc *)dma->addr;
+ memset((void *)rxq->desc, 0, size);
+ /* Allocate software ring */
+ size = rxq->nb_desc * sizeof(struct rte_mbuf *);
+ rxq->sw_ring = rte_zmalloc_socket("sw_ring", size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!rxq->sw_ring) {
+ PMD_DRV_LOG(ERR, "rte_zmalloc for sw_ring failed\n");
+ axgbe_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+ dev->data->rx_queues[queue_idx] = rxq;
+ if (!pdata->rx_queues)
+ pdata->rx_queues = dev->data->rx_queues;
+
+ return 0;
+}
+
+static void axgbe_prepare_rx_stop(struct axgbe_port *pdata,
+ unsigned int queue)
+{
+ unsigned int rx_status;
+ unsigned long rx_timeout;
+
+ /* The Rx engine cannot be stopped if it is actively processing
+ * packets. Wait for the Rx queue to empty the Rx fifo. Don't
+ * wait forever though...
+ */
+ rx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
+ rte_get_timer_hz());
+
+ while (time_before(rte_get_timer_cycles(), rx_timeout)) {
+ rx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
+ if ((AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
+ (AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
+ break;
+
+ rte_delay_us(900);
+ }
+
+ if (!time_before(rte_get_timer_cycles(), rx_timeout))
+ PMD_DRV_LOG(ERR,
+ "timed out waiting for Rx queue %u to empty\n",
+ queue);
+}
+
+void axgbe_dev_disable_rx(struct rte_eth_dev *dev)
+{
+ struct axgbe_rx_queue *rxq;
+ struct axgbe_port *pdata = dev->data->dev_private;
+ unsigned int i;
+
+ /* Disable MAC Rx */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);
+
+ /* Prepare for Rx DMA channel stop */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ axgbe_prepare_rx_stop(pdata, i);
+ }
+ /* Disable each Rx queue */
+ AXGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ /* Disable Rx DMA channel */
+ AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 0);
+ }
+}
+
+void axgbe_dev_enable_rx(struct rte_eth_dev *dev)
+{
+ struct axgbe_rx_queue *rxq;
+ struct axgbe_port *pdata = dev->data->dev_private;
+ unsigned int i;
+ unsigned int reg_val = 0;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ /* Enable Rx DMA channel */
+ AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 1);
+ }
+
+ reg_val = 0;
+ for (i = 0; i < pdata->rx_q_count; i++)
+ reg_val |= (0x02 << (i << 1));
+ AXGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);
+
+ /* Enable MAC Rx */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
+ /* Frames are forwarded to the application after CRC stripping */
+ if (pdata->crc_strip_enable) {
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
+ }
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
+}
+
+/* Rx burst function with one-to-one descriptor refill */
+uint16_t
+axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ PMD_INIT_FUNC_TRACE();
+ uint16_t nb_rx = 0;
+ struct axgbe_rx_queue *rxq = rx_queue;
+ volatile union axgbe_rx_desc *desc;
+ uint64_t old_dirty = rxq->dirty;
+ struct rte_mbuf *mbuf, *tmbuf;
+ unsigned int err;
+ uint32_t error_status;
+ uint16_t idx, pidx, pkt_len;
+
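+ /* rxq->cur tracks the next descriptor to check for a completed
+ * reception; rxq->dirty counts descriptors refilled and handed back
+ * to the hardware.
+ */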
+ idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
+ while (nb_rx < nb_pkts) {
+ if (unlikely(idx == rxq->nb_desc))
+ idx = 0;
+
+ desc = &rxq->desc[idx];
+
+ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
+ break;
+ tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (unlikely(!tmbuf)) {
+ PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
+ " queue_id = %u\n",
+ (unsigned int)rxq->port_id,
+ (unsigned int)rxq->queue_id);
+ rte_eth_devices[
+ rxq->port_id].data->rx_mbuf_alloc_failed++;
+ break;
+ }
+ pidx = idx + 1;
+ if (unlikely(pidx == rxq->nb_desc))
+ pidx = 0;
+
+ rte_prefetch0(rxq->sw_ring[pidx]);
+ if ((pidx & 0x3) == 0) {
+ rte_prefetch0(&rxq->desc[pidx]);
+ rte_prefetch0(&rxq->sw_ring[pidx]);
+ }
+
+ mbuf = rxq->sw_ring[idx];
+ /* Check for errors; free the mbuf on non-recoverable ones */
+ err = AXGMAC_GET_BITS_LE(desc->write.desc3,
+ RX_NORMAL_DESC3, ES);
+ error_status = 0;
+ if (unlikely(err)) {
+ error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
+ if ((error_status != AXGBE_L3_CSUM_ERR) &&
+ (error_status != AXGBE_L4_CSUM_ERR)) {
+ rxq->errors++;
+ rte_pktmbuf_free(mbuf);
+ goto err_set;
+ }
+ }
+ if (rxq->pdata->rx_csum_enable) {
+ mbuf->ol_flags = 0;
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
+ mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+ } else if (
+ unlikely(error_status == AXGBE_L4_CSUM_ERR)) {
+ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
+ }
+ rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
+ /* Get the RSS hash */
+ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
+ mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
+ pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3,
+ PL) - rxq->crc_len;
+ /* Mbuf populate */
+ mbuf->next = NULL;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->port_id;
+ mbuf->pkt_len = pkt_len;
+ mbuf->data_len = pkt_len;
+ rxq->bytes += pkt_len;
+ rx_pkts[nb_rx++] = mbuf;
+err_set:
+ rxq->cur++;
+ rxq->sw_ring[idx++] = tmbuf;
+ desc->read.baddr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
+ memset((void *)(&desc->read.desc2), 0, 8);
+ AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
+ rxq->dirty++;
+ }
+ rxq->pkts += nb_rx;
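+ /* If any descriptors were refilled, publish them to the hardware by
+ * advancing the Rx tail pointer to the last refilled descriptor.
+ */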
+ if (rxq->dirty != old_dirty) {
+ rte_wmb();
+ idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
+ low32_value(rxq->ring_phys_addr +
+ (idx * sizeof(union axgbe_rx_desc))));
+ }
+
+ return nb_rx;
+}
+
+/* Tx Apis */
+static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
+{
+ uint16_t i;
+ struct rte_mbuf **sw_ring;
+
+ if (tx_queue) {
+ sw_ring = tx_queue->sw_ring;
+ if (sw_ring) {
+ for (i = 0; i < tx_queue->nb_desc; i++) {
+ if (sw_ring[i])
+ rte_pktmbuf_free(sw_ring[i]);
+ }
+ rte_free(sw_ring);
+ }
+ rte_free(tx_queue);
+ }
+}
+
+void axgbe_dev_tx_queue_release(void *txq)
+{
+ axgbe_tx_queue_release(txq);
+}
+
+int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ PMD_INIT_FUNC_TRACE();
+ uint32_t tx_desc;
+ struct axgbe_port *pdata;
+ struct axgbe_tx_queue *txq;
+ unsigned int tsize;
+ const struct rte_memzone *tz;
+
+ tx_desc = nb_desc;
+ pdata = (struct axgbe_port *)dev->data->dev_private;
+
+ /*
+ * Validate the Tx descriptor count: it must be a power of 2, at
+ * least AXGBE_MIN_RING_DESC, and must not exceed the maximum
+ * supported by the hardware
+ */
+ if ((!rte_is_power_of_2(tx_desc)) ||
+ tx_desc > pdata->tx_desc_count ||
+ tx_desc < AXGBE_MIN_RING_DESC)
+ return -EINVAL;
+
+ /* First allocate the tx queue data structure */
+ txq = rte_zmalloc("ethdev TX queue", sizeof(struct axgbe_tx_queue),
+ RTE_CACHE_LINE_SIZE);
+ if (!txq)
+ return -ENOMEM;
+ txq->pdata = pdata;
+
+ txq->nb_desc = tx_desc;
+ txq->free_thresh = tx_conf->tx_free_thresh ?
+ tx_conf->tx_free_thresh : AXGBE_TX_FREE_THRESH;
+ if (txq->free_thresh > txq->nb_desc)
+ txq->free_thresh = (txq->nb_desc >> 1);
+ txq->free_batch_cnt = txq->free_thresh;
+
+ /* The vector Tx path requires the queue size to be a multiple of the free threshold */
+ if (txq->nb_desc % txq->free_thresh != 0)
+ txq->vector_disable = 1;
+
+ if ((tx_conf->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOOFFLOADS) !=
+ ETH_TXQ_FLAGS_NOOFFLOADS) {
+ txq->vector_disable = 1;
+ }
+
+ /* Allocate TX ring hardware descriptors */
+ tsize = txq->nb_desc * sizeof(struct axgbe_tx_desc);
+ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ tsize, AXGBE_DESC_ALIGN, socket_id);
+ if (!tz) {
+ axgbe_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+ memset(tz->addr, 0, tsize);
+ txq->ring_phys_addr = (uint64_t)tz->phys_addr;
+ txq->desc = tz->addr;
+ txq->queue_id = queue_idx;
+ txq->port_id = dev->data->port_id;
+ txq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
+ (DMA_CH_INC * txq->queue_id));
+ txq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)txq->dma_regs +
+ DMA_CH_TDTR_LO);
+ txq->cur = 0;
+ txq->dirty = 0;
+ txq->nb_desc_free = txq->nb_desc;
+ /* Allocate software ring */
+ tsize = txq->nb_desc * sizeof(struct rte_mbuf *);
+ txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize,
+ RTE_CACHE_LINE_SIZE);
+ if (!txq->sw_ring) {
+ axgbe_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+ dev->data->tx_queues[queue_idx] = txq;
+ if (!pdata->tx_queues)
+ pdata->tx_queues = dev->data->tx_queues;
+
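+ /* Use the vector Tx burst routine only on x86 builds and only when
+ * nothing above disabled it (offloads requested or size mismatch).
+ */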
+ if (txq->vector_disable)
+ dev->tx_pkt_burst = &axgbe_xmit_pkts;
+ else
+#ifdef RTE_ARCH_X86
+ dev->tx_pkt_burst = &axgbe_xmit_pkts_vec;
+#else
+ dev->tx_pkt_burst = &axgbe_xmit_pkts;
+#endif
+
+ return 0;
+}
+
+static void axgbe_txq_prepare_tx_stop(struct axgbe_port *pdata,
+ unsigned int queue)
+{
+ unsigned int tx_status;
+ unsigned long tx_timeout;
+
+ /* The Tx engine cannot be stopped if it is actively processing
+ * packets. Wait for the Tx queue to empty the Tx fifo. Don't
+ * wait forever though...
+ */
+ tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
+ rte_get_timer_hz());
+ while (time_before(rte_get_timer_cycles(), tx_timeout)) {
+ tx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
+ if ((AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
+ (AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
+ break;
+
+ rte_delay_us(900);
+ }
+
+ if (!time_before(rte_get_timer_cycles(), tx_timeout))
+ PMD_DRV_LOG(ERR,
+ "timed out waiting for Tx queue %u to empty\n",
+ queue);
+}
+
+static void axgbe_prepare_tx_stop(struct axgbe_port *pdata,
+ unsigned int queue)
+{
+ unsigned int tx_dsr, tx_pos, tx_qidx;
+ unsigned int tx_status;
+ unsigned long tx_timeout;
+
+ if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
+ return axgbe_txq_prepare_tx_stop(pdata, queue);
+
+ /* Calculate the status register to read and the position within */
+ if (queue < DMA_DSRX_FIRST_QUEUE) {
+ tx_dsr = DMA_DSR0;
+ tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
+ } else {
+ tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;
+
+ tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
+ tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
+ DMA_DSRX_TPS_START;
+ }
+
+ /* The Tx engine cannot be stopped if it is actively processing
+ * descriptors. Wait for the Tx engine to enter the stopped or
+ * suspended state. Don't wait forever though...
+ */
+ tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
+ rte_get_timer_hz());
+ while (time_before(rte_get_timer_cycles(), tx_timeout)) {
+ tx_status = AXGMAC_IOREAD(pdata, tx_dsr);
+ tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
+ if ((tx_status == DMA_TPS_STOPPED) ||
+ (tx_status == DMA_TPS_SUSPENDED))
+ break;
+
+ rte_delay_us(900);
+ }
+
+ if (!time_before(rte_get_timer_cycles(), tx_timeout))
+ PMD_DRV_LOG(ERR,
+ "timed out waiting for Tx DMA channel %u to stop\n",
+ queue);
+}
+
+void axgbe_dev_disable_tx(struct rte_eth_dev *dev)
+{
+ struct axgbe_tx_queue *txq;
+ struct axgbe_port *pdata = dev->data->dev_private;
+ unsigned int i;
+
+ /* Prepare for stopping DMA channel */
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ txq = dev->data->tx_queues[i];
+ axgbe_prepare_tx_stop(pdata, i);
+ }
+ /* Disable MAC Tx */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
+ /* Disable each Tx queue */
+ for (i = 0; i < pdata->tx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
+ 0);
+ /* Disable each Tx DMA channel */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 0);
+ }
+}
+
+void axgbe_dev_enable_tx(struct rte_eth_dev *dev)
+{
+ struct axgbe_tx_queue *txq;
+ struct axgbe_port *pdata = dev->data->dev_private;
+ unsigned int i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ /* Enable Tx DMA channel */
+ AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 1);
+ }
+ /* Enable each Tx queue */
+ for (i = 0; i < pdata->tx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
+ MTL_Q_ENABLED);
+ /* Enable MAC Tx */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
+}
+
+/* Free Tx conformed mbufs */
+static void axgbe_xmit_cleanup(struct axgbe_tx_queue *txq)
+{
+ volatile struct axgbe_tx_desc *desc;
+ uint16_t idx;
+
+ idx = AXGBE_GET_DESC_IDX(txq, txq->dirty);
+ while (txq->cur != txq->dirty) {
+ if (unlikely(idx == txq->nb_desc))
+ idx = 0;
+ desc = &txq->desc[idx];
+ /* Check for ownership */
+ if (AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
+ return;
+ memset((void *)&desc->desc2, 0, 8);
+ /* Free mbuf */
+ rte_pktmbuf_free(txq->sw_ring[idx]);
+ txq->sw_ring[idx++] = NULL;
+ txq->dirty++;
+ }
+}
+
+/* Tx descriptor formation
+ * Assumes each mbuf is linear (a single segment) and consumes
+ * exactly one descriptor
+ */
+static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
+ struct rte_mbuf *mbuf)
+{
+ volatile struct axgbe_tx_desc *desc;
+ uint16_t idx;
+ uint64_t mask;
+
+ idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+ desc = &txq->desc[idx];
+
+ /* Update buffer address and length */
+ desc->baddr = rte_mbuf_data_iova(mbuf);
+ AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
+ mbuf->pkt_len);
+ /* Total msg length to transmit */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
+ mbuf->pkt_len);
+ /* Mark it as First and Last Descriptor */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
+ /* Mark it as a NORMAL descriptor */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+ /* Configure h/w offload */
+ mask = mbuf->ol_flags & PKT_TX_L4_MASK;
+ if ((mask == PKT_TX_TCP_CKSUM) || (mask == PKT_TX_UDP_CKSUM))
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
+ else if (mbuf->ol_flags & PKT_TX_IP_CKSUM)
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
+ rte_wmb();
+
+ /* Set OWN bit */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
+ rte_wmb();
+
+ /* Save mbuf */
+ txq->sw_ring[idx] = mbuf;
+ /* Update current index */
+ txq->cur++;
+ /* Update stats */
+ txq->bytes += mbuf->pkt_len;
+
+ return 0;
+}
+
+/* Tx burst function registered with the ethdev layer */
+uint16_t
+axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (unlikely(nb_pkts == 0))
+ return nb_pkts;
+
+ struct axgbe_tx_queue *txq;
+ uint16_t nb_desc_free;
+ uint16_t nb_pkt_sent = 0;
+ uint16_t idx;
+ uint32_t tail_addr;
+ struct rte_mbuf *mbuf;
+
+ txq = (struct axgbe_tx_queue *)tx_queue;
+ nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+
+ if (unlikely(nb_desc_free <= txq->free_thresh)) {
+ axgbe_xmit_cleanup(txq);
+ nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+ if (unlikely(nb_desc_free == 0))
+ return 0;
+ }
+ nb_pkts = RTE_MIN(nb_desc_free, nb_pkts);
+ while (nb_pkts--) {
+ mbuf = *tx_pkts++;
+ if (axgbe_xmit_hw(txq, mbuf))
+ goto out;
+ nb_pkt_sent++;
+ }
+out:
+ /* Sync read and write */
+ rte_mb();
+ idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+ tail_addr = low32_value(txq->ring_phys_addr +
+ idx * sizeof(struct axgbe_tx_desc));
+ /* Update tail reg with next immediate address to kick Tx DMA channel */
+ AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
+ txq->pkts += nb_pkt_sent;
+ return nb_pkt_sent;
+}
+
+void axgbe_dev_clear_queues(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ uint8_t i;
+ struct axgbe_rx_queue *rxq;
+ struct axgbe_tx_queue *txq;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+
+ if (rxq) {
+ axgbe_rx_queue_release(rxq);
+ dev->data->rx_queues[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+
+ if (txq) {
+ axgbe_tx_queue_release(txq);
+ dev->data->tx_queues[i] = NULL;
+ }
+ }
+}
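The Tx path above tracks cur and dirty as free-running 64-bit counters and maps them onto ring slots with a power-of-two mask (AXGBE_GET_DESC_IDX), so the in-flight and free descriptor counts fall out of simple subtraction. A minimal standalone sketch of that bookkeeping, using a hypothetical 8-entry ring rather than the driver's structures:

    /* Sketch only: free-running indices over a power-of-two ring. */
    #include <stdio.h>
    #include <stdint.h>

    #define NB_DESC 8U                               /* must be a power of two */
    #define DESC_IDX(i) ((unsigned int)((i) & (NB_DESC - 1U)))

    int main(void)
    {
            uint64_t cur = 10;   /* descriptors handed to hardware so far */
            uint64_t dirty = 6;  /* descriptors reported back as completed */

            printf("in flight: %u, next slot: %u, free: %u\n",
                   (unsigned int)(cur - dirty), DESC_IDX(cur),
                   (unsigned int)(NB_DESC - (cur - dirty)));
            return 0;
    }

This mirrors the "nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty)" computation in axgbe_xmit_pkts() above.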
diff --git a/drivers/net/axgbe/axgbe_rxtx.h b/drivers/net/axgbe/axgbe_rxtx.h
new file mode 100644
index 00000000..917da58c
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_rxtx.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#ifndef _AXGBE_RXTX_H_
+#define _AXGBE_RXTX_H_
+
+/* To suppress gcc warnings related to descriptor casting */
+#ifdef RTE_TOOLCHAIN_GCC
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+#ifdef RTE_TOOLCHAIN_CLANG
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+/* Descriptor related defines */
+#define AXGBE_MAX_RING_DESC 4096 /* should be a power of 2 */
+#define AXGBE_TX_DESC_MIN_FREE (AXGBE_MAX_RING_DESC >> 3)
+#define AXGBE_TX_DESC_MAX_PROC (AXGBE_MAX_RING_DESC >> 1)
+#define AXGBE_MIN_RING_DESC 32
+#define RTE_AXGBE_DESCS_PER_LOOP 4
+#define RTE_AXGBE_MAX_RX_BURST 32
+
+#define AXGBE_RX_FREE_THRESH 32
+#define AXGBE_TX_FREE_THRESH 32
+
+#define AXGBE_DESC_ALIGN 128
+#define AXGBE_DESC_OWN 0x80000000
+#define AXGBE_ERR_STATUS 0x000f0000
+#define AXGBE_L3_CSUM_ERR 0x00050000
+#define AXGBE_L4_CSUM_ERR 0x00060000
+
+#include "axgbe_common.h"
+
+#define AXGBE_GET_DESC_PT(_queue, _idx) \
+ (((_queue)->desc) + \
+ ((_idx) & ((_queue)->nb_desc - 1)))
+
+#define AXGBE_GET_DESC_IDX(_queue, _idx) \
+ ((_idx) & ((_queue)->nb_desc - 1))
+
+/* Rx desc format */
+union axgbe_rx_desc {
+ struct {
+ uint64_t baddr;
+ uint32_t desc2;
+ uint32_t desc3;
+ } read;
+ struct {
+ uint32_t desc0;
+ uint32_t desc1;
+ uint32_t desc2;
+ uint32_t desc3;
+ } write;
+};
+
+struct axgbe_rx_queue {
+ /* mbuf pool for Rx buffers */
+ struct rte_mempool *mb_pool;
+ /* H/w Rx buffer size configured in DMA */
+ unsigned int buf_size;
+ /* CRC h/w offload */
+ uint16_t crc_len;
+ /* address of s/w rx buffers */
+ struct rte_mbuf **sw_ring;
+ /* Port private data */
+ struct axgbe_port *pdata;
+ /* Number of Rx descriptors in queue */
+ uint16_t nb_desc;
+ /* Max number of free Rx descriptors to hold */
+ uint16_t free_thresh;
+ /* Index of descriptor to check for packet availability */
+ uint64_t cur;
+ /* Index of descriptor to check for buffer reallocation */
+ uint64_t dirty;
+ /* Software Rx descriptor ring */
+ volatile union axgbe_rx_desc *desc;
+ /* Ring physical address */
+ uint64_t ring_phys_addr;
+ /* DMA channel register address */
+ void *dma_regs;
+ /* DMA channel tail register address */
+ volatile uint32_t *dma_tail_reg;
+ /* DPDK queue index */
+ uint16_t queue_id;
+ /* DPDK port id */
+ uint16_t port_id;
+ /* queue stats */
+ uint64_t pkts;
+ uint64_t bytes;
+ uint64_t errors;
+ /* Number of mbufs allocated from pool */
+ uint64_t mbuf_alloc;
+
+} __rte_cache_aligned;
+
+/* Tx descriptor format */
+struct axgbe_tx_desc {
+ phys_addr_t baddr;
+ uint32_t desc2;
+ uint32_t desc3;
+};
+
+struct axgbe_tx_queue {
+ /* Port private data reference */
+ struct axgbe_port *pdata;
+ /* Number of Tx descriptors in queue */
+ uint16_t nb_desc;
+ /* Start freeing TX buffers if there are less free descriptors than
+ * this value
+ */
+ uint16_t free_thresh;
+ /* Available descriptors for Tx processing */
+ uint16_t nb_desc_free;
+ /* Batch of mbufs/descs to release */
+ uint16_t free_batch_cnt;
+ /* Flag for vector support */
+ uint16_t vector_disable;
+ /* Index of descriptor to be used for current transfer */
+ uint64_t cur;
+ /* Index of descriptor to check for transfer complete */
+ uint64_t dirty;
+ /* Virtual address of ring */
+ volatile struct axgbe_tx_desc *desc;
+ /* Physical address of ring */
+ uint64_t ring_phys_addr;
+ /* Dma channel register space */
+ void *dma_regs;
+ /* DMA tail register address of ring */
+ volatile uint32_t *dma_tail_reg;
+ /* Tx queue index/id */
+ uint16_t queue_id;
+ /* Tx mbufs mapped to Tx descriptors; freed once transmission
+  * is confirmed
+  */
+ struct rte_mbuf **sw_ring;
+ /* DPDK port id */
+ uint16_t port_id;
+ /* queue stats */
+ uint64_t pkts;
+ uint64_t bytes;
+ uint64_t errors;
+
+} __rte_cache_aligned;
+
+/* Queue related APIs */
+
+/*
+ * RX/TX function prototypes
+ */
+
+
+void axgbe_dev_tx_queue_release(void *txq);
+int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+void axgbe_dev_enable_tx(struct rte_eth_dev *dev);
+void axgbe_dev_disable_tx(struct rte_eth_dev *dev);
+int axgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int axgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
+uint16_t axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+
+void axgbe_dev_rx_queue_release(void *rxq);
+int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
+void axgbe_dev_enable_rx(struct rte_eth_dev *dev);
+void axgbe_dev_disable_rx(struct rte_eth_dev *dev);
+int axgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int axgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+uint16_t axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t axgbe_recv_pkts_threshold_refresh(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+void axgbe_dev_clear_queues(struct rte_eth_dev *dev);
+
+#endif /* _AXGBE_RXTX_H_ */
diff --git a/drivers/net/axgbe/axgbe_rxtx_vec_sse.c b/drivers/net/axgbe/axgbe_rxtx_vec_sse.c
new file mode 100644
index 00000000..9be70371
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_rxtx_vec_sse.c
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_ethdev.h"
+#include "axgbe_rxtx.h"
+#include "axgbe_phy.h"
+
+#include <rte_time.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+
+/* Useful to avoid shifting for every descriptor preparation */
+#define TX_DESC_CTRL_FLAGS 0xb000000000000000
+#define TX_FREE_BULK 8
+#define TX_FREE_BULK_CHECK (TX_FREE_BULK - 1)
+
+static inline void
+axgbe_vec_tx(volatile struct axgbe_tx_desc *desc,
+ struct rte_mbuf *mbuf)
+{
+ __m128i descriptor = _mm_set_epi64x((uint64_t)mbuf->pkt_len << 32 |
+ TX_DESC_CTRL_FLAGS | mbuf->data_len,
+ mbuf->buf_iova
+ + mbuf->data_off);
+ _mm_store_si128((__m128i *)desc, descriptor);
+}
+
+static void
+axgbe_xmit_cleanup_vec(struct axgbe_tx_queue *txq)
+{
+ volatile struct axgbe_tx_desc *desc;
+ int idx, i;
+
+ idx = AXGBE_GET_DESC_IDX(txq, txq->dirty + txq->free_batch_cnt
+ - 1);
+ desc = &txq->desc[idx];
+ if (desc->desc3 & AXGBE_DESC_OWN)
+ return;
+ /* memset avoided for desc ctrl fields since in vec_tx path
+ * all 128 bits are populated
+ */
+ for (i = 0; i < txq->free_batch_cnt; i++, idx--)
+ rte_pktmbuf_free_seg(txq->sw_ring[idx]);
+
+ txq->dirty += txq->free_batch_cnt;
+ txq->nb_desc_free += txq->free_batch_cnt;
+}
+
+uint16_t
+axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ struct axgbe_tx_queue *txq;
+ uint16_t idx, nb_commit, loop, i;
+ uint32_t tail_addr;
+
+ txq = (struct axgbe_tx_queue *)tx_queue;
+ if (txq->nb_desc_free < txq->free_thresh) {
+ axgbe_xmit_cleanup_vec(txq);
+ if (unlikely(txq->nb_desc_free == 0))
+ return 0;
+ }
+ nb_pkts = RTE_MIN(txq->nb_desc_free, nb_pkts);
+ nb_commit = nb_pkts;
+ idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+ loop = txq->nb_desc - idx;
+ if (nb_commit >= loop) {
+ for (i = 0; i < loop; ++i, ++idx, ++tx_pkts) {
+ axgbe_vec_tx(&txq->desc[idx], *tx_pkts);
+ txq->sw_ring[idx] = *tx_pkts;
+ }
+ nb_commit -= loop;
+ idx = 0;
+ }
+ for (i = 0; i < nb_commit; ++i, ++idx, ++tx_pkts) {
+ axgbe_vec_tx(&txq->desc[idx], *tx_pkts);
+ txq->sw_ring[idx] = *tx_pkts;
+ }
+ txq->cur += nb_pkts;
+ tail_addr = (uint32_t)(txq->ring_phys_addr +
+ idx * sizeof(struct axgbe_tx_desc));
+ /* Update tail reg with next immediate address to kick Tx DMA channel*/
+ rte_write32(tail_addr, (void *)txq->dma_tail_reg);
+ txq->pkts += nb_pkts;
+ txq->nb_desc_free -= nb_pkts;
+
+ return nb_pkts;
+}
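Neither axgbe_xmit_pkts() nor axgbe_xmit_pkts_vec() is called by applications directly; the PMD installs one of them as the port's tx_pkt_burst handler, and traffic flows through the generic ethdev burst API. A hedged illustration of typical application-side usage (port, queue and mbuf setup are assumed; this snippet is not part of the patch):

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    /* Push a batch of already-built mbufs out of one Tx queue,
     * retrying until everything is accepted (may spin if the
     * queue never drains).
     */
    static void
    send_all(uint16_t port_id, uint16_t queue_id,
             struct rte_mbuf **pkts, uint16_t nb)
    {
            uint16_t sent = 0;

            while (sent < nb)
                    sent += rte_eth_tx_burst(port_id, queue_id,
                                             pkts + sent, nb - sent);
    }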
diff --git a/drivers/net/axgbe/meson.build b/drivers/net/axgbe/meson.build
new file mode 100644
index 00000000..548ffff7
--- /dev/null
+++ b/drivers/net/axgbe/meson.build
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+sources = files('axgbe_ethdev.c',
+ 'axgbe_dev.c',
+ 'axgbe_mdio.c',
+ 'axgbe_phy_impl.c',
+ 'axgbe_i2c.c',
+ 'axgbe_rxtx.c')
+
+cflags += '-Wno-cast-qual'
+
+if arch_subdir == 'x86'
+ sources += files('axgbe_rxtx_vec_sse.c')
+endif
diff --git a/drivers/net/axgbe/rte_pmd_axgbe_version.map b/drivers/net/axgbe/rte_pmd_axgbe_version.map
new file mode 100644
index 00000000..b26efa67
--- /dev/null
+++ b/drivers/net/axgbe/rte_pmd_axgbe_version.map
@@ -0,0 +1,4 @@
+DPDK_18.05 {
+
+ local: *;
+};
diff --git a/drivers/net/bnx2x/LICENSE.bnx2x_pmd b/drivers/net/bnx2x/LICENSE.bnx2x_pmd
index 96c7c1e1..64c6ef2c 100644
--- a/drivers/net/bnx2x/LICENSE.bnx2x_pmd
+++ b/drivers/net/bnx2x/LICENSE.bnx2x_pmd
@@ -1,28 +1,3 @@
-/*
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Broadcom Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2014-2018 Cavium Inc.
*/
diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile
index 90ff8b1e..150b4cfa 100644
--- a/drivers/net/bnx2x/Makefile
+++ b/drivers/net/bnx2x/Makefile
@@ -1,3 +1,8 @@
+# Copyright (c) 2014 - 2018 Cavium Inc.
+# All rights reserved.
+# www.cavium.com
+#
+# See LICENSE.bnx2x_pmd for copyright and licensing details.
include $(RTE_SDK)/mk/rte.vars.mk
#
@@ -17,10 +22,6 @@ EXPORT_MAP := rte_pmd_bnx2x_version.map
LIBABIVER := 1
-ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
-CFLAGS += -wd188 #188: enumerated type mixed with another type
-endif
-
#
# all source are stored in SRCS-y
#
diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c
index fb02d0f3..84ade5fb 100644
--- a/drivers/net/bnx2x/bnx2x.c
+++ b/drivers/net/bnx2x/bnx2x.c
@@ -6,9 +6,9 @@
* Gary Zambrano <zambrano@broadcom.com>
*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
@@ -170,16 +170,16 @@ bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size, struct bnx2x_dma *dma,
dma->sc = sc;
if (IS_PF(sc))
- sprintf(mz_name, "bnx2x%d_%s_%" PRIx64, SC_ABS_FUNC(sc), msg,
+ snprintf(mz_name, sizeof(mz_name), "bnx2x%d_%s_%" PRIx64, SC_ABS_FUNC(sc), msg,
rte_get_timer_cycles());
else
- sprintf(mz_name, "bnx2x%d_%s_%" PRIx64, sc->pcie_device, msg,
+ snprintf(mz_name, sizeof(mz_name), "bnx2x%d_%s_%" PRIx64, sc->pcie_device, msg,
rte_get_timer_cycles());
/* Caller must take care that strlen(mz_name) < RTE_MEMZONE_NAMESIZE */
- z = rte_memzone_reserve_aligned(mz_name, (uint64_t) (size),
+ z = rte_memzone_reserve_aligned(mz_name, (uint64_t)size,
SOCKET_ID_ANY,
- 0, align);
+ RTE_MEMZONE_IOVA_CONTIG, align);
if (z == NULL) {
PMD_DRV_LOG(ERR, "DMA alloc failed for %s", msg);
return -ENOMEM;
@@ -8285,16 +8285,6 @@ static int bnx2x_get_device_info(struct bnx2x_softc *sc)
REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
}
-
-/*
- * Enable internal target-read (in case we are probed after PF
- * FLR). Must be done prior to any BAR read access. Only for
- * 57712 and up
- */
- if (!CHIP_IS_E1x(sc)) {
- REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ,
- 1);
- }
}
/* get the nvram size */
@@ -9671,7 +9661,17 @@ int bnx2x_attach(struct bnx2x_softc *sc)
bnx2x_init_rte(sc);
if (IS_PF(sc)) {
-/* get device info and set params */
+ /* Enable internal target-read (in case we are probed after PF
+ * FLR). Must be done prior to any BAR read access. Only for
+ * 57712 and up
+ */
+ if (!CHIP_IS_E1x(sc)) {
+ REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ,
+ 1);
+ DELAY(200000);
+ }
+
+ /* get device info and set params */
if (bnx2x_get_device_info(sc) != 0) {
PMD_DRV_LOG(NOTICE, "getting device info");
return -ENXIO;
@@ -9680,7 +9680,7 @@ int bnx2x_attach(struct bnx2x_softc *sc)
/* get phy settings from shmem and 'and' against admin settings */
bnx2x_get_phy_info(sc);
} else {
-/* Left mac of VF unfilled, PF should set it for VF */
+ /* Leave the VF MAC unfilled; the PF should set it for the VF */
memset(sc->link_params.mac_addr, 0, ETHER_ADDR_LEN);
}
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 17075d38..4150fd85 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -6,9 +6,9 @@
* Gary Zambrano <zambrano@broadcom.com>
*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c
index 483d5a17..6a9cd581 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.c
+++ b/drivers/net/bnx2x/bnx2x_ethdev.c
@@ -1,9 +1,9 @@
/*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
*
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
@@ -140,11 +140,13 @@ static int
bnx2x_dev_configure(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+
int mp_ncpus = sysconf(_SC_NPROCESSORS_CONF);
PMD_INIT_FUNC_TRACE();
- if (dev->data->dev_conf.rxmode.jumbo_frame)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len;
if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) {
@@ -447,13 +449,13 @@ static void
bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct bnx2x_softc *sc = dev->data->dev_private;
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->max_rx_queues = sc->max_rx_queues;
dev_info->max_tx_queues = sc->max_tx_queues;
dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
dev_info->max_rx_pktlen = BNX2X_MAX_RX_PKT_LEN;
dev_info->max_mac_addrs = BNX2X_MAX_MAC_ADDRS;
dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
}
static int
@@ -642,24 +644,14 @@ static struct rte_pci_driver rte_bnx2xvf_pmd;
static int eth_bnx2x_pci_probe(struct rte_pci_driver *pci_drv,
struct rte_pci_device *pci_dev)
{
- struct rte_eth_dev *eth_dev;
- int ret;
-
- eth_dev = rte_eth_dev_pci_allocate(pci_dev, sizeof(struct bnx2x_softc));
- if (!eth_dev)
- return -ENOMEM;
-
if (pci_drv == &rte_bnx2x_pmd)
- ret = eth_bnx2x_dev_init(eth_dev);
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct bnx2x_softc), eth_bnx2x_dev_init);
else if (pci_drv == &rte_bnx2xvf_pmd)
- ret = eth_bnx2xvf_dev_init(eth_dev);
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct bnx2x_softc), eth_bnx2xvf_dev_init);
else
- ret = -EINVAL;
-
- if (ret)
- rte_eth_dev_pci_release(eth_dev);
-
- return ret;
+ return -EINVAL;
}
static int eth_bnx2x_pci_remove(struct rte_pci_device *pci_dev)
@@ -695,10 +687,10 @@ RTE_INIT(bnx2x_init_log);
static void
bnx2x_init_log(void)
{
- bnx2x_logtype_init = rte_log_register("pmd.bnx2x.init");
+ bnx2x_logtype_init = rte_log_register("pmd.net.bnx2x.init");
if (bnx2x_logtype_init >= 0)
rte_log_set_level(bnx2x_logtype_init, RTE_LOG_NOTICE);
- bnx2x_logtype_driver = rte_log_register("pmd.bnx2x.driver");
+ bnx2x_logtype_driver = rte_log_register("pmd.net.bnx2x.driver");
if (bnx2x_logtype_driver >= 0)
rte_log_set_level(bnx2x_logtype_driver, RTE_LOG_NOTICE);
}
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.h b/drivers/net/bnx2x/bnx2x_ethdev.h
index 37cac158..f05be7ee 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.h
+++ b/drivers/net/bnx2x/bnx2x_ethdev.h
@@ -1,9 +1,9 @@
/*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
*
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/bnx2x/bnx2x_logs.h b/drivers/net/bnx2x/bnx2x_logs.h
index 08c1b764..69a2fe1d 100644
--- a/drivers/net/bnx2x/bnx2x_logs.h
+++ b/drivers/net/bnx2x/bnx2x_logs.h
@@ -1,9 +1,9 @@
/*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
*
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/bnx2x/bnx2x_rxtx.c b/drivers/net/bnx2x/bnx2x_rxtx.c
index a0d4ac92..331884cf 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.c
+++ b/drivers/net/bnx2x/bnx2x_rxtx.c
@@ -1,9 +1,9 @@
/*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
*
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
@@ -26,7 +26,8 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
if (mz)
return mz;
- return rte_memzone_reserve_aligned(z_name, ring_size, socket_id, 0, BNX2X_PAGE_SIZE);
+ return rte_memzone_reserve_aligned(z_name, ring_size, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, BNX2X_PAGE_SIZE);
}
static void
@@ -140,7 +141,8 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
return -ENOMEM;
}
rxq->sw_ring[idx] = mbuf;
- rxq->rx_ring[idx] = mbuf->buf_iova;
+ rxq->rx_ring[idx] =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
}
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
@@ -400,7 +402,8 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rx_mb = rxq->sw_ring[bd_cons];
rxq->sw_ring[bd_cons] = new_mb;
- rxq->rx_ring[bd_prod] = new_mb->buf_iova;
+ rxq->rx_ring[bd_prod] =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(new_mb));
rx_pref = NEXT_RX_BD(bd_cons) & MAX_RX_BD(rxq);
rte_prefetch0(rxq->sw_ring[rx_pref]);
@@ -409,7 +412,7 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rte_prefetch0(&rxq->sw_ring[rx_pref]);
}
- rx_mb->data_off = pad;
+ rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM;
rx_mb->nb_segs = 1;
rx_mb->next = NULL;
rx_mb->pkt_len = rx_mb->data_len = len;
diff --git a/drivers/net/bnx2x/bnx2x_rxtx.h b/drivers/net/bnx2x/bnx2x_rxtx.h
index 9600e0f1..94b9e1b6 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.h
+++ b/drivers/net/bnx2x/bnx2x_rxtx.h
@@ -1,9 +1,9 @@
/*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
*
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index b9b85963..e3880abe 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -1,14 +1,14 @@
/*-
- * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2007-2013 Cavium Inc. All rights reserved.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
* Gary Zambrano <zambrano@broadcom.com>
*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h
index 3396de31..6fcaf607 100644
--- a/drivers/net/bnx2x/bnx2x_stats.h
+++ b/drivers/net/bnx2x/bnx2x_stats.h
@@ -1,14 +1,14 @@
/*-
- * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2007-2013 Cavium Inc. All rights reserved.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
* Gary Zambrano <zambrano@broadcom.com>
*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/bnx2x/bnx2x_vfpf.c b/drivers/net/bnx2x/bnx2x_vfpf.c
index 3c08f2a2..dacad771 100644
--- a/drivers/net/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/bnx2x/bnx2x_vfpf.c
@@ -1,9 +1,9 @@
/*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
*
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/bnx2x/bnx2x_vfpf.h b/drivers/net/bnx2x/bnx2x_vfpf.h
index d7cc11be..c4675d4c 100644
--- a/drivers/net/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/bnx2x/bnx2x_vfpf.h
@@ -1,9 +1,9 @@
/*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
*
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/bnx2x/ecore_fw_defs.h b/drivers/net/bnx2x/ecore_fw_defs.h
index ab490efa..d10dd108 100644
--- a/drivers/net/bnx2x/ecore_fw_defs.h
+++ b/drivers/net/bnx2x/ecore_fw_defs.h
@@ -1,13 +1,13 @@
/*-
- * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2007-2013 Cavium Inc. All rights reserved.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
* Gary Zambrano <zambrano@broadcom.com>
*
- * Copyright (c) 2014-2015 QLogic Corporation.
+ * Copyright (c) 2014-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/bnx2x/ecore_hsi.h b/drivers/net/bnx2x/ecore_hsi.h
index 5cce6647..0220e5f9 100644
--- a/drivers/net/bnx2x/ecore_hsi.h
+++ b/drivers/net/bnx2x/ecore_hsi.h
@@ -1,13 +1,13 @@
/*-
- * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2007-2013 Cavium Inc. All rights reserved.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
* Gary Zambrano <zambrano@broadcom.com>
*
- * Copyright (c) 2014-2015 QLogic Corporation.
+ * Copyright (c) 2014-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/bnx2x/ecore_init.h b/drivers/net/bnx2x/ecore_init.h
index 4576c565..8d00abb7 100644
--- a/drivers/net/bnx2x/ecore_init.h
+++ b/drivers/net/bnx2x/ecore_init.h
@@ -1,14 +1,14 @@
/*-
- * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2007-2013 Cavium Inc. All rights reserved.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
* Gary Zambrano <zambrano@broadcom.com>
*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/bnx2x/ecore_init_ops.h b/drivers/net/bnx2x/ecore_init_ops.h
index b6f98324..dd5df3d5 100644
--- a/drivers/net/bnx2x/ecore_init_ops.h
+++ b/drivers/net/bnx2x/ecore_init_ops.h
@@ -1,14 +1,14 @@
/*-
- * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2007-2013 Cavium Inc. All rights reserved.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
* Gary Zambrano <zambrano@broadcom.com>
*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/bnx2x/ecore_mfw_req.h b/drivers/net/bnx2x/ecore_mfw_req.h
index 57529097..c798c74c 100644
--- a/drivers/net/bnx2x/ecore_mfw_req.h
+++ b/drivers/net/bnx2x/ecore_mfw_req.h
@@ -1,13 +1,13 @@
/*-
- * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2007-2013 Cavium Inc. All rights reserved.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
* Gary Zambrano <zambrano@broadcom.com>
*
- * Copyright (c) 2014-2015 QLogic Corporation.
+ * Copyright (c) 2014-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/bnx2x/ecore_reg.h b/drivers/net/bnx2x/ecore_reg.h
index d8203b45..9800bafc 100644
--- a/drivers/net/bnx2x/ecore_reg.h
+++ b/drivers/net/bnx2x/ecore_reg.h
@@ -1,13 +1,13 @@
/*-
- * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2007-2013 Cavium Inc. All rights reserved.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
* Gary Zambrano <zambrano@broadcom.com>
*
- * Copyright (c) 2014-2015 QLogic Corporation.
+ * Copyright (c) 2014-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/bnx2x/ecore_sp.c b/drivers/net/bnx2x/ecore_sp.c
index a75a7fa4..75329672 100644
--- a/drivers/net/bnx2x/ecore_sp.c
+++ b/drivers/net/bnx2x/ecore_sp.c
@@ -1,14 +1,14 @@
/*-
- * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2007-2013 Cavium Inc. All rights reserved.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
* Gary Zambrano <zambrano@broadcom.com>
*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/bnx2x/ecore_sp.h b/drivers/net/bnx2x/ecore_sp.h
index ff40413c..772c8b1b 100644
--- a/drivers/net/bnx2x/ecore_sp.h
+++ b/drivers/net/bnx2x/ecore_sp.h
@@ -1,14 +1,14 @@
/*-
- * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2007-2013 Cavium Inc. All rights reserved.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
* Gary Zambrano <zambrano@broadcom.com>
*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/bnx2x/elink.c b/drivers/net/bnx2x/elink.c
index 9d0f3136..34a29373 100644
--- a/drivers/net/bnx2x/elink.c
+++ b/drivers/net/bnx2x/elink.c
@@ -1,14 +1,14 @@
/*
- * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2007-2013 Cavium Inc. All rights reserved.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
* Gary Zambrano <zambrano@broadcom.com>
*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
@@ -4143,9 +4143,9 @@ static void elink_sfp_e3_set_transmitter(struct elink_params *params,
elink_set_cfg_pin(sc, cfg_pin + 3, tx_en ^ 1);
}
-static void elink_warpcore_config_init(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+static uint8_t elink_warpcore_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
uint32_t serdes_net_if;
@@ -4222,7 +4222,7 @@ static void elink_warpcore_config_init(struct elink_phy *phy,
case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
if (vars->line_speed != ELINK_SPEED_20000) {
PMD_DRV_LOG(DEBUG, "Speed not supported yet");
- return;
+ return 0;
}
PMD_DRV_LOG(DEBUG, "Setting 20G DXGXS");
elink_warpcore_set_20G_DXGXS(sc, phy, lane);
@@ -4242,13 +4242,15 @@ static void elink_warpcore_config_init(struct elink_phy *phy,
PMD_DRV_LOG(DEBUG,
"Unsupported Serdes Net Interface 0x%x",
serdes_net_if);
- return;
+ return 0;
}
}
/* Take lane out of reset after configuration is finished */
elink_warpcore_reset_lane(sc, phy, 0);
PMD_DRV_LOG(DEBUG, "Exit config init");
+
+ return 0;
}
static void elink_warpcore_link_reset(struct elink_phy *phy,
@@ -5226,9 +5228,9 @@ static elink_status_t elink_get_link_speed_duplex(struct elink_phy *phy,
return ELINK_STATUS_OK;
}
-static elink_status_t elink_link_settings_status(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+static uint8_t elink_link_settings_status(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
@@ -5299,9 +5301,9 @@ static elink_status_t elink_link_settings_status(struct elink_phy *phy,
return rc;
}
-static elink_status_t elink_warpcore_read_status(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+static uint8_t elink_warpcore_read_status(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
uint8_t lane;
@@ -5520,9 +5522,9 @@ static void elink_set_preemphasis(struct elink_phy *phy,
}
}
-static void elink_xgxs_config_init(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+static uint8_t elink_xgxs_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
{
uint8_t enable_cl73 = (ELINK_SINGLE_MEDIA_DIRECT(params) ||
(params->loopback_mode == ELINK_LOOPBACK_XGXS));
@@ -5567,6 +5569,8 @@ static void elink_xgxs_config_init(struct elink_phy *phy,
elink_initialize_sgmii_process(phy, params, vars);
}
+
+ return 0;
}
static elink_status_t elink_prepare_xgxs(struct elink_phy *phy,
@@ -5751,8 +5755,8 @@ static void elink_link_int_ack(struct elink_params *params,
}
}
-static elink_status_t elink_format_ver(uint32_t num, uint8_t * str,
- uint16_t * len)
+static uint8_t elink_format_ver(uint32_t num, uint8_t * str,
+ uint16_t * len)
{
uint8_t *str_ptr = str;
uint32_t mask = 0xf0000000;
@@ -5790,8 +5794,8 @@ static elink_status_t elink_format_ver(uint32_t num, uint8_t * str,
return ELINK_STATUS_OK;
}
-static elink_status_t elink_null_format_ver(__rte_unused uint32_t spirom_ver,
- uint8_t * str, uint16_t * len)
+static uint8_t elink_null_format_ver(__rte_unused uint32_t spirom_ver,
+ uint8_t * str, uint16_t * len)
{
str[0] = '\0';
(*len)--;
@@ -6802,9 +6806,9 @@ static void elink_8073_specific_func(struct elink_phy *phy,
}
}
-static elink_status_t elink_8073_config_init(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+static uint8_t elink_8073_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
uint16_t val = 0, tmp1;
@@ -7097,9 +7101,9 @@ static void elink_8073_link_reset(__rte_unused struct elink_phy *phy,
/******************************************************************/
/* BNX2X8705 PHY SECTION */
/******************************************************************/
-static elink_status_t elink_8705_config_init(struct elink_phy *phy,
- struct elink_params *params,
- __rte_unused struct elink_vars
+static uint8_t elink_8705_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ __rte_unused struct elink_vars
*vars)
{
struct bnx2x_softc *sc = params->sc;
@@ -8403,9 +8407,9 @@ static uint8_t elink_8706_config_init(struct elink_phy *phy,
return ELINK_STATUS_OK;
}
-static elink_status_t elink_8706_read_status(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+static uint8_t elink_8706_read_status(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
{
return elink_8706_8726_read_status(phy, params, vars);
}
@@ -8477,9 +8481,9 @@ static uint8_t elink_8726_read_status(struct elink_phy *phy,
return link_up;
}
-static elink_status_t elink_8726_config_init(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+static uint8_t elink_8726_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
PMD_DRV_LOG(DEBUG, "Initializing BNX2X8726");
@@ -8684,9 +8688,9 @@ static void elink_8727_config_speed(struct elink_phy *phy,
}
}
-static elink_status_t elink_8727_config_init(struct elink_phy *phy,
- struct elink_params *params,
- __rte_unused struct elink_vars
+static uint8_t elink_8727_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ __rte_unused struct elink_vars
*vars)
{
uint32_t tx_en_mode;
@@ -9291,7 +9295,7 @@ static elink_status_t elink_848xx_cmn_config_init(struct elink_phy *phy,
return ELINK_STATUS_OK;
}
-static elink_status_t elink_8481_config_init(struct elink_phy *phy,
+static uint8_t elink_8481_config_init(struct elink_phy *phy,
struct elink_params *params,
struct elink_vars *vars)
{
@@ -9442,8 +9446,8 @@ static uint8_t elink_84833_get_reset_gpios(struct bnx2x_softc *sc,
return reset_gpios;
}
-static elink_status_t elink_84833_hw_reset_phy(struct elink_phy *phy,
- struct elink_params *params)
+static void elink_84833_hw_reset_phy(struct elink_phy *phy,
+ struct elink_params *params)
{
struct bnx2x_softc *sc = params->sc;
uint8_t reset_gpios;
@@ -9471,8 +9475,6 @@ static elink_status_t elink_84833_hw_reset_phy(struct elink_phy *phy,
MISC_REGISTERS_GPIO_OUTPUT_LOW);
DELAY(10);
PMD_DRV_LOG(DEBUG, "84833 hw reset on pin values 0x%x", reset_gpios);
-
- return ELINK_STATUS_OK;
}
static elink_status_t elink_8483x_disable_eee(struct elink_phy *phy,
@@ -9513,9 +9515,9 @@ static elink_status_t elink_8483x_enable_eee(struct elink_phy *phy,
}
#define PHY84833_CONSTANT_LATENCY 1193
-static elink_status_t elink_848x3_config_init(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+static uint8_t elink_848x3_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
uint8_t port, initialize = 1;
@@ -9819,7 +9821,7 @@ static uint8_t elink_848xx_read_status(struct elink_phy *phy,
return link_up;
}
-static elink_status_t elink_848xx_format_ver(uint32_t raw_ver, uint8_t * str,
+static uint8_t elink_848xx_format_ver(uint32_t raw_ver, uint8_t * str,
uint16_t * len)
{
elink_status_t status = ELINK_STATUS_OK;
@@ -10146,9 +10148,9 @@ static void elink_54618se_specific_func(struct elink_phy *phy,
}
}
-static elink_status_t elink_54618se_config_init(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+static uint8_t elink_54618se_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
uint8_t port;
@@ -10542,9 +10544,9 @@ static void elink_7101_config_loopback(struct elink_phy *phy,
MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100);
}
-static elink_status_t elink_7101_config_init(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+static uint8_t elink_7101_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
{
uint16_t fw_ver1, fw_ver2, val;
struct bnx2x_softc *sc = params->sc;
@@ -10614,8 +10616,8 @@ static uint8_t elink_7101_read_status(struct elink_phy *phy,
return link_up;
}
-static elink_status_t elink_7101_format_ver(uint32_t spirom_ver, uint8_t * str,
- uint16_t * len)
+static uint8_t elink_7101_format_ver(uint32_t spirom_ver, uint8_t * str,
+ uint16_t * len)
{
if (*len < 5)
return ELINK_STATUS_ERROR;
@@ -10680,14 +10682,14 @@ static const struct elink_phy phy_null = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t) NULL,
- .read_status = (read_status_t) NULL,
- .link_reset = (link_reset_t) NULL,
- .config_loopback = (config_loopback_t) NULL,
- .format_fw_ver = (format_fw_ver_t) NULL,
- .hw_reset = (hw_reset_t) NULL,
- .set_link_led = (set_link_led_t) NULL,
- .phy_specific_func = (phy_specific_func_t) NULL
+ .config_init = NULL,
+ .read_status = NULL,
+ .link_reset = NULL,
+ .config_loopback = NULL,
+ .format_fw_ver = NULL,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
static const struct elink_phy phy_serdes = {
@@ -10714,14 +10716,14 @@ static const struct elink_phy phy_serdes = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t) elink_xgxs_config_init,
- .read_status = (read_status_t) elink_link_settings_status,
- .link_reset = (link_reset_t) elink_int_link_reset,
- .config_loopback = (config_loopback_t) NULL,
- .format_fw_ver = (format_fw_ver_t) NULL,
- .hw_reset = (hw_reset_t) NULL,
- .set_link_led = (set_link_led_t) NULL,
- .phy_specific_func = (phy_specific_func_t) NULL
+ .config_init = elink_xgxs_config_init,
+ .read_status = elink_link_settings_status,
+ .link_reset = elink_int_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = NULL,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
static const struct elink_phy phy_xgxs = {
@@ -10749,14 +10751,14 @@ static const struct elink_phy phy_xgxs = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t) elink_xgxs_config_init,
- .read_status = (read_status_t) elink_link_settings_status,
- .link_reset = (link_reset_t) elink_int_link_reset,
- .config_loopback = (config_loopback_t) elink_set_xgxs_loopback,
- .format_fw_ver = (format_fw_ver_t) NULL,
- .hw_reset = (hw_reset_t) NULL,
- .set_link_led = (set_link_led_t) NULL,
- .phy_specific_func = (phy_specific_func_t) elink_xgxs_specific_func
+ .config_init = elink_xgxs_config_init,
+ .read_status = elink_link_settings_status,
+ .link_reset = elink_int_link_reset,
+ .config_loopback = elink_set_xgxs_loopback,
+ .format_fw_ver = NULL,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = elink_xgxs_specific_func
};
static const struct elink_phy phy_warpcore = {
@@ -10785,14 +10787,14 @@ static const struct elink_phy phy_warpcore = {
.speed_cap_mask = 0,
/* req_duplex = */ 0,
/* rsrv = */ 0,
- .config_init = (config_init_t) elink_warpcore_config_init,
- .read_status = (read_status_t) elink_warpcore_read_status,
- .link_reset = (link_reset_t) elink_warpcore_link_reset,
- .config_loopback = (config_loopback_t) elink_set_warpcore_loopback,
- .format_fw_ver = (format_fw_ver_t) NULL,
- .hw_reset = (hw_reset_t) elink_warpcore_hw_reset,
- .set_link_led = (set_link_led_t) NULL,
- .phy_specific_func = (phy_specific_func_t) NULL
+ .config_init = elink_warpcore_config_init,
+ .read_status = elink_warpcore_read_status,
+ .link_reset = elink_warpcore_link_reset,
+ .config_loopback = elink_set_warpcore_loopback,
+ .format_fw_ver = NULL,
+ .hw_reset = elink_warpcore_hw_reset,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
static const struct elink_phy phy_7101 = {
@@ -10814,14 +10816,14 @@ static const struct elink_phy phy_7101 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t) elink_7101_config_init,
- .read_status = (read_status_t) elink_7101_read_status,
- .link_reset = (link_reset_t) elink_common_ext_link_reset,
- .config_loopback = (config_loopback_t) elink_7101_config_loopback,
- .format_fw_ver = (format_fw_ver_t) elink_7101_format_ver,
- .hw_reset = (hw_reset_t) elink_7101_hw_reset,
- .set_link_led = (set_link_led_t) elink_7101_set_link_led,
- .phy_specific_func = (phy_specific_func_t) NULL
+ .config_init = elink_7101_config_init,
+ .read_status = elink_7101_read_status,
+ .link_reset = elink_common_ext_link_reset,
+ .config_loopback = elink_7101_config_loopback,
+ .format_fw_ver = elink_7101_format_ver,
+ .hw_reset = elink_7101_hw_reset,
+ .set_link_led = elink_7101_set_link_led,
+ .phy_specific_func = NULL
};
static const struct elink_phy phy_8073 = {
@@ -10845,14 +10847,14 @@ static const struct elink_phy phy_8073 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t) elink_8073_config_init,
- .read_status = (read_status_t) elink_8073_read_status,
- .link_reset = (link_reset_t) elink_8073_link_reset,
- .config_loopback = (config_loopback_t) NULL,
- .format_fw_ver = (format_fw_ver_t) elink_format_ver,
- .hw_reset = (hw_reset_t) NULL,
- .set_link_led = (set_link_led_t) NULL,
- .phy_specific_func = (phy_specific_func_t) elink_8073_specific_func
+ .config_init = elink_8073_config_init,
+ .read_status = elink_8073_read_status,
+ .link_reset = elink_8073_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = elink_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = elink_8073_specific_func
};
static const struct elink_phy phy_8705 = {
@@ -10873,14 +10875,14 @@ static const struct elink_phy phy_8705 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t) elink_8705_config_init,
- .read_status = (read_status_t) elink_8705_read_status,
- .link_reset = (link_reset_t) elink_common_ext_link_reset,
- .config_loopback = (config_loopback_t) NULL,
- .format_fw_ver = (format_fw_ver_t) elink_null_format_ver,
- .hw_reset = (hw_reset_t) NULL,
- .set_link_led = (set_link_led_t) NULL,
- .phy_specific_func = (phy_specific_func_t) NULL
+ .config_init = elink_8705_config_init,
+ .read_status = elink_8705_read_status,
+ .link_reset = elink_common_ext_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = elink_null_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
static const struct elink_phy phy_8706 = {
@@ -10902,14 +10904,14 @@ static const struct elink_phy phy_8706 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t) elink_8706_config_init,
- .read_status = (read_status_t) elink_8706_read_status,
- .link_reset = (link_reset_t) elink_common_ext_link_reset,
- .config_loopback = (config_loopback_t) NULL,
- .format_fw_ver = (format_fw_ver_t) elink_format_ver,
- .hw_reset = (hw_reset_t) NULL,
- .set_link_led = (set_link_led_t) NULL,
- .phy_specific_func = (phy_specific_func_t) NULL
+ .config_init = elink_8706_config_init,
+ .read_status = elink_8706_read_status,
+ .link_reset = elink_common_ext_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = elink_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
static const struct elink_phy phy_8726 = {
@@ -10932,14 +10934,14 @@ static const struct elink_phy phy_8726 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t) elink_8726_config_init,
- .read_status = (read_status_t) elink_8726_read_status,
- .link_reset = (link_reset_t) elink_8726_link_reset,
- .config_loopback = (config_loopback_t) elink_8726_config_loopback,
- .format_fw_ver = (format_fw_ver_t) elink_format_ver,
- .hw_reset = (hw_reset_t) NULL,
- .set_link_led = (set_link_led_t) NULL,
- .phy_specific_func = (phy_specific_func_t) NULL
+ .config_init = elink_8726_config_init,
+ .read_status = elink_8726_read_status,
+ .link_reset = elink_8726_link_reset,
+ .config_loopback = elink_8726_config_loopback,
+ .format_fw_ver = elink_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
static const struct elink_phy phy_8727 = {
@@ -10961,14 +10963,14 @@ static const struct elink_phy phy_8727 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t) elink_8727_config_init,
- .read_status = (read_status_t) elink_8727_read_status,
- .link_reset = (link_reset_t) elink_8727_link_reset,
- .config_loopback = (config_loopback_t) NULL,
- .format_fw_ver = (format_fw_ver_t) elink_format_ver,
- .hw_reset = (hw_reset_t) elink_8727_hw_reset,
- .set_link_led = (set_link_led_t) elink_8727_set_link_led,
- .phy_specific_func = (phy_specific_func_t) elink_8727_specific_func
+ .config_init = elink_8727_config_init,
+ .read_status = elink_8727_read_status,
+ .link_reset = elink_8727_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = elink_format_ver,
+ .hw_reset = elink_8727_hw_reset,
+ .set_link_led = elink_8727_set_link_led,
+ .phy_specific_func = elink_8727_specific_func
};
static const struct elink_phy phy_8481 = {
@@ -10996,14 +10998,14 @@ static const struct elink_phy phy_8481 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t) elink_8481_config_init,
- .read_status = (read_status_t) elink_848xx_read_status,
- .link_reset = (link_reset_t) elink_8481_link_reset,
- .config_loopback = (config_loopback_t) NULL,
- .format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver,
- .hw_reset = (hw_reset_t) elink_8481_hw_reset,
- .set_link_led = (set_link_led_t) elink_848xx_set_link_led,
- .phy_specific_func = (phy_specific_func_t) NULL
+ .config_init = elink_8481_config_init,
+ .read_status = elink_848xx_read_status,
+ .link_reset = elink_8481_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = elink_848xx_format_ver,
+ .hw_reset = elink_8481_hw_reset,
+ .set_link_led = elink_848xx_set_link_led,
+ .phy_specific_func = NULL
};
static const struct elink_phy phy_84823 = {
@@ -11031,14 +11033,14 @@ static const struct elink_phy phy_84823 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t) elink_848x3_config_init,
- .read_status = (read_status_t) elink_848xx_read_status,
- .link_reset = (link_reset_t) elink_848x3_link_reset,
- .config_loopback = (config_loopback_t) NULL,
- .format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver,
- .hw_reset = (hw_reset_t) NULL,
- .set_link_led = (set_link_led_t) elink_848xx_set_link_led,
- .phy_specific_func = (phy_specific_func_t) elink_848xx_specific_func
+ .config_init = elink_848x3_config_init,
+ .read_status = elink_848xx_read_status,
+ .link_reset = elink_848x3_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = elink_848xx_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = elink_848xx_set_link_led,
+ .phy_specific_func = elink_848xx_specific_func
};
static const struct elink_phy phy_84833 = {
@@ -11065,14 +11067,14 @@ static const struct elink_phy phy_84833 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t) elink_848x3_config_init,
- .read_status = (read_status_t) elink_848xx_read_status,
- .link_reset = (link_reset_t) elink_848x3_link_reset,
- .config_loopback = (config_loopback_t) NULL,
- .format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver,
- .hw_reset = (hw_reset_t) elink_84833_hw_reset_phy,
- .set_link_led = (set_link_led_t) elink_848xx_set_link_led,
- .phy_specific_func = (phy_specific_func_t) elink_848xx_specific_func
+ .config_init = elink_848x3_config_init,
+ .read_status = elink_848xx_read_status,
+ .link_reset = elink_848x3_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = elink_848xx_format_ver,
+ .hw_reset = elink_84833_hw_reset_phy,
+ .set_link_led = elink_848xx_set_link_led,
+ .phy_specific_func = elink_848xx_specific_func
};
static const struct elink_phy phy_84834 = {
@@ -11098,14 +11100,14 @@ static const struct elink_phy phy_84834 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t) elink_848x3_config_init,
- .read_status = (read_status_t) elink_848xx_read_status,
- .link_reset = (link_reset_t) elink_848x3_link_reset,
- .config_loopback = (config_loopback_t) NULL,
- .format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver,
- .hw_reset = (hw_reset_t) elink_84833_hw_reset_phy,
- .set_link_led = (set_link_led_t) elink_848xx_set_link_led,
- .phy_specific_func = (phy_specific_func_t) elink_848xx_specific_func
+ .config_init = elink_848x3_config_init,
+ .read_status = elink_848xx_read_status,
+ .link_reset = elink_848x3_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = elink_848xx_format_ver,
+ .hw_reset = elink_84833_hw_reset_phy,
+ .set_link_led = elink_848xx_set_link_led,
+ .phy_specific_func = elink_848xx_specific_func
};
static const struct elink_phy phy_54618se = {
@@ -11131,14 +11133,14 @@ static const struct elink_phy phy_54618se = {
.speed_cap_mask = 0,
/* req_duplex = */ 0,
/* rsrv = */ 0,
- .config_init = (config_init_t) elink_54618se_config_init,
- .read_status = (read_status_t) elink_54618se_read_status,
- .link_reset = (link_reset_t) elink_54618se_link_reset,
- .config_loopback = (config_loopback_t) elink_54618se_config_loopback,
- .format_fw_ver = (format_fw_ver_t) NULL,
- .hw_reset = (hw_reset_t) NULL,
- .set_link_led = (set_link_led_t) elink_5461x_set_link_led,
- .phy_specific_func = (phy_specific_func_t) elink_54618se_specific_func
+ .config_init = elink_54618se_config_init,
+ .read_status = elink_54618se_read_status,
+ .link_reset = elink_54618se_link_reset,
+ .config_loopback = elink_54618se_config_loopback,
+ .format_fw_ver = NULL,
+ .hw_reset = NULL,
+ .set_link_led = elink_5461x_set_link_led,
+ .phy_specific_func = elink_54618se_specific_func
};
/*****************************************************************/
@@ -12919,7 +12921,7 @@ static void elink_check_kr2_wa(struct elink_params *params,
*/
not_kr2_device = (((base_page & 0x8000) == 0) ||
(((base_page & 0x8000) &&
- ((next_page & 0xe0) == 0x2))));
+ ((next_page & 0xe0) == 0x20))));
/* In case KR2 is already disabled, check if we need to re-enable it */
if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
diff --git a/drivers/net/bnx2x/elink.h b/drivers/net/bnx2x/elink.h
index 9401b7cd..236f9367 100644
--- a/drivers/net/bnx2x/elink.h
+++ b/drivers/net/bnx2x/elink.h
@@ -1,14 +1,14 @@
/*
- * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2007-2013 Cavium Inc. All rights reserved.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
* Gary Zambrano <zambrano@broadcom.com>
*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/bnxt/Makefile b/drivers/net/bnxt/Makefile
index 2aa04411..fd0cb523 100644
--- a/drivers/net/bnxt/Makefile
+++ b/drivers/net/bnxt/Makefile
@@ -1,35 +1,8 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# Copyright(c) 2014 6WIND S.A.
-# Copyright(c) Broadcom Limited.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2014 Intel Corporation.
+# Copyright(c) 2014 6WIND S.A.
+# Copyright(c) Broadcom Limited.
+# All rights reserved.
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index b5a0badf..afaaf8c4 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#ifndef _BNXT_H_
@@ -51,6 +23,7 @@
#define BNXT_MAX_MTU 9500
#define VLAN_TAG_SIZE 4
#define BNXT_MAX_LED 4
+#define BNXT_NUM_VLANS 2
struct bnxt_led_info {
uint8_t led_id;
@@ -236,6 +209,7 @@ struct bnxt {
struct rte_eth_dev *eth_dev;
struct rte_eth_rss_conf rss_conf;
struct rte_pci_device *pdev;
+ void *doorbell_base;
uint32_t flags;
#define BNXT_FLAG_REGISTERED (1 << 0)
@@ -246,6 +220,7 @@ struct bnxt {
#define BNXT_FLAG_UPDATE_HASH (1 << 5)
#define BNXT_FLAG_PTP_SUPPORTED (1 << 6)
#define BNXT_FLAG_MULTI_HOST (1 << 7)
+#define BNXT_FLAG_NEW_RM (1 << 30)
#define BNXT_FLAG_INIT_DONE (1 << 31)
#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
@@ -300,6 +275,7 @@ struct bnxt {
struct bnxt_link_info link_info;
struct bnxt_cos_queue_info cos_queue[BNXT_COS_QUEUE_COUNT];
+ uint8_t tx_cosq_id;
uint16_t fw_fid;
uint8_t dflt_mac_addr[ETHER_ADDR_LEN];
@@ -321,7 +297,7 @@ struct bnxt {
uint16_t vxlan_fw_dst_port_id;
uint16_t geneve_fw_dst_port_id;
uint32_t fw_ver;
- rte_atomic64_t rx_mbuf_alloc_fail;
+ uint32_t hwrm_spec_code;
struct bnxt_led_info leds[BNXT_MAX_LED];
uint8_t num_leds;
@@ -331,8 +307,6 @@ struct bnxt {
int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete);
int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg);
-#define RX_PROD_AGG_BD_TYPE_RX_PROD_AGG 0x6
-
bool is_bnxt_supported(struct rte_eth_dev *dev);
extern const struct rte_flow_ops bnxt_flow_ops;
diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index 737bb060..ff20b6fd 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#include <rte_malloc.h>
@@ -55,6 +27,7 @@ void bnxt_handle_async_event(struct bnxt *bp,
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
+ /* FALLTHROUGH */
bnxt_link_update_op(bp->eth_dev, 1);
break;
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
@@ -159,69 +132,31 @@ reject:
return;
}
-/* For the default completion ring only */
-int bnxt_alloc_def_cp_ring(struct bnxt *bp)
+int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp)
{
- struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
- struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
- int rc;
-
- rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
- HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
- 0, HWRM_NA_SIGNATURE,
- HWRM_NA_SIGNATURE);
- if (rc)
- goto err_out;
- cpr->cp_doorbell = bp->pdev->mem_resource[2].addr;
- B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
- if (BNXT_PF(bp))
- rc = bnxt_hwrm_func_cfg_def_cp(bp);
- else
- rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
-
-err_out:
- return rc;
-}
+ bool evt = 0;
-void bnxt_free_def_cp_ring(struct bnxt *bp)
-{
- struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
-
- if (cpr == NULL)
- return;
+ if (bp == NULL || cmp == NULL) {
+ PMD_DRV_LOG(ERR, "invalid NULL argument\n");
+ return evt;
+ }
- bnxt_free_ring(cpr->cp_ring_struct);
- cpr->cp_ring_struct = NULL;
- rte_free(cpr->cp_ring_struct);
- rte_free(cpr);
- bp->def_cp_ring = NULL;
-}
+ switch (CMP_TYPE(cmp)) {
+ case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
+ /* Handle any async event */
+ bnxt_handle_async_event(bp, cmp);
+ evt = 1;
+ break;
+ case CMPL_BASE_TYPE_HWRM_FWD_RESP:
+ /* Handle HWRM forwarded responses */
+ bnxt_handle_fwd_req(bp, cmp);
+ evt = 1;
+ break;
+ default:
+ /* Ignore any other events */
+ PMD_DRV_LOG(INFO, "Ignoring %02x completion\n", CMP_TYPE(cmp));
+ break;
+ }
-/* For the default completion ring only */
-int bnxt_init_def_ring_struct(struct bnxt *bp, unsigned int socket_id)
-{
- struct bnxt_cp_ring_info *cpr;
- struct bnxt_ring *ring;
-
- cpr = rte_zmalloc_socket("cpr",
- sizeof(struct bnxt_cp_ring_info),
- RTE_CACHE_LINE_SIZE, socket_id);
- if (cpr == NULL)
- return -ENOMEM;
- bp->def_cp_ring = cpr;
-
- ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
- sizeof(struct bnxt_ring),
- RTE_CACHE_LINE_SIZE, socket_id);
- if (ring == NULL)
- return -ENOMEM;
- cpr->cp_ring_struct = ring;
- ring->bd = (void *)cpr->cp_desc_ring;
- ring->bd_dma = cpr->cp_desc_mapping;
- ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE);
- ring->ring_mask = ring->ring_size - 1;
- ring->vmem_size = 0;
- ring->vmem = NULL;
-
- return 0;
+ return evt;
}
diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h
index ce2b0cb8..6c1e6d2b 100644
--- a/drivers/net/bnxt/bnxt_cpr.h
+++ b/drivers/net/bnxt/bnxt_cpr.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#ifndef _BNXT_CPR_H_
@@ -100,12 +72,9 @@ struct bnxt_cp_ring_info {
#define RX_CMP_L2_ERRORS \
(RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_PKT_CMPL_ERRORS_CRC_ERROR)
-
struct bnxt;
-int bnxt_alloc_def_cp_ring(struct bnxt *bp);
-void bnxt_free_def_cp_ring(struct bnxt *bp);
-int bnxt_init_def_ring_struct(struct bnxt *bp, unsigned int socket_id);
void bnxt_handle_async_event(struct bnxt *bp, struct cmpl_base *cmp);
void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmp);
+int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp);
#endif
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 21c46f83..6e56bfd3 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#include <inttypes.h>
@@ -57,12 +29,13 @@
#define DRV_MODULE_NAME "bnxt"
static const char bnxt_version[] =
- "Broadcom Cumulus driver " DRV_MODULE_NAME "\n";
+ "Broadcom NetXtreme driver " DRV_MODULE_NAME "\n";
int bnxt_logtype_driver;
#define PCI_VENDOR_ID_BROADCOM 0x14E4
-#define BROADCOM_DEV_ID_STRATUS_NIC_VF 0x1609
+#define BROADCOM_DEV_ID_STRATUS_NIC_VF1 0x1606
+#define BROADCOM_DEV_ID_STRATUS_NIC_VF2 0x1609
#define BROADCOM_DEV_ID_STRATUS_NIC 0x1614
#define BROADCOM_DEV_ID_57414_VF 0x16c1
#define BROADCOM_DEV_ID_57301 0x16c8
@@ -97,10 +70,15 @@ int bnxt_logtype_driver;
#define BROADCOM_DEV_ID_57407_MF 0x16ea
#define BROADCOM_DEV_ID_57414_MF 0x16ec
#define BROADCOM_DEV_ID_57416_MF 0x16ee
+#define BROADCOM_DEV_ID_58802 0xd802
+#define BROADCOM_DEV_ID_58804 0xd804
+#define BROADCOM_DEV_ID_58808 0x16f0
static const struct rte_pci_id bnxt_pci_id_map[] = {
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
- BROADCOM_DEV_ID_STRATUS_NIC_VF) },
+ BROADCOM_DEV_ID_STRATUS_NIC_VF1) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
+ BROADCOM_DEV_ID_STRATUS_NIC_VF2) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
@@ -135,6 +113,9 @@ static const struct rte_pci_id bnxt_pci_id_map[] = {
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
{ .vendor_id = 0, /* sentinel */ },
};
@@ -146,8 +127,31 @@ static const struct rte_pci_id bnxt_pci_id_map[] = {
ETH_RSS_NONFRAG_IPV6_TCP | \
ETH_RSS_NONFRAG_IPV6_UDP)
+#define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_VLAN_INSERT | \
+ DEV_TX_OFFLOAD_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_TSO | \
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
+ DEV_TX_OFFLOAD_GRE_TNL_TSO | \
+ DEV_TX_OFFLOAD_IPIP_TNL_TSO | \
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
+ DEV_TX_OFFLOAD_MULTI_SEGS)
+
+#define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \
+ DEV_RX_OFFLOAD_VLAN_STRIP | \
+ DEV_RX_OFFLOAD_IPV4_CKSUM | \
+ DEV_RX_OFFLOAD_UDP_CKSUM | \
+ DEV_RX_OFFLOAD_TCP_CKSUM | \
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ DEV_RX_OFFLOAD_JUMBO_FRAME | \
+ DEV_RX_OFFLOAD_CRC_STRIP | \
+ DEV_RX_OFFLOAD_TCP_LRO)
+
static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
+static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu);
/***********************/
@@ -164,23 +168,12 @@ static void bnxt_free_mem(struct bnxt *bp)
bnxt_free_stats(bp);
bnxt_free_tx_rings(bp);
bnxt_free_rx_rings(bp);
- bnxt_free_def_cp_ring(bp);
}
static int bnxt_alloc_mem(struct bnxt *bp)
{
int rc;
- /* Default completion ring */
- rc = bnxt_init_def_ring_struct(bp, SOCKET_ID_ANY);
- if (rc)
- goto alloc_mem_err;
-
- rc = bnxt_alloc_rings(bp, 0, NULL, NULL,
- bp->def_cp_ring, "def_cp");
- if (rc)
- goto alloc_mem_err;
-
rc = bnxt_alloc_vnic_mem(bp);
if (rc)
goto alloc_mem_err;
@@ -215,10 +208,12 @@ static int bnxt_init_chip(struct bnxt *bp)
rte_intr_disable(intr_handle);
if (bp->eth_dev->data->mtu > ETHER_MTU) {
- bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ bp->eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
bp->flags |= BNXT_FLAG_JUMBO;
} else {
- bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ bp->eth_dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
bp->flags &= ~BNXT_FLAG_JUMBO;
}
@@ -289,7 +284,8 @@ static int bnxt_init_chip(struct bnxt *bp)
bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
- if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
+ if (bp->eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_TCP_LRO)
bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
else
bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);
@@ -389,10 +385,6 @@ static int bnxt_init_nic(struct bnxt *bp)
bnxt_init_vnics(bp);
bnxt_init_filters(bp);
- rc = bnxt_init_chip(bp);
- if (rc)
- return rc;
-
return 0;
}
@@ -407,8 +399,6 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
uint16_t max_vnics, i, j, vpool, vrxq;
unsigned int max_rx_rings;
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
-
/* MAC Specifics */
dev_info->max_mac_addrs = bp->max_l2_ctx;
dev_info->max_hash_mac_addrs = 0;
@@ -416,9 +406,7 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
/* PF/VF specifics */
if (BNXT_PF(bp))
dev_info->max_vfs = bp->pdev->max_vfs;
- max_rx_rings = RTE_MIN(bp->max_vnics, RTE_MIN(bp->max_l2_ctx,
- RTE_MIN(bp->max_rsscos_ctx,
- bp->max_stat_ctx)));
+ max_rx_rings = RTE_MIN(bp->max_vnics, bp->max_stat_ctx);
/* For the sake of symmetry, max_rx_queues = max_tx_queues */
dev_info->max_rx_queues = max_rx_rings;
dev_info->max_tx_queues = max_rx_rings;
@@ -430,21 +418,12 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
dev_info->min_rx_bufsize = 1;
dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
+ VLAN_TAG_SIZE;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
- dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO |
- DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+
+ dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
+ if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+ dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
+ dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
/* *INDENT-OFF* */
dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -454,7 +433,8 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
.wthresh = 0,
},
.rx_free_thresh = 32,
- .rx_drop_en = 0,
+ /* If no descriptors available, pkts are dropped by default */
+ .rx_drop_en = 1,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -465,8 +445,6 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
},
.tx_free_thresh = 32,
.tx_rs_thresh = 32,
- .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
};
eth_dev->data->dev_conf.intr_conf.lsc = 1;
@@ -510,18 +488,38 @@ found:
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
bp->rx_queues = (void *)eth_dev->data->rx_queues;
bp->tx_queues = (void *)eth_dev->data->tx_queues;
+ bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
+ bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
+
+ if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
+ int rc;
+
+ rc = bnxt_hwrm_func_reserve_vf_resc(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
+ return -ENOSPC;
+ }
+
+ /* legacy driver needs to get updated values */
+ rc = bnxt_hwrm_func_qcaps(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc);
+ return -ENOSPC;
+ }
+ }
/* Inherit new configurations */
if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
- eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues + 1 >
+ eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
bp->max_cp_rings ||
eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
bp->max_stat_ctx ||
- (uint32_t)(eth_dev->data->nb_rx_queues + 1) > bp->max_ring_grps) {
+ (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps) {
PMD_DRV_LOG(ERR,
"Insufficient resources to support requested config\n");
PMD_DRV_LOG(ERR,
@@ -535,15 +533,16 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
return -ENOSPC;
}
- bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
- bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
bp->rx_cp_nr_rings = bp->rx_nr_rings;
bp->tx_cp_nr_rings = bp->tx_nr_rings;
- if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
+ if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
eth_dev->data->mtu =
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
- ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
+ ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE *
+ BNXT_NUM_VLANS;
+ bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
+ }
return 0;
}
@@ -571,6 +570,7 @@ static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev)
static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
int vlan_mask = 0;
int rc;
@@ -581,15 +581,15 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
}
bp->dev_stopped = 0;
- rc = bnxt_init_nic(bp);
+ rc = bnxt_init_chip(bp);
if (rc)
goto error;
bnxt_link_update_op(eth_dev, 1);
- if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
vlan_mask |= ETH_VLAN_FILTER_MASK;
- if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
vlan_mask |= ETH_VLAN_STRIP_MASK;
rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
if (rc)
@@ -635,13 +635,15 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ bp->flags &= ~BNXT_FLAG_INIT_DONE;
if (bp->eth_dev->data->dev_started) {
/* TBD: STOP HW queues DMA */
eth_dev->data->dev_link.link_status = 0;
}
bnxt_set_hwrm_link_config(bp, false);
bnxt_hwrm_port_clr_stats(bp);
- bp->flags &= ~BNXT_FLAG_INIT_DONE;
+ bnxt_free_tx_mbufs(bp);
+ bnxt_free_rx_mbufs(bp);
bnxt_shutdown_nic(bp);
bp->dev_stopped = 1;
}
@@ -653,8 +655,6 @@ static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
if (bp->dev_stopped == 0)
bnxt_dev_stop_op(eth_dev);
- bnxt_free_tx_mbufs(bp);
- bnxt_free_rx_mbufs(bp);
bnxt_free_mem(bp);
if (eth_dev->data->mac_addrs != NULL) {
rte_free(eth_dev->data->mac_addrs);
@@ -771,6 +771,11 @@ out:
new.link_speed != eth_dev->data->dev_link.link_speed) {
memcpy(&eth_dev->data->dev_link, &new,
sizeof(struct rte_eth_link));
+
+ _rte_eth_dev_callback_process(eth_dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+
bnxt_print_link_info(eth_dev);
}
@@ -1366,30 +1371,31 @@ static int
bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
unsigned int i;
if (mask & ETH_VLAN_FILTER_MASK) {
- if (!dev->data->dev_conf.rxmode.hw_vlan_filter) {
+ if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
/* Remove any VLAN filters programmed */
for (i = 0; i < 4095; i++)
bnxt_del_vlan_filter(bp, i);
}
PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
- dev->data->dev_conf.rxmode.hw_vlan_filter);
+ !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));
}
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
for (i = 0; i < bp->nr_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
vnic->vlan_strip = true;
else
vnic->vlan_strip = false;
bnxt_hwrm_vnic_cfg(bp, vnic);
}
PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
- dev->data->dev_conf.rxmode.hw_vlan_strip);
+ !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));
}
if (mask & ETH_VLAN_EXTEND_MASK)
@@ -1398,7 +1404,7 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
return 0;
}
-static void
+static int
bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
@@ -1408,7 +1414,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
int rc;
if (BNXT_VF(bp))
- return;
+ return -EPERM;
memcpy(bp->mac_addr, addr, sizeof(bp->mac_addr));
@@ -1418,7 +1424,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
continue;
rc = bnxt_hwrm_clear_l2_filter(bp, filter);
if (rc)
- break;
+ return rc;
memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN);
memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
@@ -1427,10 +1433,12 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
if (rc)
- break;
+ return rc;
filter->mac_index = 0;
PMD_DRV_LOG(DEBUG, "Set MAC addr\n");
}
+
+ return 0;
}
static int
@@ -1515,7 +1523,6 @@ bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
qinfo->conf.tx_rs_thresh = 0;
- qinfo->conf.txq_flags = txq->txq_flags;
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
@@ -1540,9 +1547,11 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
if (new_mtu > ETHER_MTU) {
bp->flags |= BNXT_FLAG_JUMBO;
- eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ bp->eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
} else {
- eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ bp->eth_dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
bp->flags &= ~BNXT_FLAG_JUMBO;
}
@@ -2358,7 +2367,8 @@ bnxt_parse_fdir_filter(struct bnxt *bp,
}
static struct bnxt_filter_info *
-bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf)
+bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf,
+ struct bnxt_vnic_info **mvnic)
{
struct bnxt_filter_info *mf = NULL;
int i;
@@ -2396,8 +2406,11 @@ bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf)
!memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
sizeof(nf->dst_ipaddr)) &&
!memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
- sizeof(nf->dst_ipaddr_mask)))
+ sizeof(nf->dst_ipaddr_mask))) {
+ if (mvnic)
+ *mvnic = vnic;
return mf;
+ }
}
}
return NULL;
@@ -2411,7 +2424,7 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct rte_eth_fdir_filter *fdir = (struct rte_eth_fdir_filter *)arg;
struct bnxt_filter_info *filter, *match;
- struct bnxt_vnic_info *vnic;
+ struct bnxt_vnic_info *vnic, *mvnic;
int ret = 0, i;
if (filter_op == RTE_ETH_FILTER_NOP)
@@ -2436,11 +2449,31 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
goto free_filter;
filter->filter_type = HWRM_CFA_NTUPLE_FILTER;
- match = bnxt_match_fdir(bp, filter);
+ if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
+ vnic = STAILQ_FIRST(&bp->ff_pool[0]);
+ else
+ vnic = STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
+
+ match = bnxt_match_fdir(bp, filter, &mvnic);
if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
- PMD_DRV_LOG(ERR, "Flow already exists.\n");
- ret = -EEXIST;
- goto free_filter;
+ if (match->dst_id == vnic->fw_vnic_id) {
+ PMD_DRV_LOG(ERR, "Flow already exists.\n");
+ ret = -EEXIST;
+ goto free_filter;
+ } else {
+ match->dst_id = vnic->fw_vnic_id;
+ ret = bnxt_hwrm_set_ntuple_filter(bp,
+ match->dst_id,
+ match);
+ STAILQ_REMOVE(&mvnic->filter, match,
+ bnxt_filter_info, next);
+ STAILQ_INSERT_TAIL(&vnic->filter, match, next);
+ PMD_DRV_LOG(ERR,
+ "Filter with matching pattern exist\n");
+ PMD_DRV_LOG(ERR,
+ "Updated it to new destination q\n");
+ goto free_filter;
+ }
}
if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
PMD_DRV_LOG(ERR, "Flow does not exist.\n");
@@ -2448,12 +2481,6 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
goto free_filter;
}
- if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
- vnic = STAILQ_FIRST(&bp->ff_pool[0]);
- else
- vnic =
- STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
-
if (filter_op == RTE_ETH_FILTER_ADD) {
ret = bnxt_hwrm_set_ntuple_filter(bp,
filter->dst_id,
@@ -2489,7 +2516,6 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_UPDATE:
case RTE_ETH_FILTER_STATS:
case RTE_ETH_FILTER_INFO:
- /* FALLTHROUGH */
PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op);
break;
default:
@@ -2876,6 +2902,7 @@ static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type)
case BNX_DIR_TYPE_KONG_PATCH:
case BNX_DIR_TYPE_BONO_FW:
case BNX_DIR_TYPE_BONO_PATCH:
+ /* FALLTHROUGH */
return true;
}
@@ -2894,6 +2921,7 @@ static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type)
case BNX_DIR_TYPE_ISCSI_BOOT:
case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
+ /* FALLTHROUGH */
return true;
}
@@ -3032,7 +3060,8 @@ static bool bnxt_vf_pciid(uint16_t id)
id == BROADCOM_DEV_ID_5731X_VF ||
id == BROADCOM_DEV_ID_5741X_VF ||
id == BROADCOM_DEV_ID_57414_VF ||
- id == BROADCOM_DEV_ID_STRATUS_NIC_VF)
+ id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 ||
+ id == BROADCOM_DEV_ID_STRATUS_NIC_VF2)
return true;
return false;
}
@@ -3060,11 +3089,23 @@ static int bnxt_init_board(struct rte_eth_dev *eth_dev)
rc = -ENOMEM;
goto init_err_release;
}
+
+ if (!pci_dev->mem_resource[2].addr) {
+ PMD_DRV_LOG(ERR,
+ "Cannot find PCI device BAR 2 address, aborting\n");
+ rc = -ENODEV;
+ goto init_err_release;
+ } else {
+ bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr;
+ }
+
return 0;
init_err_release:
if (bp->bar0)
bp->bar0 = NULL;
+ if (bp->doorbell_base)
+ bp->doorbell_base = NULL;
init_err_disable:
@@ -3098,7 +3139,6 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
bp = eth_dev->data->dev_private;
- rte_atomic64_init(&bp->rx_mbuf_alloc_fail);
bp->dev_stopped = 1;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -3115,12 +3155,12 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
}
skip_init:
eth_dev->dev_ops = &bnxt_dev_ops;
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return 0;
eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
- if (BNXT_PF(bp) && pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) {
+ if (pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) {
snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
"bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
pci_dev->addr.bus, pci_dev->addr.devid,
@@ -3131,9 +3171,10 @@ skip_init:
sizeof(struct rx_port_stats) + 512);
if (!mz) {
mz = rte_memzone_reserve(mz_name, total_alloc_len,
- SOCKET_ID_ANY,
- RTE_MEMZONE_2MB |
- RTE_MEMZONE_SIZE_HINT_ONLY);
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_IOVA_CONTIG);
if (mz == NULL)
return -ENOMEM;
}
@@ -3165,10 +3206,12 @@ skip_init:
total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
sizeof(struct tx_port_stats) + 512);
if (!mz) {
- mz = rte_memzone_reserve(mz_name, total_alloc_len,
- SOCKET_ID_ANY,
- RTE_MEMZONE_2MB |
- RTE_MEMZONE_SIZE_HINT_ONLY);
+ mz = rte_memzone_reserve(mz_name,
+ total_alloc_len,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_IOVA_CONTIG);
if (mz == NULL)
return -ENOMEM;
}
@@ -3236,7 +3279,7 @@ skip_init:
goto error_free;
}
- if (check_zero_bytes(bp->dflt_mac_addr, ETHER_ADDR_LEN)) {
+ if (bnxt_check_zero_bytes(bp->dflt_mac_addr, ETHER_ADDR_LEN)) {
PMD_DRV_LOG(ERR,
"Invalid MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
bp->dflt_mac_addr[0], bp->dflt_mac_addr[1],
@@ -3344,17 +3387,13 @@ skip_init:
if (rc)
goto error_free_int;
- rc = bnxt_alloc_def_cp_ring(bp);
- if (rc)
- goto error_free_int;
-
bnxt_enable_int(bp);
+ bnxt_init_nic(bp);
return 0;
error_free_int:
bnxt_disable_int(bp);
- bnxt_free_def_cp_ring(bp);
bnxt_hwrm_func_buf_unrgtr(bp);
bnxt_free_int(bp);
bnxt_free_mem(bp);
@@ -3438,7 +3477,7 @@ bnxt_init_log(void)
{
bnxt_logtype_driver = rte_log_register("pmd.bnxt.driver");
if (bnxt_logtype_driver >= 0)
- rte_log_set_level(bnxt_logtype_driver, RTE_LOG_NOTICE);
+ rte_log_set_level(bnxt_logtype_driver, RTE_LOG_INFO);
}
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c
index 032e8eed..e36da997 100644
--- a/drivers/net/bnxt/bnxt_filter.c
+++ b/drivers/net/bnxt/bnxt_filter.c
@@ -1,38 +1,11 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#include <sys/queue.h>
+#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
@@ -96,9 +69,9 @@ void bnxt_init_filters(struct bnxt *bp)
STAILQ_INIT(&bp->free_filter_list);
for (i = 0; i < max_filters; i++) {
filter = &bp->filter_info[i];
- filter->fw_l2_filter_id = -1;
- filter->fw_em_filter_id = -1;
- filter->fw_ntuple_filter_id = -1;
+ filter->fw_l2_filter_id = UINT64_MAX;
+ filter->fw_em_filter_id = UINT64_MAX;
+ filter->fw_ntuple_filter_id = UINT64_MAX;
STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
}
}
@@ -159,6 +132,14 @@ void bnxt_free_filter_mem(struct bnxt *bp)
rte_free(bp->filter_info);
bp->filter_info = NULL;
+
+ for (i = 0; i < bp->pf.max_vfs; i++) {
+ STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
+ rte_free(filter);
+ STAILQ_REMOVE(&bp->pf.vf_info[i].filter, filter,
+ bnxt_filter_info, next);
+ }
+ }
}
int bnxt_alloc_filter_mem(struct bnxt *bp)
@@ -250,7 +231,7 @@ nxt_non_void_action(const struct rte_flow_action *cur)
}
}
-int check_zero_bytes(const uint8_t *bytes, int len)
+int bnxt_check_zero_bytes(const uint8_t *bytes, int len)
{
int i;
for (i = 0; i < len; i++)
@@ -302,6 +283,7 @@ bnxt_filter_type_check(const struct rte_flow_item pattern[],
static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
+ const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
struct rte_flow_error *error,
struct bnxt_filter_info *filter)
@@ -326,6 +308,7 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
uint32_t vf = 0;
int use_ntuple;
uint32_t en = 0;
+ uint32_t en_ethertype;
int dflt_vnic;
use_ntuple = bnxt_filter_type_check(pattern, error);
@@ -335,6 +318,9 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
filter->filter_type = use_ntuple ?
HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
+ en_ethertype = use_ntuple ?
+ NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
+ EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
while (item->type != RTE_FLOW_ITEM_TYPE_END) {
if (item->last) {
@@ -354,8 +340,8 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
}
switch (item->type) {
case RTE_FLOW_ITEM_TYPE_ETH:
- eth_spec = (const struct rte_flow_item_eth *)item->spec;
- eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ eth_spec = item->spec;
+ eth_mask = item->mask;
/* Source MAC address mask cannot be partially set.
* Should be All 0's or all 1's.
@@ -374,7 +360,8 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
}
/* Mask is not allowed. Only exact matches are */
- if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ if (eth_mask->type &&
+ eth_mask->type != RTE_BE16(0xffff)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
@@ -400,41 +387,58 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
* RTE_LOG(ERR, PMD, "Handle this condition\n");
* }
*/
- if (eth_spec->type) {
+ if (eth_mask->type) {
filter->ethertype =
rte_be_to_cpu_16(eth_spec->type);
- en |= use_ntuple ?
- NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
- EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
+ en |= en_ethertype;
}
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
- vlan_spec =
- (const struct rte_flow_item_vlan *)item->spec;
- vlan_mask =
- (const struct rte_flow_item_vlan *)item->mask;
- if (vlan_mask->tci & 0xFFFF && !vlan_mask->tpid) {
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
+ if (en & en_ethertype) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VLAN TPID matching is not"
+ " supported");
+ return -rte_errno;
+ }
+ if (vlan_mask->tci &&
+ vlan_mask->tci == RTE_BE16(0x0fff)) {
/* Only the VLAN ID can be matched. */
filter->l2_ovlan =
rte_be_to_cpu_16(vlan_spec->tci &
- 0xFFF);
+ RTE_BE16(0x0fff));
en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
- } else {
+ } else if (vlan_mask->tci) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
"VLAN mask is invalid");
return -rte_errno;
}
+ if (vlan_mask->inner_type &&
+ vlan_mask->inner_type != RTE_BE16(0xffff)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "inner ethertype mask not"
+ " valid");
+ return -rte_errno;
+ }
+ if (vlan_mask->inner_type) {
+ filter->ethertype =
+ rte_be_to_cpu_16(vlan_spec->inner_type);
+ en |= en_ethertype;
+ }
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
/* If mask is not involved, we could use EM filters. */
- ipv4_spec =
- (const struct rte_flow_item_ipv4 *)item->spec;
- ipv4_mask =
- (const struct rte_flow_item_ipv4 *)item->mask;
+ ipv4_spec = item->spec;
+ ipv4_mask = item->mask;
/* Only IP DST and SRC fields are maskable. */
if (ipv4_mask->hdr.version_ihl ||
ipv4_mask->hdr.type_of_service ||
@@ -483,10 +487,8 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
}
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
- ipv6_spec =
- (const struct rte_flow_item_ipv6 *)item->spec;
- ipv6_mask =
- (const struct rte_flow_item_ipv6 *)item->mask;
+ ipv6_spec = item->spec;
+ ipv6_mask = item->mask;
/* Only IP DST and SRC fields are maskable. */
if (ipv6_mask->hdr.vtc_flow ||
@@ -510,13 +512,15 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
ipv6_spec->hdr.src_addr, 16);
rte_memcpy(filter->dst_ipaddr,
ipv6_spec->hdr.dst_addr, 16);
- if (!check_zero_bytes(ipv6_mask->hdr.src_addr, 16)) {
+ if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
+ 16)) {
rte_memcpy(filter->src_ipaddr_mask,
ipv6_mask->hdr.src_addr, 16);
en |= !use_ntuple ? 0 :
NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
}
- if (!check_zero_bytes(ipv6_mask->hdr.dst_addr, 16)) {
+ if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
+ 16)) {
rte_memcpy(filter->dst_ipaddr_mask,
ipv6_mask->hdr.dst_addr, 16);
en |= !use_ntuple ? 0 :
@@ -527,8 +531,8 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
break;
case RTE_FLOW_ITEM_TYPE_TCP:
- tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
- tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ tcp_spec = item->spec;
+ tcp_mask = item->mask;
/* Check TCP mask. Only DST & SRC ports are maskable */
if (tcp_mask->hdr.sent_seq ||
@@ -564,8 +568,8 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
}
break;
case RTE_FLOW_ITEM_TYPE_UDP:
- udp_spec = (const struct rte_flow_item_udp *)item->spec;
- udp_mask = (const struct rte_flow_item_udp *)item->mask;
+ udp_spec = item->spec;
+ udp_mask = item->mask;
if (udp_mask->hdr.dgram_len ||
udp_mask->hdr.dgram_cksum) {
@@ -597,10 +601,8 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
}
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
- vxlan_spec =
- (const struct rte_flow_item_vxlan *)item->spec;
- vxlan_mask =
- (const struct rte_flow_item_vxlan *)item->mask;
+ vxlan_spec = item->spec;
+ vxlan_mask = item->mask;
/* Check if VXLAN item is used to describe protocol.
* If yes, both spec and mask should be NULL.
* If no, both spec and mask shouldn't be NULL.
@@ -646,10 +648,8 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
}
break;
case RTE_FLOW_ITEM_TYPE_NVGRE:
- nvgre_spec =
- (const struct rte_flow_item_nvgre *)item->spec;
- nvgre_mask =
- (const struct rte_flow_item_nvgre *)item->mask;
+ nvgre_spec = item->spec;
+ nvgre_mask = item->mask;
/* Check if NVGRE item is used to describe protocol.
* If yes, both spec and mask should be NULL.
* If no, both spec and mask shouldn't be NULL.
@@ -692,7 +692,7 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
}
break;
case RTE_FLOW_ITEM_TYPE_VF:
- vf_spec = (const struct rte_flow_item_vf *)item->spec;
+ vf_spec = item->spec;
vf = vf_spec->id;
if (!BNXT_PF(bp)) {
rte_flow_error_set(error, EINVAL,
@@ -710,6 +710,16 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
return -rte_errno;
}
+ if (!attr->transfer) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Matching VF traffic without"
+ " affecting it (transfer attribute)"
+ " is unsupported");
+ return -rte_errno;
+ }
+
filter->mirror_vnic_id =
dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
if (dflt_vnic < 0) {
@@ -836,7 +846,8 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
goto ret;
}
- rc = bnxt_validate_and_parse_flow_type(bp, pattern, error, filter);
+ rc = bnxt_validate_and_parse_flow_type(bp, attr, pattern, error,
+ filter);
if (rc != 0)
goto ret;
@@ -844,7 +855,8 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
if (rc != 0)
goto ret;
//Since we support ingress attribute only - right now.
- filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;
+ if (filter->filter_type == HWRM_CFA_EM_FILTER)
+ filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;
switch (act->type) {
case RTE_FLOW_ACTION_TYPE_QUEUE:
@@ -956,11 +968,6 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
goto ret;
}
- if (filter1) {
- bnxt_free_filter(bp, filter1);
- filter1->fw_l2_filter_id = -1;
- }
-
act = nxt_non_void_action(++act);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
rte_flow_error_set(error, EINVAL,
@@ -997,7 +1004,7 @@ bnxt_flow_validate(struct rte_eth_dev *dev,
ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
error, filter);
/* No need to hold on to this filter if we are just validating flow */
- filter->fw_l2_filter_id = -1;
+ filter->fw_l2_filter_id = UINT64_MAX;
bnxt_free_filter(bp, filter);
return ret;
@@ -1186,8 +1193,8 @@ bnxt_flow_destroy(struct rte_eth_dev *dev,
ret = bnxt_hwrm_clear_em_filter(bp, filter);
if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
-
- bnxt_hwrm_clear_l2_filter(bp, filter);
+ else
+ ret = bnxt_hwrm_clear_l2_filter(bp, filter);
if (!ret) {
STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
rte_free(flow);
diff --git a/drivers/net/bnxt/bnxt_filter.h b/drivers/net/bnxt/bnxt_filter.h
index a3c702df..d27be703 100644
--- a/drivers/net/bnxt/bnxt_filter.h
+++ b/drivers/net/bnxt/bnxt_filter.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#ifndef _BNXT_FILTER_H_
@@ -97,7 +69,7 @@ struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp);
void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter);
struct bnxt_filter_info *bnxt_get_l2_filter(struct bnxt *bp,
struct bnxt_filter_info *nf, struct bnxt_vnic_info *vnic);
-int check_zero_bytes(const uint8_t *bytes, int len);
+int bnxt_check_zero_bytes(const uint8_t *bytes, int len);
#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR \
HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index b7843afe..d6fdc1b8 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#include <unistd.h>
@@ -55,6 +27,8 @@
#include <rte_io.h>
#define HWRM_CMD_TIMEOUT 10000
+#define HWRM_SPEC_CODE_1_8_3 0x10803
+#define HWRM_VERSION_1_9_1 0x10901
struct bnxt_plcmodes_cfg {
uint32_t flags;
@@ -115,7 +89,7 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
short_input.req_type = rte_cpu_to_le_16(req->req_type);
short_input.signature = rte_cpu_to_le_16(
- HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
+ HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
short_input.size = rte_cpu_to_le_16(msg_len);
short_input.req_addr =
rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);
@@ -248,6 +222,9 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
uint32_t mask = 0;
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+ return rc;
+
HWRM_PREP(req, CFA_L2_SET_RX_MASK);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
@@ -344,7 +321,7 @@ int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
- filter->fw_l2_filter_id = -1;
+ filter->fw_l2_filter_id = UINT64_MAX;
return 0;
}
@@ -436,16 +413,18 @@ int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
HWRM_PREP(req, PORT_MAC_CFG);
if (ptp->rx_filter)
- flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
+ flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
else
- flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
+ flags |=
+ HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
if (ptp->tx_tstamp_en)
- flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
+ flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
else
- flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
+ flags |=
+ HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
req.flags = rte_cpu_to_le_32(flags);
- req.enables =
- rte_cpu_to_le_32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
+ req.enables = rte_cpu_to_le_32
+ (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -473,7 +452,7 @@ static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
HWRM_CHECK_RESULT();
- if (!(resp->flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS))
+ if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
return 0;
ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
@@ -505,7 +484,7 @@ static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
return 0;
}
-int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
int rc = 0;
struct hwrm_func_qcaps_input req = {.req_type = 0 };
@@ -595,6 +574,20 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
return rc;
}
+int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+{
+ int rc;
+
+ rc = __bnxt_hwrm_func_qcaps(bp);
+ if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
+ rc = bnxt_hwrm_func_resc_qcaps(bp);
+ if (!rc)
+ bp->flags |= BNXT_FLAG_NEW_RM;
+ }
+
+ return rc;
+}
+
int bnxt_hwrm_func_reset(struct bnxt *bp)
{
int rc = 0;
@@ -631,10 +624,19 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
if (BNXT_PF(bp)) {
req.enables |= rte_cpu_to_le_32(
- HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD);
+ HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
RTE_MIN(sizeof(req.vf_req_fwd),
sizeof(bp->pf.vf_req_fwd)));
+
+ /*
+ * PF can sniff HWRM API issued by VF. This can be set up by
+ * linux driver and inherited by the DPDK PF driver. Clear
+ * this HWRM sniffer list in FW because DPDK PF driver does
+ * not support this.
+ */
+ req.flags =
+ rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
}
req.async_event_fwd[0] |=
@@ -655,6 +657,64 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
return rc;
}
+int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp)
+{
+ int rc;
+ struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_vf_cfg_input req = {0};
+
+ HWRM_PREP(req, FUNC_VF_CFG);
+
+ req.enables = rte_cpu_to_le_32
+ (HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
+
+ req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
+ req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
+ AGG_RING_MULTIPLIER);
+ req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
+ req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
+ bp->tx_nr_rings);
+ req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+ return rc;
+}
+
+int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
+{
+ int rc;
+ struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_resource_qcaps_input req = {0};
+
+ HWRM_PREP(req, FUNC_RESOURCE_QCAPS);
+ req.fid = rte_cpu_to_le_16(0xffff);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+
+ if (BNXT_VF(bp)) {
+ bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
+ bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
+ bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
+ bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
+ bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
+ bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+ bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
+ bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
+ }
+
+ HWRM_UNLOCK();
+ return rc;
+}
+
int bnxt_hwrm_ver_get(struct bnxt *bp)
{
int rc = 0;
@@ -678,11 +738,13 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
HWRM_CHECK_RESULT();
PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
- resp->hwrm_intf_maj, resp->hwrm_intf_min,
- resp->hwrm_intf_upd,
- resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
- bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
- (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
+ resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
+ resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
+ resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
+ bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
+ (resp->hwrm_fw_min_8b << 16) |
+ (resp->hwrm_fw_bld_8b << 8) |
+ resp->hwrm_fw_rsvd_8b;
PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
@@ -690,11 +752,12 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
my_version |= HWRM_VERSION_MINOR << 8;
my_version |= HWRM_VERSION_UPDATE;
- fw_version = resp->hwrm_intf_maj << 16;
- fw_version |= resp->hwrm_intf_min << 8;
- fw_version |= resp->hwrm_intf_upd;
+ fw_version = resp->hwrm_intf_maj_8b << 16;
+ fw_version |= resp->hwrm_intf_min_8b << 8;
+ fw_version |= resp->hwrm_intf_upd_8b;
+ bp->hwrm_spec_code = fw_version;
- if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
+ if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
rc = -EINVAL;
goto error;
@@ -750,7 +813,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
if ((dev_caps_cfg &
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
(dev_caps_cfg &
- HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_INPUTUIRED)) {
+ HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
PMD_DRV_LOG(DEBUG, "Short command supported\n");
rte_free(bp->hwrm_short_cmd_req_addr);
@@ -919,9 +982,15 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
int rc = 0;
struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int i;
HWRM_PREP(req, QUEUE_QPORTCFG);
+ req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
+ /* HWRM Version >= 1.9.1 */
+ if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
+ req.drv_qmap_cap =
+ HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT();
@@ -941,6 +1010,20 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
HWRM_UNLOCK();
+ if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
+ bp->tx_cosq_id = bp->cos_queue[0].id;
+ } else {
+ /* iterate and find the COSq profile to use for Tx */
+ for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
+ if (bp->cos_queue[i].profile ==
+ HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
+ bp->tx_cosq_id = bp->cos_queue[i].id;
+ break;
+ }
+ }
+ }
+ PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);
+
return rc;
}
@@ -964,7 +1047,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
switch (ring_type) {
case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
- req.queue_id = bp->cos_queue[0].id;
+ req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
/* FALLTHROUGH */
case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
req.ring_type = ring_type;
@@ -1193,7 +1276,8 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
HWRM_PREP(req, VNIC_ALLOC);
if (vnic->func_default)
- req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
+ req.flags =
+ rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT();
@@ -1214,7 +1298,7 @@ static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
HWRM_PREP(req, VNIC_PLCMODES_QCFG);
- req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -1242,7 +1326,7 @@ static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
HWRM_PREP(req, VNIC_PLCMODES_CFG);
- req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
req.flags = rte_cpu_to_le_32(pmode->flags);
req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
@@ -1451,6 +1535,7 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
HWRM_PREP(req, VNIC_RSS_CFG);
req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
+ req.hash_mode_flags = vnic->hash_mode;
req.ring_grp_tbl_addr =
rte_cpu_to_le_64(vnic->rss_table_dma_addr);
@@ -1486,7 +1571,7 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
size -= RTE_PKTMBUF_HEADROOM;
req.jumbo_thresh = rte_cpu_to_le_16(size);
- req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -1517,12 +1602,12 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
- req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
req.max_agg_segs = rte_cpu_to_le_16(5);
req.max_aggs =
rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
req.min_agg_len = rte_cpu_to_le_32(512);
}
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -1607,10 +1692,9 @@ int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
- stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
- stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);
-
- stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
+ stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
+ stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
+ stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
HWRM_UNLOCK();
@@ -1755,7 +1839,7 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
struct bnxt_tx_ring_info *txr = txq->tx_ring;
struct bnxt_ring *ring = txr->tx_ring_struct;
struct bnxt_cp_ring_info *cpr = txq->cp_ring;
- unsigned int idx = bp->rx_cp_nr_rings + i + 1;
+ unsigned int idx = bp->rx_cp_nr_rings + i;
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
bnxt_hwrm_ring_free(bp, ring,
@@ -1781,13 +1865,12 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
struct bnxt_ring *ring = rxr->rx_ring_struct;
struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
- unsigned int idx = i + 1;
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
bnxt_hwrm_ring_free(bp, ring,
HWRM_RING_FREE_INPUT_RING_TYPE_RX);
ring->fw_ring_id = INVALID_HW_RING_ID;
- bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
memset(rxr->rx_desc_ring, 0,
rxr->rx_ring_struct->ring_size *
sizeof(*rxr->rx_desc_ring));
@@ -1808,7 +1891,7 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID;
}
if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
- bnxt_free_cp_ring(bp, cpr, idx);
+ bnxt_free_cp_ring(bp, cpr, i);
bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
}
@@ -1992,6 +2075,7 @@ static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
switch (conf_link_speed) {
case ETH_LINK_SPEED_10M_HD:
case ETH_LINK_SPEED_100M_HD:
+ /* FALLTHROUGH */
return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
}
return hw_link_duplex;
@@ -2012,6 +2096,7 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
case ETH_LINK_SPEED_100M:
case ETH_LINK_SPEED_100M_HD:
+ /* FALLTHROUGH */
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
break;
@@ -2176,6 +2261,7 @@ static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
switch (hw_link_duplex) {
case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
+ /* FALLTHROUGH */
eth_link_duplex = ETH_LINK_FULL_DUPLEX;
break;
case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
@@ -2305,6 +2391,7 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp)
case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
+ /* FALLTHROUGH */
bp->port_partition_type = resp->port_partition_type;
break;
default:
@@ -2362,7 +2449,8 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
- ETHER_CRC_LEN + VLAN_TAG_SIZE);
+ ETHER_CRC_LEN + VLAN_TAG_SIZE *
+ BNXT_NUM_VLANS);
req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
@@ -2399,9 +2487,11 @@ static void populate_vf_func_cfg_req(struct bnxt *bp,
HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
- ETHER_CRC_LEN + VLAN_TAG_SIZE);
+ ETHER_CRC_LEN + VLAN_TAG_SIZE *
+ BNXT_NUM_VLANS);
req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
- ETHER_CRC_LEN + VLAN_TAG_SIZE);
+ ETHER_CRC_LEN + VLAN_TAG_SIZE *
+ BNXT_NUM_VLANS);
req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
(num_vfs + 1));
req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
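
The MRU computed above now reserves room for multiple VLAN tags (QinQ) on top of the Ethernet header and CRC. A worked example with assumed macro values (ETHER_HDR_LEN 14, ETHER_CRC_LEN 4, VLAN_TAG_SIZE 4, and BNXT_NUM_VLANS taken as 2):

    #include <stdint.h>

    #define HDR_LEN   14   /* assumed ETHER_HDR_LEN */
    #define CRC_LEN   4    /* assumed ETHER_CRC_LEN */
    #define VLAN_LEN  4    /* assumed VLAN_TAG_SIZE */
    #define NUM_VLANS 2    /* assumed BNXT_NUM_VLANS: outer + inner tag */

    static uint16_t calc_mru(uint16_t mtu)
    {
        /* mtu 1500 -> 1500 + 14 + 4 + 4 * 2 = 1526 bytes */
        return mtu + HDR_LEN + CRC_LEN + VLAN_LEN * NUM_VLANS;
    }
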
@@ -2766,9 +2856,9 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
req.req_buf_page_size = rte_cpu_to_le_16(
page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
- req.req_buf_page_addr[0] =
+ req.req_buf_page_addr0 =
rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
- if (req.req_buf_page_addr[0] == 0) {
+ if (req.req_buf_page_addr0 == 0) {
PMD_DRV_LOG(ERR,
"unable to map buffer address to physical memory\n");
return -ENOMEM;
@@ -2915,6 +3005,18 @@ int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
return rc;
}
+int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
+{
+ int rc;
+
+ if (BNXT_PF(bp))
+ rc = bnxt_hwrm_func_cfg_def_cp(bp);
+ else
+ rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
+
+ return rc;
+}
+
int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
void *encaped, size_t ec_size)
{
@@ -3029,9 +3131,6 @@ int bnxt_hwrm_port_qstats(struct bnxt *bp)
struct bnxt_pf_info *pf = &bp->pf;
int rc;
- if (!(bp->flags & BNXT_FLAG_PORT_STATS))
- return 0;
-
HWRM_PREP(req, PORT_QSTATS);
req.port_id = rte_cpu_to_le_16(pf->port_id);
@@ -3588,8 +3687,8 @@ int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
- filter->fw_em_filter_id = -1;
- filter->fw_l2_filter_id = -1;
+ filter->fw_em_filter_id = UINT64_MAX;
+ filter->fw_l2_filter_id = UINT64_MAX;
return 0;
}
@@ -3700,7 +3799,8 @@ int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
- filter->fw_ntuple_filter_id = -1;
+ filter->fw_ntuple_filter_id = UINT64_MAX;
+ filter->fw_l2_filter_id = UINT64_MAX;
return 0;
}
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index f11e72a3..60a4ab16 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#ifndef _BNXT_HWRM_H_
@@ -54,6 +26,9 @@ struct bnxt_cp_ring_info;
#define ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE \
(1 << (HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE - 32))
+#define HWRM_QUEUE_SERVICE_PROFILE_LOSSY \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY
+
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp,
struct bnxt_vnic_info *vnic);
int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic,
@@ -88,6 +63,7 @@ int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp);
int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
+int bnxt_hwrm_set_async_event_cr(struct bnxt *bp);
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
struct bnxt_ring *ring,
uint32_t ring_type, uint32_t map_index,
@@ -135,6 +111,8 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp);
int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link);
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up);
int bnxt_hwrm_func_qcfg(struct bnxt *bp);
+int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp);
+int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp);
int bnxt_hwrm_allocate_pf_only(struct bnxt *bp);
int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs);
int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf,
diff --git a/drivers/net/bnxt/bnxt_irq.c b/drivers/net/bnxt/bnxt_irq.c
index 8ab98693..7ef7023e 100644
--- a/drivers/net/bnxt/bnxt_irq.c
+++ b/drivers/net/bnxt/bnxt_irq.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2015 Broadcom Corporation.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#include <inttypes.h>
@@ -68,30 +40,10 @@ static void bnxt_int_handler(void *param)
if (!CMP_VALID(cmp, raw_cons, cpr->cp_ring_struct))
break;
- switch (CMP_TYPE(cmp)) {
- case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
- /* Handle any async event */
- bnxt_handle_async_event(bp, cmp);
- break;
- case CMPL_BASE_TYPE_HWRM_FWD_REQ:
- /* Handle HWRM forwarded responses */
- bnxt_handle_fwd_req(bp, cmp);
- break;
- default:
- /* Ignore any other events */
- if (cmp->type & rte_cpu_to_le_16(0x01)) {
- if (!CMP_VALID(cmp, raw_cons,
- cpr->cp_ring_struct))
- goto no_more;
- }
- PMD_DRV_LOG(INFO,
- "Ignoring %02x completion\n", CMP_TYPE(cmp));
- break;
- }
+ bnxt_event_hwrm_resp_handler(bp, cmp);
raw_cons = NEXT_RAW_CMP(raw_cons);
-
};
-no_more:
+
cpr->cp_raw_cons = raw_cons;
B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
}
@@ -127,7 +79,9 @@ void bnxt_enable_int(struct bnxt *bp)
{
struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
- B_CP_DB_ARM(cpr);
+ /* Only the default completion ring */
+ if (cpr != NULL && cpr->cp_doorbell != NULL)
+ B_CP_DB_ARM(cpr);
}
int bnxt_setup_int(struct bnxt *bp)
diff --git a/drivers/net/bnxt/bnxt_irq.h b/drivers/net/bnxt/bnxt_irq.h
index 4d2f7af9..75ba2135 100644
--- a/drivers/net/bnxt/bnxt_irq.h
+++ b/drivers/net/bnxt/bnxt_irq.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2015 Broadcom Corporation.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#ifndef _BNXT_IRQ_H_
diff --git a/drivers/net/bnxt/bnxt_nvm_defs.h b/drivers/net/bnxt/bnxt_nvm_defs.h
index c5ccc9bc..ea9d4a9d 100644
--- a/drivers/net/bnxt/bnxt_nvm_defs.h
+++ b/drivers/net/bnxt/bnxt_nvm_defs.h
@@ -1,11 +1,6 @@
-/* Broadcom NetXtreme-C/E network driver.
- *
- * Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2017 Broadcom Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation.
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#ifndef _BNXT_NVM_DEFS_H_
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 8fb89721..bb9f6d1c 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#include <rte_bitmap.h>
@@ -52,11 +24,14 @@
void bnxt_free_ring(struct bnxt_ring *ring)
{
+ if (!ring)
+ return;
+
if (ring->vmem_size && *ring->vmem) {
memset((char *)*ring->vmem, 0, ring->vmem_size);
*ring->vmem = NULL;
}
- rte_memzone_free((const struct rte_memzone *)ring->mem_zone);
+ ring->mem_zone = NULL;
}
/*
@@ -89,22 +64,26 @@ int bnxt_init_ring_grps(struct bnxt *bp)
* rx bd ring - Only non-zero length if rx_ring_info is not NULL
*/
int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
- struct bnxt_tx_ring_info *tx_ring_info,
- struct bnxt_rx_ring_info *rx_ring_info,
+ struct bnxt_tx_queue *txq,
+ struct bnxt_rx_queue *rxq,
struct bnxt_cp_ring_info *cp_ring_info,
const char *suffix)
{
struct bnxt_ring *cp_ring = cp_ring_info->cp_ring_struct;
+ struct bnxt_rx_ring_info *rx_ring_info = rxq ? rxq->rx_ring : NULL;
+ struct bnxt_tx_ring_info *tx_ring_info = txq ? txq->tx_ring : NULL;
struct bnxt_ring *tx_ring;
struct bnxt_ring *rx_ring;
struct rte_pci_device *pdev = bp->pdev;
+ uint64_t rx_offloads = bp->eth_dev->data->dev_conf.rxmode.offloads;
const struct rte_memzone *mz = NULL;
char mz_name[RTE_MEMZONE_NAMESIZE];
rte_iova_t mz_phys_addr;
int sz;
int stats_len = (tx_ring_info || rx_ring_info) ?
- RTE_CACHE_LINE_ROUNDUP(sizeof(struct ctx_hw_stats64)) : 0;
+ RTE_CACHE_LINE_ROUNDUP(sizeof(struct hwrm_stat_ctx_query_output) -
+ sizeof (struct hwrm_resp_hdr)) : 0;
int cp_vmem_start = stats_len;
int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size);
@@ -155,7 +134,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
sizeof(struct bnxt_tpa_info)) : 0;
int total_alloc_len = tpa_info_start;
- if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
+ if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
total_alloc_len += tpa_info_len;
snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
@@ -166,10 +145,11 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
mz = rte_memzone_lookup(mz_name);
if (!mz) {
mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len,
- SOCKET_ID_ANY,
- RTE_MEMZONE_2MB |
- RTE_MEMZONE_SIZE_HINT_ONLY,
- getpagesize());
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_IOVA_CONTIG,
+ getpagesize());
if (mz == NULL)
return -ENOMEM;
}
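
The ring memzone is now requested as IOVA-contiguous, since ring and stats memory is handed to the NIC as one block. A minimal sketch of the lookup-then-reserve pattern used above; the zone name and length handling are illustrative only.

    #include <unistd.h>
    #include <rte_memzone.h>

    static const struct rte_memzone *get_ring_mz(const char *name, size_t len)
    {
        /* reuse an existing zone of the same name, otherwise reserve one */
        const struct rte_memzone *mz = rte_memzone_lookup(name);

        if (mz == NULL)
            mz = rte_memzone_reserve_aligned(name, len, SOCKET_ID_ANY,
                                             RTE_MEMZONE_2MB |
                                             RTE_MEMZONE_SIZE_HINT_ONLY |
                                             RTE_MEMZONE_IOVA_CONTIG,
                                             getpagesize());
        return mz;
    }
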
@@ -191,6 +171,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
}
if (tx_ring_info) {
+ txq->mz = mz;
tx_ring = tx_ring_info->tx_ring_struct;
tx_ring->bd = ((char *)mz->addr + tx_ring_start);
@@ -210,6 +191,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
}
if (rx_ring_info) {
+ rxq->mz = mz;
rx_ring = rx_ring_info->rx_ring_struct;
rx_ring->bd = ((char *)mz->addr + rx_ring_start);
@@ -252,7 +234,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
ag_bitmap_start, ag_bitmap_len);
/* TPA info */
- if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
+ if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
rx_ring_info->tpa_info =
((struct bnxt_tpa_info *)((char *)mz->addr +
tpa_info_start));
@@ -283,7 +265,6 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
*/
int bnxt_alloc_hwrm_rings(struct bnxt *bp)
{
- struct rte_pci_device *pci_dev = bp->pdev;
unsigned int i;
int rc = 0;
@@ -293,33 +274,48 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
struct bnxt_ring *ring = rxr->rx_ring_struct;
- unsigned int idx = i + 1;
- unsigned int map_idx = idx + bp->rx_cp_nr_rings;
+ unsigned int map_idx = i + bp->rx_cp_nr_rings;
bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
/* Rx cmpl */
- rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
- HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
- idx, HWRM_NA_SIGNATURE,
- HWRM_NA_SIGNATURE);
+ rc = bnxt_hwrm_ring_alloc
+ (bp,
+ cp_ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
+ i,
+ HWRM_NA_SIGNATURE,
+ HWRM_NA_SIGNATURE);
if (rc)
goto err_out;
- cpr->cp_doorbell = (char *)pci_dev->mem_resource[2].addr +
- idx * 0x80;
+ cpr->cp_doorbell = (char *)bp->doorbell_base + i * 0x80;
bp->grp_info[i].cp_fw_ring_id = cp_ring->fw_ring_id;
B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+ if (!i) {
+ /*
+ * In order to save completion resources, use the first
+ * completion ring from PF or VF as the default
+ * completion ring for async event & HWRM
+ * forward response handling.
+ */
+ bp->def_cp_ring = cpr;
+ rc = bnxt_hwrm_set_async_event_cr(bp);
+ if (rc)
+ goto err_out;
+ }
+
/* Rx ring */
- rc = bnxt_hwrm_ring_alloc(bp, ring,
- HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
- idx, cpr->hw_stats_ctx_id,
- cp_ring->fw_ring_id);
+ rc = bnxt_hwrm_ring_alloc(bp,
+ ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
+ i,
+ cpr->hw_stats_ctx_id,
+ cp_ring->fw_ring_id);
if (rc)
goto err_out;
rxr->rx_prod = 0;
- rxr->rx_doorbell = (char *)pci_dev->mem_resource[2].addr +
- idx * 0x80;
+ rxr->rx_doorbell = (char *)bp->doorbell_base + i * 0x80;
bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id;
B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
@@ -338,9 +334,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
goto err_out;
PMD_DRV_LOG(DEBUG, "Alloc AGG Done!\n");
rxr->ag_prod = 0;
- rxr->ag_doorbell =
- (char *)pci_dev->mem_resource[2].addr +
- map_idx * 0x80;
+ rxr->ag_doorbell = (char *)bp->doorbell_base + map_idx * 0x80;
bp->grp_info[i].ag_fw_ring_id = ring->fw_ring_id;
B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
@@ -353,7 +347,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
}
B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
- rxq->index = idx;
+ rxq->index = i;
}
for (i = 0; i < bp->tx_cp_nr_rings; i++) {
@@ -362,7 +356,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
struct bnxt_tx_ring_info *txr = txq->tx_ring;
struct bnxt_ring *ring = txr->tx_ring_struct;
- unsigned int idx = i + 1 + bp->rx_cp_nr_rings;
+ unsigned int idx = i + bp->rx_cp_nr_rings;
/* Tx cmpl */
rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
@@ -372,8 +366,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
if (rc)
goto err_out;
- cpr->cp_doorbell = (char *)pci_dev->mem_resource[2].addr +
- idx * 0x80;
+ cpr->cp_doorbell = (char *)bp->doorbell_base + idx * 0x80;
B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
/* Tx ring */
@@ -384,8 +377,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
if (rc)
goto err_out;
- txr->tx_doorbell = (char *)pci_dev->mem_resource[2].addr +
- idx * 0x80;
+ txr->tx_doorbell = (char *)bp->doorbell_base + idx * 0x80;
txq->index = idx;
}
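
Completion, Rx, aggregation, and Tx doorbells above are all derived from one base address with a fixed 0x80-byte stride, and ring indices now start at 0. A trivial sketch of that address math; the helper name is an assumption.

    #include <stdint.h>

    #define DB_STRIDE 0x80   /* one doorbell register block per ring */

    static inline void *ring_doorbell(char *doorbell_base, unsigned int ring_idx)
    {
        /* ring 0 -> base + 0x00, ring 1 -> base + 0x80, ring 2 -> base + 0x100 */
        return doorbell_base + (uintptr_t)ring_idx * DB_STRIDE;
    }
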
diff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h
index ebf7228e..65bf3e2f 100644
--- a/drivers/net/bnxt/bnxt_ring.h
+++ b/drivers/net/bnxt/bnxt_ring.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#ifndef _BNXT_RING_H_
@@ -56,6 +28,7 @@
#define BNXT_TPA_MAX 64
#define AGG_RING_SIZE_FACTOR 2
+#define AGG_RING_MULTIPLIER 2
/* These assume 4k pages */
#define MAX_RX_DESC_CNT (8 * 1024)
@@ -93,8 +66,8 @@ struct bnxt_cp_ring_info;
void bnxt_free_ring(struct bnxt_ring *ring);
int bnxt_init_ring_grps(struct bnxt *bp);
int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
- struct bnxt_tx_ring_info *tx_ring_info,
- struct bnxt_rx_ring_info *rx_ring_info,
+ struct bnxt_tx_queue *txq,
+ struct bnxt_rx_queue *rxq,
struct bnxt_cp_ring_info *cp_ring_info,
const char *suffix);
int bnxt_alloc_hwrm_rings(struct bnxt *bp);
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index d49f3546..c55ddec4 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#include <inttypes.h>
@@ -51,10 +23,8 @@
void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
{
- struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
-
- if (cpr->hw_stats)
- cpr->hw_stats = NULL;
+ if (rxq && rxq->cp_ring && rxq->cp_ring->hw_stats)
+ rxq->cp_ring->hw_stats = NULL;
}
int bnxt_mq_rx_configure(struct bnxt *bp)
@@ -107,6 +77,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
switch (dev_conf->rxmode.mq_mode) {
case ETH_MQ_RX_VMDQ_RSS:
case ETH_MQ_RX_VMDQ_ONLY:
+ /* FALLTHROUGH */
/* ETH_8/64_POOLs */
pools = conf->nb_queue_pools;
/* For each pool, allocate MACVLAN CFA rule & VNIC */
@@ -237,7 +208,8 @@ static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
if (rxq) {
sw_ring = rxq->rx_ring->rx_buf_ring;
if (sw_ring) {
- for (i = 0; i < rxq->nb_rx_desc; i++) {
+ for (i = 0;
+ i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
if (sw_ring[i].mbuf) {
rte_pktmbuf_free_seg(sw_ring[i].mbuf);
sw_ring[i].mbuf = NULL;
@@ -247,7 +219,8 @@ static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
/* Free up mbufs in Agg ring */
sw_ring = rxq->rx_ring->ag_buf_ring;
if (sw_ring) {
- for (i = 0; i < rxq->nb_rx_desc; i++) {
+ for (i = 0;
+ i < rxq->rx_ring->ag_ring_struct->ring_size; i++) {
if (sw_ring[i].mbuf) {
rte_pktmbuf_free_seg(sw_ring[i].mbuf);
sw_ring[i].mbuf = NULL;
@@ -295,6 +268,8 @@ void bnxt_rx_queue_release_op(void *rx_queue)
bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
bnxt_free_rxq_stats(rxq);
+ rte_memzone_free(rxq->mz);
+ rxq->mz = NULL;
rte_free(rxq);
}
@@ -308,6 +283,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
struct rte_mempool *mp)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
struct bnxt_rx_queue *rxq;
int rc = 0;
@@ -315,7 +291,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
PMD_DRV_LOG(ERR,
"Cannot create Rx ring %d. Only %d rings available\n",
queue_idx, bp->max_rx_rings);
- return -ENOSPC;
+ return -EINVAL;
}
if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
@@ -350,12 +326,12 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rxq->queue_id = queue_idx;
rxq->port_id = eth_dev->data->port_id;
- rxq->crc_len = (uint8_t)((eth_dev->data->dev_conf.rxmode.hw_strip_crc) ?
- 0 : ETHER_CRC_LEN);
+ rxq->crc_len = rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP ?
+ 0 : ETHER_CRC_LEN;
eth_dev->data->rx_queues[queue_idx] = rxq;
/* Allocate RX ring hardware descriptors */
- if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq->rx_ring, rxq->cp_ring,
+ if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring,
"rxr")) {
PMD_DRV_LOG(ERR,
"ring_dma_zone_reserve for rx_ring failed!\n");
@@ -363,6 +339,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rc = -ENOMEM;
goto out;
}
+ rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);
out:
return rc;
diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h
index c7acaa75..8307f603 100644
--- a/drivers/net/bnxt/bnxt_rxq.h
+++ b/drivers/net/bnxt/bnxt_rxq.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#ifndef _BNXT_RQX_H_
@@ -60,6 +32,8 @@ struct bnxt_rx_queue {
uint32_t rx_buf_use_size; /* useable size */
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_cp_ring_info *cp_ring;
+ rte_atomic64_t rx_mbuf_alloc_fail;
+ const struct rte_memzone *mz;
};
void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq);
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index aae9a635..9d884292 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#include <inttypes.h>
@@ -69,13 +41,14 @@ static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
if (!mbuf) {
- rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
+ rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
return -ENOMEM;
}
rx_buf->mbuf = mbuf;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
- rxbd->addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+ rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
return 0;
}
@@ -90,7 +63,7 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
if (!mbuf) {
- rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
+ rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
return -ENOMEM;
}
@@ -101,8 +74,9 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
rx_buf->mbuf = mbuf;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
- rxbd->addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+ rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
return 0;
}
@@ -123,7 +97,7 @@ static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr,
prod_bd = &rxr->rx_desc_ring[prod];
- prod_bd->addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+ prod_bd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
rxr->rx_prod = prod;
}
@@ -143,7 +117,7 @@ static void bnxt_reuse_ag_mbuf(struct bnxt_rx_ring_info *rxr, uint16_t cons,
prod_bd = &rxr->ag_desc_ring[prod];
cons_bd = &rxr->ag_desc_ring[cons];
- prod_bd->addr = cons_bd->addr;
+ prod_bd->address = cons_bd->addr;
}
#endif
@@ -327,7 +301,7 @@ static inline struct rte_mbuf *bnxt_tpa_end(
struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
RTE_ASSERT(new_data != NULL);
if (!new_data) {
- rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
+ rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
return NULL;
}
tpa_info->mbuf = new_data;
@@ -490,11 +464,15 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
if (likely(RX_CMP_IP_CS_OK(rxcmp1)))
mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ else if (likely(RX_CMP_IP_CS_UNKNOWN(rxcmp1)))
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
else
mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
if (likely(RX_CMP_L4_CS_OK(rxcmp1)))
mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ else if (likely(RX_CMP_L4_CS_UNKNOWN(rxcmp1)))
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
else
mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
@@ -560,6 +538,7 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t prod = rxr->rx_prod;
uint16_t ag_prod = rxr->ag_prod;
int rc = 0;
+ bool evt = false;
/* If Rx Q was stopped return */
if (rxq->rx_deferred_start)
@@ -584,14 +563,19 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
nb_rx_pkts++;
if (rc == -EBUSY) /* partial completion */
break;
+ } else {
+ evt =
+ bnxt_event_hwrm_resp_handler(rxq->bp,
+ (struct cmpl_base *)rxcmp);
}
+
raw_cons = NEXT_RAW_CMP(raw_cons);
- if (nb_rx_pkts == nb_pkts)
+ if (nb_rx_pkts == nb_pkts || evt)
break;
}
cpr->cp_raw_cons = raw_cons;
- if (prod == rxr->rx_prod && ag_prod == rxr->ag_prod) {
+ if ((prod == rxr->rx_prod && ag_prod == rxr->ag_prod) && !evt) {
/*
* For PMD, there is no need to keep on pushing to REARM
* the doorbell if there are no new completions
@@ -600,9 +584,12 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
}
B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
- B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+ if (prod != rxr->rx_prod)
+ B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+
/* Ring the AGG ring DB */
- B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
+ if (ag_prod != rxr->ag_prod)
+ B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
/* Attempt to alloc Rx buf in case of a previous allocation failure. */
if (rc == -ENOMEM) {
@@ -755,7 +742,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
if (rxq->rx_buf_use_size <= size)
size = rxq->rx_buf_use_size;
- type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT;
+ type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT | RX_PROD_PKT_BD_FLAGS_EOP_PAD;
rxr = rxq->rx_ring;
ring = rxr->rx_ring_struct;
@@ -795,7 +782,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
rxr->tpa_info[i].mbuf =
__bnxt_alloc_rx_data(rxq->mb_pool);
if (!rxr->tpa_info[i].mbuf) {
- rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
+ rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
return -ENOMEM;
}
}
diff --git a/drivers/net/bnxt/bnxt_rxr.h b/drivers/net/bnxt/bnxt_rxr.h
index f3ed49bd..5b28f032 100644
--- a/drivers/net/bnxt/bnxt_rxr.h
+++ b/drivers/net/bnxt/bnxt_rxr.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#ifndef _BNXT_RXR_H_
@@ -52,22 +24,36 @@
#define BNXT_TPA_OUTER_L3_OFF(hdr_info) \
((hdr_info) & 0x1ff)
-#define RX_CMP_L4_CS_BITS rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_L4_CS_CALC)
+#define RX_CMP_L4_CS_BITS \
+ rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_L4_CS_CALC | \
+ RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)
-#define RX_CMP_L4_CS_ERR_BITS rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_L4_CS_ERROR)
+#define RX_CMP_L4_CS_ERR_BITS \
+ rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_L4_CS_ERROR | \
+ RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR)
#define RX_CMP_L4_CS_OK(rxcmp1) \
(((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS) && \
!((rxcmp1)->errors_v2 & RX_CMP_L4_CS_ERR_BITS))
-#define RX_CMP_IP_CS_ERR_BITS rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_IP_CS_ERROR)
+#define RX_CMP_L4_CS_UNKNOWN(rxcmp1) \
+ !((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS)
-#define RX_CMP_IP_CS_BITS rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC)
+#define RX_CMP_IP_CS_ERR_BITS \
+ rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_IP_CS_ERROR | \
+ RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR)
+
+#define RX_CMP_IP_CS_BITS \
+ rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
+ RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)
#define RX_CMP_IP_CS_OK(rxcmp1) \
(((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS) && \
!((rxcmp1)->errors_v2 & RX_CMP_IP_CS_ERR_BITS))
+#define RX_CMP_IP_CS_UNKNOWN(rxcmp1) \
+ !((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS)
+
enum pkt_hash_types {
PKT_HASH_TYPE_NONE, /* Undefined type */
PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */
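
With the ..._UNKNOWN macros added above, the Rx path can report three checksum states per layer: not computed, computed and good, computed and bad. A self-contained sketch of that classification; the bit values are placeholders for the CS_CALC/CS_ERROR bits.

    #include <stdint.h>

    #define CS_CALC  0x1   /* placeholder: hardware calculated the checksum */
    #define CS_ERROR 0x2   /* placeholder: the calculated checksum was bad */

    enum cksum_state { CKSUM_UNKNOWN, CKSUM_GOOD, CKSUM_BAD };

    static enum cksum_state classify_cksum(uint32_t flags2, uint32_t errors)
    {
        if (!(flags2 & CS_CALC))
            return CKSUM_UNKNOWN;                   /* layer not checked */
        return (errors & CS_ERROR) ? CKSUM_BAD : CKSUM_GOOD;
    }
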
diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
index bd93cc83..bbd4e78b 100644
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#include <inttypes.h>
@@ -172,8 +144,8 @@ static const struct bnxt_xstats_name_off bnxt_func_stats_strings[] = {
tx_mcast_pkts)},
{"tx_bcast_pkts", offsetof(struct hwrm_func_qstats_output,
tx_bcast_pkts)},
- {"tx_err_pkts", offsetof(struct hwrm_func_qstats_output,
- tx_err_pkts)},
+ {"tx_discard_pkts", offsetof(struct hwrm_func_qstats_output,
+ tx_discard_pkts)},
{"tx_drop_pkts", offsetof(struct hwrm_func_qstats_output,
tx_drop_pkts)},
{"tx_ucast_bytes", offsetof(struct hwrm_func_qstats_output,
@@ -188,8 +160,8 @@ static const struct bnxt_xstats_name_off bnxt_func_stats_strings[] = {
rx_mcast_pkts)},
{"rx_bcast_pkts", offsetof(struct hwrm_func_qstats_output,
rx_bcast_pkts)},
- {"rx_err_pkts", offsetof(struct hwrm_func_qstats_output,
- rx_err_pkts)},
+ {"rx_discard_pkts", offsetof(struct hwrm_func_qstats_output,
+ rx_discard_pkts)},
{"rx_drop_pkts", offsetof(struct hwrm_func_qstats_output,
rx_drop_pkts)},
{"rx_ucast_bytes", offsetof(struct hwrm_func_qstats_output,
@@ -238,7 +210,7 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
memset(bnxt_stats, 0, sizeof(*bnxt_stats));
if (!(bp->flags & BNXT_FLAG_INIT_DONE)) {
PMD_DRV_LOG(ERR, "Device Initialization not complete!\n");
- return 0;
+ return -1;
}
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
@@ -249,6 +221,8 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
bnxt_stats, 1);
if (unlikely(rc))
return rc;
+ bnxt_stats->rx_nombuf +=
+ rte_atomic64_read(&rxq->rx_mbuf_alloc_fail);
}
for (i = 0; i < bp->tx_cp_nr_rings; i++) {
@@ -263,13 +237,13 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
rc = bnxt_hwrm_func_qstats(bp, 0xffff, bnxt_stats);
if (unlikely(rc))
return rc;
- bnxt_stats->rx_nombuf = rte_atomic64_read(&bp->rx_mbuf_alloc_fail);
return rc;
}
void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ unsigned int i;
if (!(bp->flags & BNXT_FLAG_INIT_DONE)) {
PMD_DRV_LOG(ERR, "Device Initialization not complete!\n");
@@ -277,7 +251,11 @@ void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
}
bnxt_clear_all_hwrm_stat_ctxs(bp);
- rte_atomic64_clear(&bp->rx_mbuf_alloc_fail);
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+
+ rte_atomic64_clear(&rxq->rx_mbuf_alloc_fail);
+ }
}
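
Mbuf allocation failures are now counted per Rx queue (rxq->rx_mbuf_alloc_fail) and summed into rx_nombuf at stats-get time, rather than kept in one device-wide counter. A minimal sketch of that aggregation with placeholder types:

    #include <stdint.h>

    struct rx_queue { uint64_t mbuf_alloc_fail; };

    static uint64_t sum_rx_nombuf(const struct rx_queue *rxqs, unsigned int nb_rxq)
    {
        uint64_t total = 0;
        unsigned int i;

        for (i = 0; i < nb_rxq; i++)
            total += rxqs[i].mbuf_alloc_fail;   /* one counter per queue */
        return total;
    }
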
int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
@@ -288,11 +266,6 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
unsigned int count, i;
uint64_t tx_drop_pkts;
- if (!(bp->flags & BNXT_FLAG_PORT_STATS)) {
- PMD_DRV_LOG(ERR, "xstats not supported for VF\n");
- return 0;
- }
-
bnxt_hwrm_port_qstats(bp);
bnxt_hwrm_func_qstats_tx_drop(bp, 0xffff, &tx_drop_pkts);
diff --git a/drivers/net/bnxt/bnxt_stats.h b/drivers/net/bnxt/bnxt_stats.h
index c1c83d57..b0f135a5 100644
--- a/drivers/net/bnxt/bnxt_stats.h
+++ b/drivers/net/bnxt/bnxt_stats.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2015 Broadcom Corporation.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#ifndef _BNXT_STATS_H_
diff --git a/drivers/net/bnxt/bnxt_txq.c b/drivers/net/bnxt/bnxt_txq.c
index 53524346..b9b975e4 100644
--- a/drivers/net/bnxt/bnxt_txq.c
+++ b/drivers/net/bnxt/bnxt_txq.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#include <inttypes.h>
@@ -47,10 +19,8 @@
void bnxt_free_txq_stats(struct bnxt_tx_queue *txq)
{
- struct bnxt_cp_ring_info *cpr = txq->cp_ring;
-
- if (cpr->hw_stats)
- cpr->hw_stats = NULL;
+ if (txq && txq->cp_ring && txq->cp_ring->hw_stats)
+ txq->cp_ring->hw_stats = NULL;
}
static void bnxt_tx_queue_release_mbufs(struct bnxt_tx_queue *txq)
@@ -58,6 +28,9 @@ static void bnxt_tx_queue_release_mbufs(struct bnxt_tx_queue *txq)
struct bnxt_sw_tx_bd *sw_ring;
uint16_t i;
+ if (!txq)
+ return;
+
sw_ring = txq->tx_ring->tx_buf_ring;
if (sw_ring) {
for (i = 0; i < txq->tx_ring->tx_ring_struct->ring_size; i++) {
@@ -93,6 +66,8 @@ void bnxt_tx_queue_release_op(void *tx_queue)
bnxt_free_ring(txq->cp_ring->cp_ring_struct);
bnxt_free_txq_stats(txq);
+ rte_memzone_free(txq->mz);
+ txq->mz = NULL;
rte_free(txq);
}
@@ -112,7 +87,7 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
PMD_DRV_LOG(ERR,
"Cannot create Tx ring %d. Only %d rings available\n",
queue_idx, bp->max_tx_rings);
- return -ENOSPC;
+ return -EINVAL;
}
if (!nb_desc || nb_desc > MAX_TX_DESC_CNT) {
@@ -147,7 +122,7 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
txq->port_id = eth_dev->data->port_id;
/* Allocate TX ring hardware descriptors */
- if (bnxt_alloc_rings(bp, queue_idx, txq->tx_ring, NULL, txq->cp_ring,
+ if (bnxt_alloc_rings(bp, queue_idx, txq, NULL, txq->cp_ring,
"txr")) {
PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for tx_ring failed!");
bnxt_tx_queue_release_op(txq);
diff --git a/drivers/net/bnxt/bnxt_txq.h b/drivers/net/bnxt/bnxt_txq.h
index e27c34fa..720ca90c 100644
--- a/drivers/net/bnxt/bnxt_txq.h
+++ b/drivers/net/bnxt/bnxt_txq.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#ifndef _BNXT_TXQ_H_
@@ -50,7 +22,6 @@ struct bnxt_tx_queue {
uint8_t pthresh; /* Prefetch threshold register */
uint8_t hthresh; /* Host threshold register */
uint8_t wthresh; /* Write-back threshold reg */
- uint32_t txq_flags; /* Holds flags for this TXq */
uint32_t ctx_curr; /* Hardware context states */
uint8_t tx_deferred_start; /* not in global dev start */
@@ -61,6 +32,7 @@ struct bnxt_tx_queue {
unsigned int cp_nr_rings;
struct bnxt_cp_ring_info *cp_ring;
+ const struct rte_memzone *mz;
};
void bnxt_free_txq_stats(struct bnxt_tx_queue *txq);
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index 2c81a37c..470fddd5 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#include <inttypes.h>
@@ -181,7 +153,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
txbd->flags_type |= TX_BD_LONG_FLAGS_LHINT_GTE2K;
else
txbd->flags_type |= lhint_arr[txbd->len >> 9];
- txbd->addr = rte_cpu_to_le_32(rte_mbuf_data_iova(tx_buf->mbuf));
+ txbd->address = rte_cpu_to_le_32(rte_mbuf_data_iova(tx_buf->mbuf));
if (long_bd) {
txbd->flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;
@@ -262,7 +234,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
tx_buf = &txr->tx_buf_ring[txr->tx_prod];
txbd = &txr->tx_desc_ring[txr->tx_prod];
- txbd->addr = rte_cpu_to_le_32(rte_mbuf_data_iova(m_seg));
+ txbd->address = rte_cpu_to_le_32(rte_mbuf_data_iova(m_seg));
txbd->flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT;
txbd->len = m_seg->data_len;
diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h
index d88b15ab..15c7e5a0 100644
--- a/drivers/net/bnxt/bnxt_txr.h
+++ b/drivers/net/bnxt/bnxt_txr.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#ifndef _BNXT_TXR_H_
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index d4aeb4ca..19d06af5 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2015 Broadcom Corporation.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#include <inttypes.h>
@@ -77,6 +49,8 @@ void bnxt_init_vnics(struct bnxt *bp)
vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
+ vnic->hash_mode =
+ HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT;
for (j = 0; j < MAX_QUEUES_PER_VNIC; j++)
vnic->fw_grp_ids[j] = (uint16_t)HWRM_NA_SIGNATURE;
@@ -185,10 +159,10 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
mz = rte_memzone_lookup(mz_name);
if (!mz) {
mz = rte_memzone_reserve(mz_name,
- entry_length * max_vnics,
- SOCKET_ID_ANY,
- RTE_MEMZONE_2MB |
- RTE_MEMZONE_SIZE_HINT_ONLY);
+ entry_length * max_vnics, SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_IOVA_CONTIG);
if (!mz)
return -ENOMEM;
}
diff --git a/drivers/net/bnxt/bnxt_vnic.h b/drivers/net/bnxt/bnxt_vnic.h
index d8d35c7d..c521d7e5 100644
--- a/drivers/net/bnxt/bnxt_vnic.h
+++ b/drivers/net/bnxt/bnxt_vnic.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2015 Broadcom Corporation.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#ifndef _BNXT_VNIC_H_
@@ -53,6 +25,7 @@ struct bnxt_vnic_info {
uint16_t dflt_ring_grp;
uint16_t mru;
uint16_t hash_type;
+ uint8_t hash_mode;
rte_iova_t rss_table_dma_addr;
uint16_t *rss_table;
rte_iova_t rss_hash_key_dma_addr;
diff --git a/drivers/net/bnxt/hsi_struct_def_dpdk.h b/drivers/net/bnxt/hsi_struct_def_dpdk.h
index 1e9c39f4..fd6d8807 100644
--- a/drivers/net/bnxt/hsi_struct_def_dpdk.h
+++ b/drivers/net/bnxt/hsi_struct_def_dpdk.h
@@ -1,254 +1,1266 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2014-2018 Broadcom Limited
+ * All rights reserved.
*
- * Copyright(c) 2001-2017 Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * DO NOT MODIFY!!! This file is automatically generated.
*/
-#ifndef _HSI_STRUCT_DEF_DPDK_
-#define _HSI_STRUCT_DEF_DPDK_
-/* HSI and HWRM Specification 1.8.2 */
-#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 8
-#define HWRM_VERSION_UPDATE 2
+#ifndef _HSI_STRUCT_DEF_DPDK_H_
+#define _HSI_STRUCT_DEF_DPDK_H_
-#define HWRM_VERSION_RSVD 0 /* non-zero means beta version */
+/* This is the HWRM command header. */
+/* hwrm_cmd_hdr (size:128b/16B) */
+struct hwrm_cmd_hdr {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer where the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} __attribute__((packed));
-#define HWRM_VERSION_STR "1.8.2.0"
-/*
- * Following is the signature for HWRM message field that indicates not
- * applicable (All F's). Need to cast it the size of the field if needed.
- */
-#define HWRM_NA_SIGNATURE ((uint32_t)(-1))
-#define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */
-#define HWRM_MAX_RESP_LEN (280) /* hwrm_selftest_qlist */
-#define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. */
-#define HW_HASH_KEY_SIZE 40
-#define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */
-#define HWRM_ROCE_SP_HSI_VERSION_MAJOR 1
-#define HWRM_ROCE_SP_HSI_VERSION_MINOR 8
-#define HWRM_ROCE_SP_HSI_VERSION_UPDATE 2
+/* This is the HWRM response header. */
+/* hwrm_resp_hdr (size:64b/8B) */
+struct hwrm_resp_hdr {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+} __attribute__((packed));
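/*
 * A minimal usage sketch, not part of the generated header: filling the
 * command header defined above before posting a request, and matching a
 * response back to it by sequence ID. The helper names and the resp_dma
 * argument are hypothetical; byte ordering is ignored for brevity.
 */
static inline void
hwrm_cmd_hdr_fill(struct hwrm_cmd_hdr *hdr, uint16_t req_type,
		  uint16_t seq_id, uint64_t resp_dma)
{
	hdr->req_type = req_type;
	hdr->cmpl_ring = 0xffff;	/* (uint16_t)-1: no completion-ring event requested */
	hdr->seq_id = seq_id;		/* echoed back in hwrm_resp_hdr */
	hdr->target_id = 0xffff;	/* 0xFFFF targets the HWRM itself */
	hdr->resp_addr = resp_dma;	/* host buffer for the response */
}

static inline int
hwrm_resp_matches(const struct hwrm_resp_hdr *resp, uint16_t seq_id)
{
	return resp->seq_id == seq_id;
}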
/*
- * Request types
+ * TLV encapsulated message. Use the TLV type field of the
+ * TLV to determine the type of message encapsulated.
*/
-#define HWRM_VER_GET (UINT32_C(0x0))
-#define HWRM_FUNC_BUF_UNRGTR (UINT32_C(0xe))
-#define HWRM_FUNC_VF_CFG (UINT32_C(0xf))
- /* Reserved for future use */
-#define RESERVED1 (UINT32_C(0x10))
-#define HWRM_FUNC_RESET (UINT32_C(0x11))
-#define HWRM_FUNC_GETFID (UINT32_C(0x12))
-#define HWRM_FUNC_VF_ALLOC (UINT32_C(0x13))
-#define HWRM_FUNC_VF_FREE (UINT32_C(0x14))
-#define HWRM_FUNC_QCAPS (UINT32_C(0x15))
-#define HWRM_FUNC_QCFG (UINT32_C(0x16))
-#define HWRM_FUNC_CFG (UINT32_C(0x17))
-#define HWRM_FUNC_QSTATS (UINT32_C(0x18))
-#define HWRM_FUNC_CLR_STATS (UINT32_C(0x19))
-#define HWRM_FUNC_DRV_UNRGTR (UINT32_C(0x1a))
-#define HWRM_FUNC_VF_RESC_FREE (UINT32_C(0x1b))
-#define HWRM_FUNC_VF_VNIC_IDS_QUERY (UINT32_C(0x1c))
-#define HWRM_FUNC_DRV_RGTR (UINT32_C(0x1d))
-#define HWRM_FUNC_DRV_QVER (UINT32_C(0x1e))
-#define HWRM_FUNC_BUF_RGTR (UINT32_C(0x1f))
-#define HWRM_PORT_PHY_CFG (UINT32_C(0x20))
-#define HWRM_PORT_MAC_CFG (UINT32_C(0x21))
-#define HWRM_PORT_QSTATS (UINT32_C(0x23))
-#define HWRM_PORT_LPBK_QSTATS (UINT32_C(0x24))
-#define HWRM_PORT_CLR_STATS (UINT32_C(0x25))
-#define HWRM_PORT_PHY_QCFG (UINT32_C(0x27))
-#define HWRM_PORT_MAC_QCFG (UINT32_C(0x28))
-#define HWRM_PORT_MAC_PTP_QCFG (UINT32_C(0x29))
-#define HWRM_PORT_PHY_QCAPS (UINT32_C(0x2a))
-#define HWRM_PORT_LED_CFG (UINT32_C(0x2d))
-#define HWRM_PORT_LED_QCFG (UINT32_C(0x2e))
-#define HWRM_PORT_LED_QCAPS (UINT32_C(0x2f))
-#define HWRM_QUEUE_QPORTCFG (UINT32_C(0x30))
-#define HWRM_QUEUE_QCFG (UINT32_C(0x31))
-#define HWRM_QUEUE_CFG (UINT32_C(0x32))
-#define HWRM_FUNC_VLAN_CFG (UINT32_C(0x33))
-#define HWRM_FUNC_VLAN_QCFG (UINT32_C(0x34))
-#define HWRM_QUEUE_PFCENABLE_QCFG (UINT32_C(0x35))
-#define HWRM_QUEUE_PFCENABLE_CFG (UINT32_C(0x36))
-#define HWRM_QUEUE_PRI2COS_QCFG (UINT32_C(0x37))
-#define HWRM_QUEUE_PRI2COS_CFG (UINT32_C(0x38))
-#define HWRM_QUEUE_COS2BW_QCFG (UINT32_C(0x39))
-#define HWRM_QUEUE_COS2BW_CFG (UINT32_C(0x3a))
-#define HWRM_VNIC_ALLOC (UINT32_C(0x40))
-#define HWRM_VNIC_ALLOC (UINT32_C(0x40))
-#define HWRM_VNIC_FREE (UINT32_C(0x41))
-#define HWRM_VNIC_CFG (UINT32_C(0x42))
-#define HWRM_VNIC_QCFG (UINT32_C(0x43))
-#define HWRM_VNIC_TPA_CFG (UINT32_C(0x44))
-#define HWRM_VNIC_RSS_CFG (UINT32_C(0x46))
-#define HWRM_VNIC_RSS_QCFG (UINT32_C(0x47))
-#define HWRM_VNIC_PLCMODES_CFG (UINT32_C(0x48))
-#define HWRM_VNIC_PLCMODES_QCFG (UINT32_C(0x49))
-#define HWRM_VNIC_QCAPS (UINT32_C(0x4a))
-#define HWRM_RING_ALLOC (UINT32_C(0x50))
-#define HWRM_RING_FREE (UINT32_C(0x51))
-#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS (UINT32_C(0x52))
-#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS (UINT32_C(0x53))
-#define HWRM_RING_RESET (UINT32_C(0x5e))
-#define HWRM_RING_GRP_ALLOC (UINT32_C(0x60))
-#define HWRM_RING_GRP_FREE (UINT32_C(0x61))
-#define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (UINT32_C(0x70))
-#define HWRM_VNIC_RSS_COS_LB_CTX_FREE (UINT32_C(0x71))
-#define HWRM_CFA_L2_FILTER_ALLOC (UINT32_C(0x90))
-#define HWRM_CFA_L2_FILTER_FREE (UINT32_C(0x91))
-#define HWRM_CFA_L2_FILTER_CFG (UINT32_C(0x92))
-#define HWRM_CFA_L2_SET_RX_MASK (UINT32_C(0x93))
- /* Reserved for future use */
-#define HWRM_CFA_VLAN_ANTISPOOF_CFG (UINT32_C(0x94))
-#define HWRM_CFA_TUNNEL_FILTER_ALLOC (UINT32_C(0x95))
-#define HWRM_CFA_TUNNEL_FILTER_FREE (UINT32_C(0x96))
-#define HWRM_CFA_NTUPLE_FILTER_ALLOC (UINT32_C(0x99))
-#define HWRM_CFA_NTUPLE_FILTER_FREE (UINT32_C(0x9a))
-#define HWRM_CFA_NTUPLE_FILTER_CFG (UINT32_C(0x9b))
-#define HWRM_CFA_EM_FLOW_ALLOC (UINT32_C(0x9c))
-#define HWRM_CFA_EM_FLOW_FREE (UINT32_C(0x9d))
-#define HWRM_CFA_EM_FLOW_CFG (UINT32_C(0x9e))
-#define HWRM_TUNNEL_DST_PORT_QUERY (UINT32_C(0xa0))
-#define HWRM_TUNNEL_DST_PORT_ALLOC (UINT32_C(0xa1))
-#define HWRM_TUNNEL_DST_PORT_FREE (UINT32_C(0xa2))
-#define HWRM_STAT_CTX_ALLOC (UINT32_C(0xb0))
-#define HWRM_STAT_CTX_FREE (UINT32_C(0xb1))
-#define HWRM_STAT_CTX_QUERY (UINT32_C(0xb2))
-#define HWRM_STAT_CTX_CLR_STATS (UINT32_C(0xb3))
-#define HWRM_FW_RESET (UINT32_C(0xc0))
-#define HWRM_FW_QSTATUS (UINT32_C(0xc1))
-#define HWRM_EXEC_FWD_RESP (UINT32_C(0xd0))
-#define HWRM_REJECT_FWD_RESP (UINT32_C(0xd1))
-#define HWRM_FWD_RESP (UINT32_C(0xd2))
-#define HWRM_FWD_ASYNC_EVENT_CMPL (UINT32_C(0xd3))
-#define HWRM_TEMP_MONITOR_QUERY (UINT32_C(0xe0))
-#define HWRM_WOL_FILTER_ALLOC (UINT32_C(0xf0))
-#define HWRM_WOL_FILTER_FREE (UINT32_C(0xf1))
-#define HWRM_WOL_FILTER_QCFG (UINT32_C(0xf2))
-#define HWRM_WOL_REASON_QCFG (UINT32_C(0xf3))
-#define HWRM_DBG_DUMP (UINT32_C(0xff14))
-#define HWRM_NVM_VALIDATE_OPTION (UINT32_C(0xffef))
-#define HWRM_NVM_FLUSH (UINT32_C(0xfff0))
-#define HWRM_NVM_GET_VARIABLE (UINT32_C(0xfff1))
-#define HWRM_NVM_SET_VARIABLE (UINT32_C(0xfff2))
-#define HWRM_NVM_INSTALL_UPDATE (UINT32_C(0xfff3))
-#define HWRM_NVM_MODIFY (UINT32_C(0xfff4))
-#define HWRM_NVM_VERIFY_UPDATE (UINT32_C(0xfff5))
-#define HWRM_NVM_GET_DEV_INFO (UINT32_C(0xfff6))
-#define HWRM_NVM_ERASE_DIR_ENTRY (UINT32_C(0xfff7))
-#define HWRM_NVM_MOD_DIR_ENTRY (UINT32_C(0xfff8))
-#define HWRM_NVM_FIND_DIR_ENTRY (UINT32_C(0xfff9))
-#define HWRM_NVM_GET_DIR_ENTRIES (UINT32_C(0xfffa))
-#define HWRM_NVM_GET_DIR_INFO (UINT32_C(0xfffb))
-#define HWRM_NVM_RAW_DUMP (UINT32_C(0xfffc))
-#define HWRM_NVM_READ (UINT32_C(0xfffd))
-#define HWRM_NVM_WRITE (UINT32_C(0xfffe))
-#define HWRM_NVM_RAW_WRITE_BLK (UINT32_C(0xffff))
+#define CMD_DISCR_TLV_ENCAP UINT32_C(0x8000)
+#define CMD_DISCR_LAST CMD_DISCR_TLV_ENCAP
+
+
+/* HWRM request message */
+#define TLV_TYPE_HWRM_REQUEST UINT32_C(0x1)
+/* HWRM response message */
+#define TLV_TYPE_HWRM_RESPONSE UINT32_C(0x2)
+/* RoCE slow path command */
+#define TLV_TYPE_ROCE_SP_COMMAND UINT32_C(0x3)
+/* Engine CKV - The device's serial number. */
+#define TLV_TYPE_ENGINE_CKV_DEVICE_SERIAL_NUMBER UINT32_C(0x8001)
+/* Engine CKV - Per-function random nonce data. */
+#define TLV_TYPE_ENGINE_CKV_NONCE UINT32_C(0x8002)
+/* Engine CKV - Initialization vector. */
+#define TLV_TYPE_ENGINE_CKV_IV UINT32_C(0x8003)
+/* Engine CKV - Authentication tag. */
+#define TLV_TYPE_ENGINE_CKV_AUTH_TAG UINT32_C(0x8004)
+/* Engine CKV - The encrypted data. */
+#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT UINT32_C(0x8005)
+/* Engine CKV - Supported algorithms. */
+#define TLV_TYPE_ENGINE_CKV_ALGORITHMS UINT32_C(0x8006)
+/* Engine CKV - The EC curve name and ECC public key information. */
+#define TLV_TYPE_ENGINE_CKV_ECC_PUBLIC_KEY UINT32_C(0x8007)
+/* Engine CKV - The ECDSA signature. */
+#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE UINT32_C(0x8008)
+#define TLV_TYPE_LAST \
+ TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE
+
+
+/* tlv (size:64b/8B) */
+struct tlv {
+ /*
+ * The command discriminator is used to differentiate between various
+ * types of HWRM messages. This includes legacy HWRM and RoCE slowpath
+ * command messages as well as newer TLV encapsulated HWRM commands.
+ *
+ * For TLV encapsulated messages this field must be 0x8000.
+ */
+ uint16_t cmd_discr;
+ uint8_t reserved_8b;
+ uint8_t flags;
+ /*
+ * Indicates the presence of additional TLV encapsulated data
+ * follows this TLV.
+ */
+ #define TLV_FLAGS_MORE UINT32_C(0x1)
+ /* Last TLV in a sequence of TLVs. */
+ #define TLV_FLAGS_MORE_LAST UINT32_C(0x0)
+ /* More TLVs follow this TLV. */
+ #define TLV_FLAGS_MORE_NOT_LAST UINT32_C(0x1)
+ /*
+ * When an HWRM receiver detects a TLV type that it does not
+ * support with the TLV required flag set, the receiver must
+ * reject the HWRM message with an error code indicating an
+ * unsupported TLV type.
+ */
+ #define TLV_FLAGS_REQUIRED UINT32_C(0x2)
+ /* No */
+ #define TLV_FLAGS_REQUIRED_NO (UINT32_C(0x0) << 1)
+ /* Yes */
+ #define TLV_FLAGS_REQUIRED_YES (UINT32_C(0x1) << 1)
+ #define TLV_FLAGS_REQUIRED_LAST TLV_FLAGS_REQUIRED_YES
+ /*
+ * This field defines the TLV type value which is divided into
+ * two ranges to differentiate between global and local TLV types.
+ * Global TLV types must be unique across all defined TLV types.
+ * Local TLV types are valid only for extensions to a given
+ * HWRM message and may be repeated across different HWRM message
+ * types. There is a direct correlation of each HWRM message type
+ * to a single global TLV type value.
+ *
+ * Global TLV range: `0 - (63k-1)`
+ *
+ * Local TLV range: `63k - (64k-1)`
+ */
+ uint16_t tlv_type;
+ /*
+ * Length of the message data encapsulated by this TLV in bytes.
+ * This length does not include the size of the TLV header itself
+ * and it must be an integer multiple of 8B.
+ */
+ uint16_t length;
+} __attribute__((packed));
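/*
 * A small sketch, not from the generated file: reading the flag bits
 * defined above from a received TLV header.
 */
static inline int
tlv_more_follow(const struct tlv *t)
{
	return (t->flags & TLV_FLAGS_MORE) == TLV_FLAGS_MORE_NOT_LAST;
}

static inline int
tlv_is_required(const struct tlv *t)
{
	return (t->flags & TLV_FLAGS_REQUIRED) == TLV_FLAGS_REQUIRED_YES;
}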
+
+/* Input */
+/* input (size:128b/16B) */
+struct input {
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+ /*
+	 * This value indicates what completion ring the request will
+ * be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t cmpl_ring;
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+ /*
+ * Target ID of this command.
+ *
+ * 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+} __attribute__((packed));
+
+/* Output */
+/* output (size:64b/8B) */
+struct output {
+ /*
+ * Pass/Fail or error type
+ *
+ * Note: receiver to verify the in parameters, and fail the call
+ * with an error when appropriate
+ */
+ uint16_t error_code;
+ /* This field returns the type of original request. */
+ uint16_t req_type;
+ /* This field provides original sequence number of the command. */
+ uint16_t seq_id;
+ /*
+ * This field is the length of the response in bytes. The
+ * last byte of the response is a valid flag that will read
+ * as '1' when the command has been completely written to
+ * memory.
+ */
+ uint16_t resp_len;
+} __attribute__((packed));
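/*
 * Sketch of the completion check the resp_len description above implies:
 * the last byte of the response buffer doubles as a valid flag and reads
 * as '1' once the response is fully written. The arguments are
 * hypothetical; real callers also need a timeout and a read barrier.
 */
static inline int
hwrm_output_written(const volatile uint8_t *resp_buf, uint16_t resp_len)
{
	return resp_len != 0 && resp_buf[resp_len - 1] == 1;
}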
+
+/* Short Command Structure */
+/* hwrm_short_input (size:128b/16B) */
+struct hwrm_short_input {
+ /*
+ * This field indicates the type of request in the request buffer.
+ * The format for the rest of the command (request) is determined
+ * by this field.
+ */
+ uint16_t req_type;
+ /*
+ * This field indicates a signature that is used to identify short
+ * form of the command listed here. This field shall be set to
+ * 17185 (0x4321).
+ */
+ uint16_t signature;
+ /* Signature indicating this is a short form of HWRM command */
+ #define HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD UINT32_C(0x4321)
+ #define HWRM_SHORT_INPUT_SIGNATURE_LAST \
+ HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD
+ /* Reserved for future use. */
+ uint16_t unused_0;
+ /* This value indicates the length of the request. */
+ uint16_t size;
+ /*
+ * This is the host address where the request was written.
+ * This area must be 16B aligned.
+ */
+ uint64_t req_addr;
+} __attribute__((packed));
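/*
 * Sketch, not part of the generated header: building the short command
 * form described above. req_dma is a hypothetical 16B-aligned host
 * address holding the full request.
 */
static inline void
hwrm_short_input_fill(struct hwrm_short_input *si, uint16_t req_type,
		      uint16_t req_len, uint64_t req_dma)
{
	si->req_type = req_type;
	si->signature = HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD;	/* 0x4321 */
	si->unused_0 = 0;
	si->size = req_len;	/* length of the request pointed to */
	si->req_addr = req_dma;
}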
/*
- * Note: The Host Software Interface (HSI) and Hardware Resource Manager (HWRM)
- * specification describes the data structures used in Ethernet packet or RDMA
- * message data transfers as well as an abstract interface for managing Ethernet
- * NIC hardware resources.
- */
-/* Ethernet Data path Host Structures */
-/*
- * Description: The following three sections document the host structures used
- * between device and software drivers for communicating Ethernet packets.
+ * Command numbering
+ * # NOTE - definitions already in hwrm_req_type, in hwrm_types.yaml
+ * # So only structure definition is provided here.
*/
-/* BD Ring Structures */
+/* cmd_nums (size:64b/8B) */
+struct cmd_nums {
+ /*
+ * This version of the specification defines the commands listed in
+ * the table below. The following are general implementation
+ * requirements for these commands:
+ *
+ * # All commands listed below that are marked neither
+ * reserved nor experimental shall be implemented by the HWRM.
+ * # A HWRM client compliant to this specification should not use
+ * commands outside of the list below.
+ * # A HWRM client compliant to this specification should not use
+ * command numbers marked reserved below.
+ * # A command marked experimental below may not be implemented
+ * by the HWRM.
+ * # A command marked experimental may change in the
+ * future version of the HWRM specification.
+ * # A command not listed below may be implemented by the HWRM.
+ * The behavior of commands that are not listed below is outside
+ * the scope of this specification.
+ */
+ uint16_t req_type;
+ #define HWRM_VER_GET UINT32_C(0x0)
+ #define HWRM_FUNC_BUF_UNRGTR UINT32_C(0xe)
+ #define HWRM_FUNC_VF_CFG UINT32_C(0xf)
+ /* Reserved for future use. */
+ #define HWRM_RESERVED1 UINT32_C(0x10)
+ #define HWRM_FUNC_RESET UINT32_C(0x11)
+ #define HWRM_FUNC_GETFID UINT32_C(0x12)
+ #define HWRM_FUNC_VF_ALLOC UINT32_C(0x13)
+ #define HWRM_FUNC_VF_FREE UINT32_C(0x14)
+ #define HWRM_FUNC_QCAPS UINT32_C(0x15)
+ #define HWRM_FUNC_QCFG UINT32_C(0x16)
+ #define HWRM_FUNC_CFG UINT32_C(0x17)
+ #define HWRM_FUNC_QSTATS UINT32_C(0x18)
+ #define HWRM_FUNC_CLR_STATS UINT32_C(0x19)
+ #define HWRM_FUNC_DRV_UNRGTR UINT32_C(0x1a)
+ #define HWRM_FUNC_VF_RESC_FREE UINT32_C(0x1b)
+ #define HWRM_FUNC_VF_VNIC_IDS_QUERY UINT32_C(0x1c)
+ #define HWRM_FUNC_DRV_RGTR UINT32_C(0x1d)
+ #define HWRM_FUNC_DRV_QVER UINT32_C(0x1e)
+ #define HWRM_FUNC_BUF_RGTR UINT32_C(0x1f)
+ #define HWRM_PORT_PHY_CFG UINT32_C(0x20)
+ #define HWRM_PORT_MAC_CFG UINT32_C(0x21)
+ /* Experimental */
+ #define HWRM_PORT_TS_QUERY UINT32_C(0x22)
+ #define HWRM_PORT_QSTATS UINT32_C(0x23)
+ #define HWRM_PORT_LPBK_QSTATS UINT32_C(0x24)
+ /* Experimental */
+ #define HWRM_PORT_CLR_STATS UINT32_C(0x25)
+ /* Experimental */
+ #define HWRM_PORT_LPBK_CLR_STATS UINT32_C(0x26)
+ #define HWRM_PORT_PHY_QCFG UINT32_C(0x27)
+ #define HWRM_PORT_MAC_QCFG UINT32_C(0x28)
+ /* Experimental */
+ #define HWRM_PORT_MAC_PTP_QCFG UINT32_C(0x29)
+ #define HWRM_PORT_PHY_QCAPS UINT32_C(0x2a)
+ #define HWRM_PORT_PHY_I2C_WRITE UINT32_C(0x2b)
+ #define HWRM_PORT_PHY_I2C_READ UINT32_C(0x2c)
+ #define HWRM_PORT_LED_CFG UINT32_C(0x2d)
+ #define HWRM_PORT_LED_QCFG UINT32_C(0x2e)
+ #define HWRM_PORT_LED_QCAPS UINT32_C(0x2f)
+ #define HWRM_QUEUE_QPORTCFG UINT32_C(0x30)
+ #define HWRM_QUEUE_QCFG UINT32_C(0x31)
+ #define HWRM_QUEUE_CFG UINT32_C(0x32)
+ #define HWRM_FUNC_VLAN_CFG UINT32_C(0x33)
+ #define HWRM_FUNC_VLAN_QCFG UINT32_C(0x34)
+ #define HWRM_QUEUE_PFCENABLE_QCFG UINT32_C(0x35)
+ #define HWRM_QUEUE_PFCENABLE_CFG UINT32_C(0x36)
+ #define HWRM_QUEUE_PRI2COS_QCFG UINT32_C(0x37)
+ #define HWRM_QUEUE_PRI2COS_CFG UINT32_C(0x38)
+ #define HWRM_QUEUE_COS2BW_QCFG UINT32_C(0x39)
+ #define HWRM_QUEUE_COS2BW_CFG UINT32_C(0x3a)
+ /* Experimental */
+ #define HWRM_QUEUE_DSCP_QCAPS UINT32_C(0x3b)
+ /* Experimental */
+ #define HWRM_QUEUE_DSCP2PRI_QCFG UINT32_C(0x3c)
+ /* Experimental */
+ #define HWRM_QUEUE_DSCP2PRI_CFG UINT32_C(0x3d)
+ #define HWRM_VNIC_ALLOC UINT32_C(0x40)
+ #define HWRM_VNIC_FREE UINT32_C(0x41)
+ #define HWRM_VNIC_CFG UINT32_C(0x42)
+ #define HWRM_VNIC_QCFG UINT32_C(0x43)
+ #define HWRM_VNIC_TPA_CFG UINT32_C(0x44)
+ /* Experimental */
+ #define HWRM_VNIC_TPA_QCFG UINT32_C(0x45)
+ #define HWRM_VNIC_RSS_CFG UINT32_C(0x46)
+ #define HWRM_VNIC_RSS_QCFG UINT32_C(0x47)
+ #define HWRM_VNIC_PLCMODES_CFG UINT32_C(0x48)
+ #define HWRM_VNIC_PLCMODES_QCFG UINT32_C(0x49)
+ #define HWRM_VNIC_QCAPS UINT32_C(0x4a)
+ #define HWRM_RING_ALLOC UINT32_C(0x50)
+ #define HWRM_RING_FREE UINT32_C(0x51)
+ #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS UINT32_C(0x52)
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS UINT32_C(0x53)
+ #define HWRM_RING_RESET UINT32_C(0x5e)
+ #define HWRM_RING_GRP_ALLOC UINT32_C(0x60)
+ #define HWRM_RING_GRP_FREE UINT32_C(0x61)
+ /* Reserved for future use. */
+ #define HWRM_RESERVED5 UINT32_C(0x64)
+ /* Reserved for future use. */
+ #define HWRM_RESERVED6 UINT32_C(0x65)
+ #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC UINT32_C(0x70)
+ #define HWRM_VNIC_RSS_COS_LB_CTX_FREE UINT32_C(0x71)
+ #define HWRM_CFA_L2_FILTER_ALLOC UINT32_C(0x90)
+ #define HWRM_CFA_L2_FILTER_FREE UINT32_C(0x91)
+ #define HWRM_CFA_L2_FILTER_CFG UINT32_C(0x92)
+ #define HWRM_CFA_L2_SET_RX_MASK UINT32_C(0x93)
+ #define HWRM_CFA_VLAN_ANTISPOOF_CFG UINT32_C(0x94)
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC UINT32_C(0x95)
+ #define HWRM_CFA_TUNNEL_FILTER_FREE UINT32_C(0x96)
+ /* Experimental */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC UINT32_C(0x97)
+ /* Experimental */
+ #define HWRM_CFA_ENCAP_RECORD_FREE UINT32_C(0x98)
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC UINT32_C(0x99)
+ #define HWRM_CFA_NTUPLE_FILTER_FREE UINT32_C(0x9a)
+ #define HWRM_CFA_NTUPLE_FILTER_CFG UINT32_C(0x9b)
+ /* Experimental */
+ #define HWRM_CFA_EM_FLOW_ALLOC UINT32_C(0x9c)
+ /* Experimental */
+ #define HWRM_CFA_EM_FLOW_FREE UINT32_C(0x9d)
+ /* Experimental */
+ #define HWRM_CFA_EM_FLOW_CFG UINT32_C(0x9e)
+ #define HWRM_TUNNEL_DST_PORT_QUERY UINT32_C(0xa0)
+ #define HWRM_TUNNEL_DST_PORT_ALLOC UINT32_C(0xa1)
+ #define HWRM_TUNNEL_DST_PORT_FREE UINT32_C(0xa2)
+ #define HWRM_STAT_CTX_ALLOC UINT32_C(0xb0)
+ #define HWRM_STAT_CTX_FREE UINT32_C(0xb1)
+ #define HWRM_STAT_CTX_QUERY UINT32_C(0xb2)
+ #define HWRM_STAT_CTX_CLR_STATS UINT32_C(0xb3)
+ #define HWRM_PORT_QSTATS_EXT UINT32_C(0xb4)
+ #define HWRM_FW_RESET UINT32_C(0xc0)
+ #define HWRM_FW_QSTATUS UINT32_C(0xc1)
+ /* Experimental */
+ #define HWRM_FW_SET_TIME UINT32_C(0xc8)
+ /* Experimental */
+ #define HWRM_FW_GET_TIME UINT32_C(0xc9)
+ /* Experimental */
+ #define HWRM_FW_SET_STRUCTURED_DATA UINT32_C(0xca)
+ /* Experimental */
+ #define HWRM_FW_GET_STRUCTURED_DATA UINT32_C(0xcb)
+ /* Experimental */
+ #define HWRM_FW_IPC_MAILBOX UINT32_C(0xcc)
+ #define HWRM_EXEC_FWD_RESP UINT32_C(0xd0)
+ #define HWRM_REJECT_FWD_RESP UINT32_C(0xd1)
+ #define HWRM_FWD_RESP UINT32_C(0xd2)
+ #define HWRM_FWD_ASYNC_EVENT_CMPL UINT32_C(0xd3)
+ #define HWRM_OEM_CMD UINT32_C(0xd4)
+ #define HWRM_TEMP_MONITOR_QUERY UINT32_C(0xe0)
+ #define HWRM_WOL_FILTER_ALLOC UINT32_C(0xf0)
+ #define HWRM_WOL_FILTER_FREE UINT32_C(0xf1)
+ #define HWRM_WOL_FILTER_QCFG UINT32_C(0xf2)
+ #define HWRM_WOL_REASON_QCFG UINT32_C(0xf3)
+ /* Experimental */
+ #define HWRM_CFA_METER_PROFILE_ALLOC UINT32_C(0xf5)
+ /* Experimental */
+ #define HWRM_CFA_METER_PROFILE_FREE UINT32_C(0xf6)
+ /* Experimental */
+ #define HWRM_CFA_METER_PROFILE_CFG UINT32_C(0xf7)
+ /* Experimental */
+ #define HWRM_CFA_METER_INSTANCE_ALLOC UINT32_C(0xf8)
+ /* Experimental */
+ #define HWRM_CFA_METER_INSTANCE_FREE UINT32_C(0xf9)
+ /* Experimental */
+ #define HWRM_CFA_VFR_ALLOC UINT32_C(0xfd)
+ /* Experimental */
+ #define HWRM_CFA_VFR_FREE UINT32_C(0xfe)
+ /* Experimental */
+ #define HWRM_CFA_VF_PAIR_ALLOC UINT32_C(0x100)
+ /* Experimental */
+ #define HWRM_CFA_VF_PAIR_FREE UINT32_C(0x101)
+ /* Experimental */
+ #define HWRM_CFA_VF_PAIR_INFO UINT32_C(0x102)
+ /* Experimental */
+ #define HWRM_CFA_FLOW_ALLOC UINT32_C(0x103)
+ /* Experimental */
+ #define HWRM_CFA_FLOW_FREE UINT32_C(0x104)
+ /* Experimental */
+ #define HWRM_CFA_FLOW_FLUSH UINT32_C(0x105)
+ /* Experimental */
+ #define HWRM_CFA_FLOW_STATS UINT32_C(0x106)
+ /* Experimental */
+ #define HWRM_CFA_FLOW_INFO UINT32_C(0x107)
+ /* Experimental */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC UINT32_C(0x108)
+ /* Experimental */
+ #define HWRM_CFA_DECAP_FILTER_FREE UINT32_C(0x109)
+ #define HWRM_CFA_VLAN_ANTISPOOF_QCFG UINT32_C(0x10a)
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC UINT32_C(0x10b)
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE UINT32_C(0x10c)
+ /* Experimental */
+ #define HWRM_CFA_PAIR_ALLOC UINT32_C(0x10d)
+ /* Experimental */
+ #define HWRM_CFA_PAIR_FREE UINT32_C(0x10e)
+ /* Experimental */
+ #define HWRM_CFA_PAIR_INFO UINT32_C(0x10f)
+ /* Experimental */
+ #define HWRM_FW_IPC_MSG UINT32_C(0x110)
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO UINT32_C(0x111)
+ /* Engine CKV - Ping the device and SRT firmware to get the public key. */
+ #define HWRM_ENGINE_CKV_HELLO UINT32_C(0x12d)
+ /* Engine CKV - Get the current allocation status of keys provisioned in the key vault. */
+ #define HWRM_ENGINE_CKV_STATUS UINT32_C(0x12e)
+ /* Engine CKV - Add a new CKEK used to encrypt keys. */
+ #define HWRM_ENGINE_CKV_CKEK_ADD UINT32_C(0x12f)
+ /* Engine CKV - Delete a previously added CKEK. */
+ #define HWRM_ENGINE_CKV_CKEK_DELETE UINT32_C(0x130)
+ /* Engine CKV - Add a new key to the key vault. */
+ #define HWRM_ENGINE_CKV_KEY_ADD UINT32_C(0x131)
+ /* Engine CKV - Delete a key from the key vault. */
+ #define HWRM_ENGINE_CKV_KEY_DELETE UINT32_C(0x132)
+ /* Engine CKV - Delete all keys from the key vault. */
+ #define HWRM_ENGINE_CKV_FLUSH UINT32_C(0x133)
+ /* Engine CKV - Get random data. */
+ #define HWRM_ENGINE_CKV_RNG_GET UINT32_C(0x134)
+ /* Engine CKV - Generate and encrypt a new AES key. */
+ #define HWRM_ENGINE_CKV_KEY_GEN UINT32_C(0x135)
+ /* Engine - Query the available queue groups configuration. */
+ #define HWRM_ENGINE_QG_CONFIG_QUERY UINT32_C(0x13c)
+ /* Engine - Query the queue groups assigned to a function. */
+ #define HWRM_ENGINE_QG_QUERY UINT32_C(0x13d)
+ /* Engine - Query the available queue group meter profile configuration. */
+ #define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY UINT32_C(0x13e)
+ /* Engine - Query the configuration of a queue group meter profile. */
+ #define HWRM_ENGINE_QG_METER_PROFILE_QUERY UINT32_C(0x13f)
+ /* Engine - Allocate a queue group meter profile. */
+ #define HWRM_ENGINE_QG_METER_PROFILE_ALLOC UINT32_C(0x140)
+ /* Engine - Free a queue group meter profile. */
+ #define HWRM_ENGINE_QG_METER_PROFILE_FREE UINT32_C(0x141)
+ /* Engine - Query the meters assigned to a queue group. */
+ #define HWRM_ENGINE_QG_METER_QUERY UINT32_C(0x142)
+ /* Engine - Bind a queue group meter profile to a queue group. */
+ #define HWRM_ENGINE_QG_METER_BIND UINT32_C(0x143)
+ /* Engine - Unbind a queue group meter profile from a queue group. */
+ #define HWRM_ENGINE_QG_METER_UNBIND UINT32_C(0x144)
+ /* Engine - Bind a queue group to a function. */
+ #define HWRM_ENGINE_QG_FUNC_BIND UINT32_C(0x145)
+ /* Engine - Query the scheduling group configuration. */
+ #define HWRM_ENGINE_SG_CONFIG_QUERY UINT32_C(0x146)
+ /* Engine - Query the queue groups assigned to a scheduling group. */
+ #define HWRM_ENGINE_SG_QUERY UINT32_C(0x147)
+ /* Engine - Query the configuration of a scheduling group's meter profiles. */
+ #define HWRM_ENGINE_SG_METER_QUERY UINT32_C(0x148)
+ /* Engine - Configure a scheduling group's meter profiles. */
+ #define HWRM_ENGINE_SG_METER_CONFIG UINT32_C(0x149)
+ /* Engine - Bind a queue group to a scheduling group. */
+ #define HWRM_ENGINE_SG_QG_BIND UINT32_C(0x14a)
+ /* Engine - Unbind a queue group from its scheduling group. */
+ #define HWRM_ENGINE_QG_SG_UNBIND UINT32_C(0x14b)
+ /* Engine - Query the Engine configuration. */
+ #define HWRM_ENGINE_CONFIG_QUERY UINT32_C(0x154)
+ /* Engine - Configure the statistics accumulator for an Engine. */
+ #define HWRM_ENGINE_STATS_CONFIG UINT32_C(0x155)
+ /* Engine - Clear the statistics accumulator for an Engine. */
+ #define HWRM_ENGINE_STATS_CLEAR UINT32_C(0x156)
+ /* Engine - Query the statistics accumulator for an Engine. */
+ #define HWRM_ENGINE_STATS_QUERY UINT32_C(0x157)
+ /* Engine - Allocate an Engine RQ. */
+ #define HWRM_ENGINE_RQ_ALLOC UINT32_C(0x15e)
+ /* Engine - Free an Engine RQ. */
+ #define HWRM_ENGINE_RQ_FREE UINT32_C(0x15f)
+ /* Engine - Allocate an Engine CQ. */
+ #define HWRM_ENGINE_CQ_ALLOC UINT32_C(0x160)
+ /* Engine - Free an Engine CQ. */
+ #define HWRM_ENGINE_CQ_FREE UINT32_C(0x161)
+ /* Engine - Allocate an NQ. */
+ #define HWRM_ENGINE_NQ_ALLOC UINT32_C(0x162)
+ /* Engine - Free an NQ. */
+ #define HWRM_ENGINE_NQ_FREE UINT32_C(0x163)
+ /* Engine - Set the on-die RQE credit update location. */
+ #define HWRM_ENGINE_ON_DIE_RQE_CREDITS UINT32_C(0x164)
+ /* Experimental */
+ #define HWRM_FUNC_RESOURCE_QCAPS UINT32_C(0x190)
+ /* Experimental */
+ #define HWRM_FUNC_VF_RESOURCE_CFG UINT32_C(0x191)
+ /* Experimental */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS UINT32_C(0x192)
+ /* Experimental */
+ #define HWRM_FUNC_BACKING_STORE_CFG UINT32_C(0x193)
+ /* Experimental */
+ #define HWRM_FUNC_BACKING_STORE_QCFG UINT32_C(0x194)
+ /* Experimental */
+ #define HWRM_SELFTEST_QLIST UINT32_C(0x200)
+ /* Experimental */
+ #define HWRM_SELFTEST_EXEC UINT32_C(0x201)
+ /* Experimental */
+ #define HWRM_SELFTEST_IRQ UINT32_C(0x202)
+ /* Experimental */
+ #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA UINT32_C(0x203)
+ /* Experimental */
+ #define HWRM_PCIE_QSTATS UINT32_C(0x204)
+ /* Experimental */
+ #define HWRM_DBG_READ_DIRECT UINT32_C(0xff10)
+ /* Experimental */
+ #define HWRM_DBG_READ_INDIRECT UINT32_C(0xff11)
+ /* Experimental */
+ #define HWRM_DBG_WRITE_DIRECT UINT32_C(0xff12)
+ /* Experimental */
+ #define HWRM_DBG_WRITE_INDIRECT UINT32_C(0xff13)
+ #define HWRM_DBG_DUMP UINT32_C(0xff14)
+ /* Experimental */
+ #define HWRM_DBG_ERASE_NVM UINT32_C(0xff15)
+ /* Experimental */
+ #define HWRM_DBG_CFG UINT32_C(0xff16)
+ /* Experimental */
+ #define HWRM_DBG_COREDUMP_LIST UINT32_C(0xff17)
+ /* Experimental */
+ #define HWRM_DBG_COREDUMP_INITIATE UINT32_C(0xff18)
+ /* Experimental */
+ #define HWRM_DBG_COREDUMP_RETRIEVE UINT32_C(0xff19)
+ /* */
+ #define HWRM_DBG_I2C_CMD UINT32_C(0xff1b)
+ /* Experimental */
+ #define HWRM_NVM_FACTORY_DEFAULTS UINT32_C(0xffee)
+ #define HWRM_NVM_VALIDATE_OPTION UINT32_C(0xffef)
+ #define HWRM_NVM_FLUSH UINT32_C(0xfff0)
+ #define HWRM_NVM_GET_VARIABLE UINT32_C(0xfff1)
+ #define HWRM_NVM_SET_VARIABLE UINT32_C(0xfff2)
+ #define HWRM_NVM_INSTALL_UPDATE UINT32_C(0xfff3)
+ #define HWRM_NVM_MODIFY UINT32_C(0xfff4)
+ #define HWRM_NVM_VERIFY_UPDATE UINT32_C(0xfff5)
+ #define HWRM_NVM_GET_DEV_INFO UINT32_C(0xfff6)
+ #define HWRM_NVM_ERASE_DIR_ENTRY UINT32_C(0xfff7)
+ #define HWRM_NVM_MOD_DIR_ENTRY UINT32_C(0xfff8)
+ #define HWRM_NVM_FIND_DIR_ENTRY UINT32_C(0xfff9)
+ #define HWRM_NVM_GET_DIR_ENTRIES UINT32_C(0xfffa)
+ #define HWRM_NVM_GET_DIR_INFO UINT32_C(0xfffb)
+ #define HWRM_NVM_RAW_DUMP UINT32_C(0xfffc)
+ #define HWRM_NVM_READ UINT32_C(0xfffd)
+ #define HWRM_NVM_WRITE UINT32_C(0xfffe)
+ #define HWRM_NVM_RAW_WRITE_BLK UINT32_C(0xffff)
+ #define HWRM_LAST HWRM_NVM_RAW_WRITE_BLK
+ uint16_t unused_0[3];
+} __attribute__((packed));
+
+/* Return Codes */
+/* ret_codes (size:64b/8B) */
+struct ret_codes {
+ uint16_t error_code;
+ /* Request was successfully executed by the HWRM. */
+ #define HWRM_ERR_CODE_SUCCESS UINT32_C(0x0)
+ /* The HWRM failed to execute the request. */
+ #define HWRM_ERR_CODE_FAIL UINT32_C(0x1)
+ /*
+ * The request contains invalid argument(s) or input
+ * parameters.
+ */
+ #define HWRM_ERR_CODE_INVALID_PARAMS UINT32_C(0x2)
+ /*
+ * The requester is not allowed to access the requested
+ * resource. This error code shall be provided in a
+ * response to a request to query or modify an existing
+ * resource that is not accessible by the requester.
+ */
+ #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED UINT32_C(0x3)
+ /*
+ * The HWRM is unable to allocate the requested resource.
+ * This code only applies to requests for HWRM resource
+ * allocations.
+ */
+ #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR UINT32_C(0x4)
+ /*
+ * Invalid combination of flags is specified in the
+ * request.
+ */
+ #define HWRM_ERR_CODE_INVALID_FLAGS UINT32_C(0x5)
+ /*
+ * Invalid combination of enables fields is specified in
+ * the request.
+ */
+ #define HWRM_ERR_CODE_INVALID_ENABLES UINT32_C(0x6)
+ /*
+ * Request contains a required TLV that is not supported by
+ * the installed version of firmware.
+ */
+ #define HWRM_ERR_CODE_UNSUPPORTED_TLV UINT32_C(0x7)
+ /*
+ * No firmware buffer available to accept the request. Driver
+ * should retry the request.
+ */
+ #define HWRM_ERR_CODE_NO_BUFFER UINT32_C(0x8)
+ /*
+ * Generic HWRM execution error that represents an
+ * internal error.
+ */
+ #define HWRM_ERR_CODE_HWRM_ERROR UINT32_C(0xf)
+ /* Unknown error */
+ #define HWRM_ERR_CODE_UNKNOWN_ERR UINT32_C(0xfffe)
+ /* Unsupported or invalid command */
+ #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED UINT32_C(0xffff)
+ #define HWRM_ERR_CODE_LAST \
+ HWRM_ERR_CODE_CMD_NOT_SUPPORTED
+ uint16_t unused_0[3];
+} __attribute__((packed));
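/*
 * Illustrative mapping of a few of the error_code values above; the
 * helper and its strings are not part of the specification.
 */
static inline const char *
hwrm_err_code_str(uint16_t error_code)
{
	switch (error_code) {
	case HWRM_ERR_CODE_SUCCESS:           return "success";
	case HWRM_ERR_CODE_INVALID_PARAMS:    return "invalid parameters";
	case HWRM_ERR_CODE_NO_BUFFER:         return "no buffer, retry";
	case HWRM_ERR_CODE_CMD_NOT_SUPPORTED: return "command not supported";
	default:                              return "error";
	}
}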
+
+/* Output */
+/* hwrm_err_output (size:128b/16B) */
+struct hwrm_err_output {
+ /*
+ * Pass/Fail or error type
+ *
+ * Note: receiver to verify the in parameters, and fail the call
+ * with an error when appropriate
+ */
+ uint16_t error_code;
+ /* This field returns the type of original request. */
+ uint16_t req_type;
+ /* This field provides original sequence number of the command. */
+ uint16_t seq_id;
+ /*
+ * This field is the length of the response in bytes. The
+ * last byte of the response is a valid flag that will read
+ * as '1' when the command has been completely written to
+ * memory.
+ */
+ uint16_t resp_len;
+ /* debug info for this error response. */
+ uint32_t opaque_0;
+ /* debug info for this error response. */
+ uint16_t opaque_1;
+ /*
+ * In the case of an error response, command specific error
+ * code is returned in this field.
+ */
+ uint8_t cmd_err;
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
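/*
 * Sketch of how a caller would interpret the error output above: wait
 * for the valid byte, then check error_code (cmd_err carries any
 * command-specific detail). The helper name is hypothetical.
 */
static inline int
hwrm_err_output_ok(const volatile struct hwrm_err_output *resp)
{
	if (resp->valid == 0)
		return 0;	/* response not completely written yet */
	return resp->error_code == HWRM_ERR_CODE_SUCCESS;
}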
/*
- * Description: This structure is used to inform the NIC of a location for and
- * an aggregation buffer that will be used for packet data that is received. An
- * aggregation buffer creates a different kind of completion operation for a
- * packet where a variable number of BDs may be used to place the packet in the
- * host. RX Rings that have aggregation buffers are known as aggregation rings
- * and must contain only aggregation buffers.
+ * Following is the signature for HWRM message field that indicates not
+ * applicable (All F's). Need to cast it to the size of the field if needed.
*/
-/* Short TX BD (16 bytes) */
+#define HWRM_NA_SIGNATURE ((uint32_t)(-1))
+/* hwrm_func_buf_rgtr */
+#define HWRM_MAX_REQ_LEN 128
+/* hwrm_selftest_qlist */
+#define HWRM_MAX_RESP_LEN 280
+/* 7 bit indirection table index. */
+#define HW_HASH_INDEX_SIZE 0x80
+#define HW_HASH_KEY_SIZE 40
+/* valid key for HWRM response */
+#define HWRM_RESP_VALID_KEY 1
+#define HWRM_VERSION_MAJOR 1
+#define HWRM_VERSION_MINOR 9
+#define HWRM_VERSION_UPDATE 2
+/* non-zero means beta version */
+#define HWRM_VERSION_RSVD 6
+#define HWRM_VERSION_STR "1.9.2.6"
+
+/****************
+ * hwrm_ver_get *
+ ****************/
+
+
+/* hwrm_ver_get_input (size:192b/24B) */
+struct hwrm_ver_get_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer where the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This field represents the major version of HWRM interface
+ * specification supported by the driver HWRM implementation.
+ * The interface major version is intended to change only when
+ * non backward compatible changes are made to the HWRM
+ * interface specification.
+ */
+ uint8_t hwrm_intf_maj;
+ /*
+ * This field represents the minor version of HWRM interface
+ * specification supported by the driver HWRM implementation.
+ * A change in interface minor version is used to reflect
+ * significant backward compatible modification to HWRM
+ * interface specification.
+ * This can be due to addition or removal of functionality.
+ * HWRM interface specifications with the same major version
+ * but different minor versions are compatible.
+ */
+ uint8_t hwrm_intf_min;
+ /*
+ * This field represents the update version of HWRM interface
+ * specification supported by the driver HWRM implementation.
+ * The interface update version is used to reflect minor
+ * changes or bug fixes to a released HWRM interface
+ * specification.
+ */
+ uint8_t hwrm_intf_upd;
+ uint8_t unused_0[5];
+} __attribute__((packed));
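/*
 * Sketch, not part of the generated header: a driver advertising the
 * interface version it was built against when issuing HWRM_VER_GET.
 * Header fields (cmpl_ring, seq_id, target_id, resp_addr) are assumed
 * to be filled elsewhere.
 */
static inline void
hwrm_ver_get_prepare(struct hwrm_ver_get_input *req)
{
	req->req_type = HWRM_VER_GET;
	req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req->hwrm_intf_min = HWRM_VERSION_MINOR;
	req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
}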
+
+/* hwrm_ver_get_output (size:1408b/176B) */
+struct hwrm_ver_get_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * This field represents the major version of HWRM interface
+ * specification supported by the HWRM implementation.
+ * The interface major version is intended to change only when
+ * non backward compatible changes are made to the HWRM
+ * interface specification.
+ * A HWRM implementation that is compliant with this
+ * specification shall provide value of 1 in this field.
+ */
+ uint8_t hwrm_intf_maj_8b;
+ /*
+ * This field represents the minor version of HWRM interface
+ * specification supported by the HWRM implementation.
+ * A change in interface minor version is used to reflect
+ * significant backward compatible modification to HWRM
+ * interface specification.
+ * This can be due to addition or removal of functionality.
+ * HWRM interface specifications with the same major version
+ * but different minor versions are compatible.
+ * A HWRM implementation that is compliant with this
+ * specification shall provide value of 2 in this field.
+ */
+ uint8_t hwrm_intf_min_8b;
+ /*
+ * This field represents the update version of HWRM interface
+ * specification supported by the HWRM implementation.
+ * The interface update version is used to reflect minor
+ * changes or bug fixes to a released HWRM interface
+ * specification.
+ * A HWRM implementation that is compliant with this
+ * specification shall provide value of 2 in this field.
+ */
+ uint8_t hwrm_intf_upd_8b;
+ uint8_t hwrm_intf_rsvd_8b;
+ /*
+ * This field represents the major version of HWRM firmware.
+ * A change in firmware major version represents a major
+ * firmware release.
+ */
+ uint8_t hwrm_fw_maj_8b;
+ /*
+ * This field represents the minor version of HWRM firmware.
+ * A change in firmware minor version represents significant
+ * firmware functionality changes.
+ */
+ uint8_t hwrm_fw_min_8b;
+ /*
+ * This field represents the build version of HWRM firmware.
+ * A change in firmware build version represents bug fixes
+ * to a released firmware.
+ */
+ uint8_t hwrm_fw_bld_8b;
+ /*
+ * This field is a reserved field. This field can be used to
+ * represent firmware branches or customer specific releases
+ * tied to a specific (major,minor,update) version of the
+ * HWRM firmware.
+ */
+ uint8_t hwrm_fw_rsvd_8b;
+ /*
+ * This field represents the major version of mgmt firmware.
+ * A change in major version represents a major release.
+ */
+ uint8_t mgmt_fw_maj_8b;
+ /*
+ * This field represents the minor version of mgmt firmware.
+ * A change in minor version represents significant
+ * functionality changes.
+ */
+ uint8_t mgmt_fw_min_8b;
+ /*
+ * This field represents the build version of mgmt firmware.
+ * A change in update version represents bug fixes.
+ */
+ uint8_t mgmt_fw_bld_8b;
+ /*
+ * This field is a reserved field. This field can be used to
+ * represent firmware branches or customer specific releases
+ * tied to a specific (major,minor,update) version
+ */
+ uint8_t mgmt_fw_rsvd_8b;
+ /*
+ * This field represents the major version of network
+ * control firmware.
+ * A change in major version represents a major release.
+ */
+ uint8_t netctrl_fw_maj_8b;
+ /*
+ * This field represents the minor version of network
+ * control firmware.
+ * A change in minor version represents significant
+ * functionality changes.
+ */
+ uint8_t netctrl_fw_min_8b;
+ /*
+ * This field represents the build version of network
+ * control firmware.
+ * A change in update version represents bug fixes.
+ */
+ uint8_t netctrl_fw_bld_8b;
+ /*
+ * This field is a reserved field. This field can be used to
+ * represent firmware branches or customer specific releases
+ * tied to a specific (major,minor,update) version
+ */
+ uint8_t netctrl_fw_rsvd_8b;
+ /*
+ * This field is used to indicate device's capabilities and
+ * configurations.
+ */
+ uint32_t dev_caps_cfg;
+ /*
+ * If set to 1, then secure firmware update behavior
+ * is supported.
+ * If set to 0, then secure firmware update behavior is
+ * not supported.
+ */
+ #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, then firmware based DCBX agent is supported.
+ * If set to 0, then firmware based DCBX agent capability
+ * is not supported on this device.
+ */
+ #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, then HWRM short command format is supported.
+ * If set to 0, then HWRM short command format is not supported.
+ */
+ #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED \
+ UINT32_C(0x4)
+ /*
+ * If set to 1, then HWRM short command format is required.
+ * If set to 0, then HWRM short command format is not required.
+ */
+ #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED \
+ UINT32_C(0x8)
+ /*
+ * This field represents the major version of RoCE firmware.
+ * A change in major version represents a major release.
+ */
+ uint8_t roce_fw_maj_8b;
+ /*
+ * This field represents the minor version of RoCE firmware.
+ * A change in minor version represents significant
+ * functionality changes.
+ */
+ uint8_t roce_fw_min_8b;
+ /*
+ * This field represents the build version of RoCE firmware.
+ * A change in update version represents bug fixes.
+ */
+ uint8_t roce_fw_bld_8b;
+ /*
+ * This field is a reserved field. This field can be used to
+ * represent firmware branches or customer specific releases
+ * tied to a specific (major,minor,update) version
+ */
+ uint8_t roce_fw_rsvd_8b;
+ /*
+ * This field represents the name of HWRM FW (ASCII chars
+ * with NULL at the end).
+ */
+ char hwrm_fw_name[16];
+ /*
+ * This field represents the name of mgmt FW (ASCII chars
+ * with NULL at the end).
+ */
+ char mgmt_fw_name[16];
+ /*
+ * This field represents the name of network control
+ * firmware (ASCII chars with NULL at the end).
+ */
+ char netctrl_fw_name[16];
+ /*
+ * This field is reserved for future use.
+ * The responder should set it to 0.
+ * The requester should ignore this field.
+ */
+ uint8_t reserved2[16];
+ /*
+ * This field represents the name of RoCE FW (ASCII chars
+ * with NULL at the end).
+ */
+ char roce_fw_name[16];
+ /* This field returns the chip number. */
+ uint16_t chip_num;
+ /* This field returns the revision of chip. */
+ uint8_t chip_rev;
+ /* This field returns the chip metal number. */
+ uint8_t chip_metal;
+ /* This field returns the bond id of the chip. */
+ uint8_t chip_bond_id;
+ /* This value indicates the type of platform used for chip implementation. */
+ uint8_t chip_platform_type;
+ /* ASIC */
+ #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_ASIC UINT32_C(0x0)
+ /* FPGA platform of the chip. */
+ #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_FPGA UINT32_C(0x1)
+ /* Palladium platform of the chip. */
+ #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_PALLADIUM UINT32_C(0x2)
+ #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_LAST \
+ HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_PALLADIUM
+ /*
+ * This field returns the maximum value of request window that
+ * is supported by the HWRM. The request window is mapped
+ * into device address space using MMIO.
+ */
+ uint16_t max_req_win_len;
+ /*
+ * This field returns the maximum value of response buffer in
+ * bytes.
+ */
+ uint16_t max_resp_len;
+ /*
+ * This field returns the default request timeout value in
+ * milliseconds.
+ */
+ uint16_t def_req_timeout;
+ /*
+	 * This field will indicate if any subsystem is not fully
+ * initialized.
+ */
+ uint8_t flags;
+ /*
+ * If set to 1, device is not ready.
+ * If set to 0, device is ready to accept all HWRM commands.
+ */
+ #define HWRM_VER_GET_OUTPUT_FLAGS_DEV_NOT_RDY UINT32_C(0x1)
+ /*
+ * If set to 1, external version present.
+ * If set to 0, external version not present.
+ */
+ #define HWRM_VER_GET_OUTPUT_FLAGS_EXT_VER_AVAIL UINT32_C(0x2)
+ uint8_t unused_0[2];
+ /*
+ * For backward compatibility this field must be set to 1.
+ * Older drivers might look for this field to be 1 before
+ * processing the message.
+ */
+ uint8_t always_1;
+ /*
+ * This field represents the major version of HWRM interface
+ * specification supported by the HWRM implementation.
+ * The interface major version is intended to change only when
+ * non backward compatible changes are made to the HWRM
+ * interface specification. A HWRM implementation that is
+ * compliant with this specification shall provide value of 1
+ * in this field.
+ */
+ uint16_t hwrm_intf_major;
+ /*
+ * This field represents the minor version of HWRM interface
+ * specification supported by the HWRM implementation.
+ * A change in interface minor version is used to reflect
+ * significant backward compatible modification to HWRM
+ * interface specification. This can be due to addition or
+ * removal of functionality. HWRM interface specifications
+ * with the same major version but different minor versions are
+ * compatible. A HWRM implementation that is compliant with
+ * this specification shall provide value of 2 in this field.
+ */
+ uint16_t hwrm_intf_minor;
+ /*
+ * This field represents the update version of HWRM interface
+ * specification supported by the HWRM implementation. The
+ * interface update version is used to reflect minor changes or
+ * bug fixes to a released HWRM interface specification.
+ * A HWRM implementation that is compliant with this
+ * specification shall provide value of 2 in this field.
+ */
+ uint16_t hwrm_intf_build;
+ /*
+ * This field represents the patch version of HWRM interface
+ * specification supported by the HWRM implementation.
+ */
+ uint16_t hwrm_intf_patch;
+ /*
+ * This field represents the major version of HWRM firmware.
+ * A change in firmware major version represents a major
+ * firmware release.
+ */
+ uint16_t hwrm_fw_major;
+ /*
+ * This field represents the minor version of HWRM firmware.
+ * A change in firmware minor version represents significant
+ * firmware functionality changes.
+ */
+ uint16_t hwrm_fw_minor;
+ /*
+ * This field represents the build version of HWRM firmware.
+ * A change in firmware build version represents bug fixes to
+ * a released firmware.
+ */
+ uint16_t hwrm_fw_build;
+ /*
+ * This field is a reserved field.
+ * This field can be used to represent firmware branches or customer
+ * specific releases tied to a specific (major,minor,update) version
+ * of the HWRM firmware.
+ */
+ uint16_t hwrm_fw_patch;
+ /*
+ * This field represents the major version of mgmt firmware.
+ * A change in major version represents a major release.
+ */
+ uint16_t mgmt_fw_major;
+ /*
+	 * This field represents the minor version of mgmt firmware.
+ * A change in firmware minor version represents significant
+ * firmware functionality changes.
+ */
+ uint16_t mgmt_fw_minor;
+ /*
+ * This field represents the build version of mgmt firmware.
+ * A change in update version represents bug fixes.
+ */
+ uint16_t mgmt_fw_build;
+ /*
+ * This field is a reserved field. This field can be used to
+ * represent firmware branches or customer specific releases
+ * tied to a specific (major,minor,update) version.
+ */
+ uint16_t mgmt_fw_patch;
+ /*
+ * This field represents the major version of network control
+ * firmware. A change in major version represents
+ * a major release.
+ */
+ uint16_t netctrl_fw_major;
+ /*
+ * This field represents the minor version of network control
+ * firmware. A change in minor version represents significant
+ * functionality changes.
+ */
+ uint16_t netctrl_fw_minor;
+ /*
+ * This field represents the build version of network control
+ * firmware. A change in update version represents bug fixes.
+ */
+ uint16_t netctrl_fw_build;
+ /*
+ * This field is a reserved field. This field can be used to
+ * represent firmware branches or customer specific releases
+ * tied to a specific (major,minor,update) version
+ */
+ uint16_t netctrl_fw_patch;
+ /*
+ * This field represents the major version of RoCE firmware.
+ * A change in major version represents a major release.
+ */
+ uint16_t roce_fw_major;
+ /*
+ * This field represents the minor version of RoCE firmware.
+ * A change in minor version represents significant
+ * functionality changes.
+ */
+ uint16_t roce_fw_minor;
+ /*
+ * This field represents the build version of RoCE firmware.
+ * A change in update version represents bug fixes.
+ */
+ uint16_t roce_fw_build;
+ /*
+ * This field is a reserved field. This field can be used to
+ * represent firmware branches or customer specific releases
+ * tied to a specific (major,minor,update) version
+ */
+ uint16_t roce_fw_patch;
+ /*
+ * This field returns the maximum extended request length acceptable
+ * by the device which allows requests greater than mailbox size when
+ * used with the short cmd request format.
+ */
+ uint16_t max_ext_req_len;
+ uint8_t unused_1[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
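
As a rough illustration of how a driver might consume the version-query response fields above, the sketch below polls the valid byte before reading anything else and then checks the DEV_NOT_RDY flag. The helper name and the response structure name (hwrm_ver_get_output) are assumptions made for the example; only the field and flag names come from this header.

/*
 * Illustrative sketch only: the response structure name
 * (hwrm_ver_get_output) and the helper name are assumed, not
 * taken from this patch.
 */
static int example_check_ver_get(const struct hwrm_ver_get_output *resp)
{
	/* The valid byte is written last, so check it before trusting
	 * any other field of the response.
	 */
	if (resp->valid != 1)
		return -1;	/* response not completely written yet */

	/* Device still initializing: not all HWRM commands are accepted. */
	if (resp->flags & HWRM_VER_GET_OUTPUT_FLAGS_DEV_NOT_RDY)
		return -2;

	/* hwrm_intf_major/minor/build and the firmware version fields
	 * can now be compared against what the driver expects.
	 */
	return 0;
}
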
+
+/* bd_base (size:64b/8B) */
+struct bd_base {
+ uint8_t type;
+ /* This value identifies the type of buffer descriptor. */
+ #define BD_BASE_TYPE_MASK UINT32_C(0x3f)
+ #define BD_BASE_TYPE_SFT 0
+ /*
+ * Indicates that this BD is 16B long and is used for
+ * normal L2 packet transmission.
+ */
+ #define BD_BASE_TYPE_TX_BD_SHORT UINT32_C(0x0)
+ /*
+	 * Indicates that this BD is 16B long and is an empty
+ * TX BD. Not valid for use by the driver.
+ */
+ #define BD_BASE_TYPE_TX_BD_EMPTY UINT32_C(0x1)
+ /*
+ * Indicates that this BD is 16B long and is an RX Producer
+ * (ie. empty) buffer descriptor.
+ */
+ #define BD_BASE_TYPE_RX_PROD_PKT UINT32_C(0x4)
+ /*
+ * Indicates that this BD is 16B long and is an RX
+ * Producer Buffer BD.
+ */
+ #define BD_BASE_TYPE_RX_PROD_BFR UINT32_C(0x5)
+ /*
+ * Indicates that this BD is 16B long and is an
+ * RX Producer Assembly Buffer Descriptor.
+ */
+ #define BD_BASE_TYPE_RX_PROD_AGG UINT32_C(0x6)
+ /*
+ * Indicates that this BD is 32B long and is used for
+ * normal L2 packet transmission.
+ */
+ #define BD_BASE_TYPE_TX_BD_LONG UINT32_C(0x10)
+ #define BD_BASE_TYPE_LAST BD_BASE_TYPE_TX_BD_LONG
+ uint8_t unused_1[7];
+} __attribute__((packed));
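
Since every descriptor variant shares this leading type byte, a reader can classify an arbitrary 16B ring entry from it. A minimal sketch, assuming only the bd_base definition above (the helper name is invented):

/* Hypothetical helper: classify a descriptor by its base type field. */
static const char *example_bd_type_name(const struct bd_base *bd)
{
	switch ((bd->type & BD_BASE_TYPE_MASK) >> BD_BASE_TYPE_SFT) {
	case BD_BASE_TYPE_TX_BD_SHORT: return "16B TX BD";
	case BD_BASE_TYPE_TX_BD_EMPTY: return "empty TX BD";
	case BD_BASE_TYPE_RX_PROD_PKT: return "RX producer packet BD";
	case BD_BASE_TYPE_RX_PROD_BFR: return "RX producer buffer BD";
	case BD_BASE_TYPE_RX_PROD_AGG: return "RX producer assembly BD";
	case BD_BASE_TYPE_TX_BD_LONG:  return "32B TX BD";
	default:                       return "unknown";
	}
}
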
+
+/* tx_bd_short (size:128b/16B) */
struct tx_bd_short {
- uint16_t flags_type;
/*
- * All bits in this field must be valid on the first BD of a
- * packet. Only the packet_end bit must be valid for the
- * remaining BDs of a packet.
+ * All bits in this field must be valid on the first BD of a packet.
+ * Only the packet_end bit must be valid for the remaining BDs
+ * of a packet.
*/
+ uint16_t flags_type;
/* This value identifies the type of buffer descriptor. */
- #define TX_BD_SHORT_TYPE_MASK UINT32_C(0x3f)
- #define TX_BD_SHORT_TYPE_SFT 0
+ #define TX_BD_SHORT_TYPE_MASK UINT32_C(0x3f)
+ #define TX_BD_SHORT_TYPE_SFT 0
/*
- * Indicates that this BD is 16B long and is
- * used for normal L2 packet transmission.
+ * Indicates that this BD is 16B long and is used for
+ * normal L2 packet transmission.
*/
- #define TX_BD_SHORT_TYPE_TX_BD_SHORT UINT32_C(0x0)
+ #define TX_BD_SHORT_TYPE_TX_BD_SHORT UINT32_C(0x0)
+ #define TX_BD_SHORT_TYPE_LAST TX_BD_SHORT_TYPE_TX_BD_SHORT
+ /*
+ * All bits in this field must be valid on the first BD of a packet.
+ * Only the packet_end bit must be valid for the remaining BDs
+ * of a packet.
+ */
+ #define TX_BD_SHORT_FLAGS_MASK UINT32_C(0xffc0)
+ #define TX_BD_SHORT_FLAGS_SFT 6
/*
* If set to 1, the packet ends with the data in the buffer
- * pointed to by this descriptor. This flag must be valid on
- * every BD.
+ * pointed to by this descriptor. This flag must be
+ * valid on every BD.
*/
- #define TX_BD_SHORT_FLAGS_PACKET_END UINT32_C(0x40)
+ #define TX_BD_SHORT_FLAGS_PACKET_END UINT32_C(0x40)
/*
* If set to 1, the device will not generate a completion for
 * this transmit packet unless there is an error in its
- * processing. If this bit is set to 0, then the packet will be
- * completed normally. This bit must be valid only on the first
- * BD of a packet.
+ * processing.
+ * If this bit
+ * is set to 0, then the packet will be completed normally.
+ *
+ * This bit must be valid only on the first BD of a packet.
*/
- #define TX_BD_SHORT_FLAGS_NO_CMPL UINT32_C(0x80)
+ #define TX_BD_SHORT_FLAGS_NO_CMPL UINT32_C(0x80)
/*
* This value indicates how many 16B BD locations are consumed
- * in the ring by this packet. A value of 1 indicates that this
- * BD is the only BD (and that the it is a short BD). A value of
- * 3 indicates either 3 short BDs or 1 long BD and one short BD
- * in the packet. A value of 0 indicates that there are 32 BD
- * locations in the packet (the maximum). This field is valid
- * only on the first BD of a packet.
- */
- #define TX_BD_SHORT_FLAGS_BD_CNT_MASK UINT32_C(0x1f00)
- #define TX_BD_SHORT_FLAGS_BD_CNT_SFT 8
- /*
- * This value is a hint for the length of the entire packet. It
- * is used by the chip to optimize internal processing. The
- * packet will be dropped if the hint is too short. This field
- * is valid only on the first BD of a packet.
- */
- #define TX_BD_SHORT_FLAGS_LHINT_MASK UINT32_C(0x6000)
- #define TX_BD_SHORT_FLAGS_LHINT_SFT 13
+ * in the ring by this packet.
+ * A value of 1 indicates that this BD is the only BD (and that
+	 * it is a short BD). A value
+ * of 3 indicates either 3 short BDs or 1 long BD and one short
+ * BD in the packet. A value of 0 indicates
+ * that there are 32 BD locations in the packet (the maximum).
+ *
+ * This field is valid only on the first BD of a packet.
+ */
+ #define TX_BD_SHORT_FLAGS_BD_CNT_MASK UINT32_C(0x1f00)
+ #define TX_BD_SHORT_FLAGS_BD_CNT_SFT 8
+ /*
+ * This value is a hint for the length of the entire packet.
+ * It is used by the chip to optimize internal processing.
+ *
+ * The packet will be dropped if the hint is too short.
+ *
+ * This field is valid only on the first BD of a packet.
+ */
+ #define TX_BD_SHORT_FLAGS_LHINT_MASK UINT32_C(0x6000)
+ #define TX_BD_SHORT_FLAGS_LHINT_SFT 13
/* indicates packet length < 512B */
- #define TX_BD_SHORT_FLAGS_LHINT_LT512 (UINT32_C(0x0) << 13)
+ #define TX_BD_SHORT_FLAGS_LHINT_LT512 (UINT32_C(0x0) << 13)
/* indicates 512 <= packet length < 1KB */
- #define TX_BD_SHORT_FLAGS_LHINT_LT1K (UINT32_C(0x1) << 13)
+ #define TX_BD_SHORT_FLAGS_LHINT_LT1K (UINT32_C(0x1) << 13)
/* indicates 1KB <= packet length < 2KB */
- #define TX_BD_SHORT_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13)
+ #define TX_BD_SHORT_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13)
/* indicates packet length >= 2KB */
- #define TX_BD_SHORT_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13)
+ #define TX_BD_SHORT_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13)
#define TX_BD_SHORT_FLAGS_LHINT_LAST \
TX_BD_SHORT_FLAGS_LHINT_GTE2K
/*
@@ -256,961 +1268,1111 @@ struct tx_bd_short {
* Index after the buffer associated with this descriptor has
* been transferred via DMA to NIC memory from host memory. An
* interrupt may or may not be generated according to the state
- * of the interrupt avoidance mechanisms. If this bit is set to
- * 0, then the Consumer Index is only updated as soon as one of
- * the host interrupt coalescing conditions has been met. This
- * bit must be valid on the first BD of a packet.
+ * of the interrupt avoidance mechanisms. If this bit
+ * is set to 0, then the Consumer Index is only updated as soon
+ * as one of the host interrupt coalescing conditions has been met.
+ *
+ * This bit must be valid on the first BD of a packet.
*/
- #define TX_BD_SHORT_FLAGS_COAL_NOW UINT32_C(0x8000)
+ #define TX_BD_SHORT_FLAGS_COAL_NOW UINT32_C(0x8000)
/*
- * All bits in this field must be valid on the first BD of a
- * packet. Only the packet_end bit must be valid for the
- * remaining BDs of a packet.
+ * This is the length of the host physical buffer this BD describes
+ * in bytes.
+ *
+ * This field must be valid on all BDs of a packet.
*/
- #define TX_BD_SHORT_FLAGS_MASK UINT32_C(0xffc0)
- #define TX_BD_SHORT_FLAGS_SFT 6
- uint16_t len;
+ uint16_t len;
/*
- * This is the length of the host physical buffer this BD
- * describes in bytes. This field must be valid on all BDs of a
- * packet.
+	 * The opaque data field is passed through to the completion and can be
+ * used for any data that the driver wants to associate with the
+ * transmit BD.
+ *
+ * This field must be valid on the first BD of a packet.
*/
- uint32_t opaque;
+ uint32_t opaque;
/*
- * The opaque data field is pass through to the completion and
- * can be used for any data that the driver wants to associate
- * with the transmit BD. This field must be valid on the first
- * BD of a packet.
- */
- uint64_t addr;
- /*
- * This is the host physical address for the portion of the
- * packet described by this TX BD. This value must be valid on
- * all BDs of a packet.
+ * This is the host physical address for the portion of the packet
+ * described by this TX BD.
+ *
+ * This value must be valid on all BDs of a packet.
*/
+ uint64_t address;
} __attribute__((packed));
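
To make the flags_type encoding above concrete, here is a hedged sketch that fills one stand-alone short TX BD for a packet under 512B. Endianness conversion, ring indexing and the doorbell write that real descriptor code needs are deliberately omitted; the helper name and its parameters are invented for the example.

/* Illustrative only: byte-order conversion and doorbell handling are
 * intentionally left out.
 */
static void example_fill_tx_bd_short(struct tx_bd_short *txbd,
				     uint64_t buf_pa, uint16_t pkt_len,
				     uint32_t sw_cookie)
{
	txbd->flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT |
			   TX_BD_SHORT_FLAGS_PACKET_END |	  /* single-BD packet */
			   (1 << TX_BD_SHORT_FLAGS_BD_CNT_SFT) |  /* consumes 1 BD slot */
			   TX_BD_SHORT_FLAGS_LHINT_LT512;	  /* length hint < 512B */
	txbd->len = pkt_len;		/* bytes in this host buffer */
	txbd->opaque = sw_cookie;	/* echoed back in the TX completion */
	txbd->address = buf_pa;		/* host physical address of the data */
}
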
-/* Long TX BD (32 bytes split to 2 16-byte struct) */
+/* tx_bd_long (size:128b/16B) */
struct tx_bd_long {
- uint16_t flags_type;
+ /* This value identifies the type of buffer descriptor. */
+ uint16_t flags_type;
+ /*
+	 * This value indicates the type of buffer descriptor.
+ */
+ #define TX_BD_LONG_TYPE_MASK UINT32_C(0x3f)
+ #define TX_BD_LONG_TYPE_SFT 0
/*
- * All bits in this field must be valid on the first BD of a
- * packet. Only the packet_end bit must be valid for the
- * remaining BDs of a packet.
+ * Indicates that this BD is 32B long and is used for
+ * normal L2 packet transmission.
*/
- /* This value identifies the type of buffer descriptor. */
- #define TX_BD_LONG_TYPE_MASK UINT32_C(0x3f)
- #define TX_BD_LONG_TYPE_SFT 0
+ #define TX_BD_LONG_TYPE_TX_BD_LONG UINT32_C(0x10)
+ #define TX_BD_LONG_TYPE_LAST TX_BD_LONG_TYPE_TX_BD_LONG
/*
- * Indicates that this BD is 32B long and is
- * used for normal L2 packet transmission.
+ * All bits in this field must be valid on the first BD of a packet.
+ * Only the packet_end bit must be valid for the remaining BDs
+ * of a packet.
*/
- #define TX_BD_LONG_TYPE_TX_BD_LONG UINT32_C(0x10)
+ #define TX_BD_LONG_FLAGS_MASK UINT32_C(0xffc0)
+ #define TX_BD_LONG_FLAGS_SFT 6
/*
* If set to 1, the packet ends with the data in the buffer
- * pointed to by this descriptor. This flag must be valid on
- * every BD.
+ * pointed to by this descriptor. This flag must be
+ * valid on every BD.
*/
- #define TX_BD_LONG_FLAGS_PACKET_END UINT32_C(0x40)
+ #define TX_BD_LONG_FLAGS_PACKET_END UINT32_C(0x40)
/*
* If set to 1, the device will not generate a completion for
 * this transmit packet unless there is an error in its
- * processing. If this bit is set to 0, then the packet will be
- * completed normally. This bit must be valid only on the first
- * BD of a packet.
+ * processing.
+ * If this bit
+ * is set to 0, then the packet will be completed normally.
+ *
+ * This bit must be valid only on the first BD of a packet.
*/
- #define TX_BD_LONG_FLAGS_NO_CMPL UINT32_C(0x80)
+ #define TX_BD_LONG_FLAGS_NO_CMPL UINT32_C(0x80)
/*
* This value indicates how many 16B BD locations are consumed
- * in the ring by this packet. A value of 1 indicates that this
- * BD is the only BD (and that the it is a short BD). A value of
- * 3 indicates either 3 short BDs or 1 long BD and one short BD
- * in the packet. A value of 0 indicates that there are 32 BD
- * locations in the packet (the maximum). This field is valid
- * only on the first BD of a packet.
- */
- #define TX_BD_LONG_FLAGS_BD_CNT_MASK UINT32_C(0x1f00)
- #define TX_BD_LONG_FLAGS_BD_CNT_SFT 8
- /*
- * This value is a hint for the length of the entire packet. It
- * is used by the chip to optimize internal processing. The
- * packet will be dropped if the hint is too short. This field
- * is valid only on the first BD of a packet.
- */
- #define TX_BD_LONG_FLAGS_LHINT_MASK UINT32_C(0x6000)
- #define TX_BD_LONG_FLAGS_LHINT_SFT 13
+ * in the ring by this packet.
+ * A value of 1 indicates that this BD is the only BD (and that
+	 * it is a short BD). A value
+ * of 3 indicates either 3 short BDs or 1 long BD and one short
+ * BD in the packet. A value of 0 indicates
+ * that there are 32 BD locations in the packet (the maximum).
+ *
+ * This field is valid only on the first BD of a packet.
+ */
+ #define TX_BD_LONG_FLAGS_BD_CNT_MASK UINT32_C(0x1f00)
+ #define TX_BD_LONG_FLAGS_BD_CNT_SFT 8
+ /*
+ * This value is a hint for the length of the entire packet.
+ * It is used by the chip to optimize internal processing.
+ *
+ * The packet will be dropped if the hint is too short.
+ *
+ * This field is valid only on the first BD of a packet.
+ */
+ #define TX_BD_LONG_FLAGS_LHINT_MASK UINT32_C(0x6000)
+ #define TX_BD_LONG_FLAGS_LHINT_SFT 13
/* indicates packet length < 512B */
- #define TX_BD_LONG_FLAGS_LHINT_LT512 (UINT32_C(0x0) << 13)
+ #define TX_BD_LONG_FLAGS_LHINT_LT512 (UINT32_C(0x0) << 13)
/* indicates 512 <= packet length < 1KB */
- #define TX_BD_LONG_FLAGS_LHINT_LT1K (UINT32_C(0x1) << 13)
+ #define TX_BD_LONG_FLAGS_LHINT_LT1K (UINT32_C(0x1) << 13)
/* indicates 1KB <= packet length < 2KB */
- #define TX_BD_LONG_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13)
+ #define TX_BD_LONG_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13)
/* indicates packet length >= 2KB */
- #define TX_BD_LONG_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13)
- #define TX_BD_LONG_FLAGS_LHINT_LAST \
- TX_BD_LONG_FLAGS_LHINT_GTE2K
+ #define TX_BD_LONG_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13)
+ #define TX_BD_LONG_FLAGS_LHINT_LAST TX_BD_LONG_FLAGS_LHINT_GTE2K
/*
* If set to 1, the device immediately updates the Send Consumer
* Index after the buffer associated with this descriptor has
* been transferred via DMA to NIC memory from host memory. An
* interrupt may or may not be generated according to the state
- * of the interrupt avoidance mechanisms. If this bit is set to
- * 0, then the Consumer Index is only updated as soon as one of
- * the host interrupt coalescing conditions has been met. This
- * bit must be valid on the first BD of a packet.
- */
- #define TX_BD_LONG_FLAGS_COAL_NOW UINT32_C(0x8000)
- /*
- * All bits in this field must be valid on the first BD of a
- * packet. Only the packet_end bit must be valid for the
- * remaining BDs of a packet.
+ * of the interrupt avoidance mechanisms. If this bit
+ * is set to 0, then the Consumer Index is only updated as soon
+ * as one of the host interrupt coalescing conditions has been met.
+ *
+ * This bit must be valid on the first BD of a packet.
*/
- #define TX_BD_LONG_FLAGS_MASK UINT32_C(0xffc0)
- #define TX_BD_LONG_FLAGS_SFT 6
- uint16_t len;
+ #define TX_BD_LONG_FLAGS_COAL_NOW UINT32_C(0x8000)
/*
- * This is the length of the host physical buffer this BD
- * describes in bytes. This field must be valid on all BDs of a
- * packet.
+ * This is the length of the host physical buffer this BD describes
+ * in bytes.
+ *
+ * This field must be valid on all BDs of a packet.
*/
- uint32_t opaque;
+ uint16_t len;
/*
- * The opaque data field is pass through to the completion and
- * can be used for any data that the driver wants to associate
- * with the transmit BD. This field must be valid on the first
- * BD of a packet.
+	 * The opaque data field is passed through to the completion and can be
+ * used for any data that the driver wants to associate with the
+ * transmit BD.
+ *
+ * This field must be valid on the first BD of a packet.
*/
- uint64_t addr;
+ uint32_t opaque;
/*
- * This is the host physical address for the portion of the
- * packet described by this TX BD. This value must be valid on
- * all BDs of a packet.
+ * This is the host physical address for the portion of the packet
+ * described by this TX BD.
+ *
+ * This value must be valid on all BDs of a packet.
*/
+ uint64_t address;
} __attribute__((packed));
-/* last 16 bytes of Long TX BD */
+/* tx_bd_long_hi (size:128b/16B) */
struct tx_bd_long_hi {
- uint16_t lflags;
/*
- * All bits in this field must be valid on the first BD of a
- * packet. Their value on other BDs of the packet will be
- * ignored.
+ * All bits in this field must be valid on the first BD of a packet.
+ * Their value on other BDs of the packet will be ignored.
*/
+ uint16_t lflags;
/*
 * If set to 1, the controller replaces the TCP/UDP checksum
 * fields of normal TCP/UDP checksum, or the inner TCP/UDP
* checksum field of the encapsulated TCP/UDP packets with the
- * hardware calculated TCP/UDP checksum for the packet
- * associated with this descriptor. The flag is ignored if the
- * LSO flag is set. This bit must be valid on the first BD of a
- * packet.
+ * hardware calculated TCP/UDP checksum for the packet associated
+ * with this descriptor. The flag is ignored if the LSO flag is set.
+ *
+ * This bit must be valid on the first BD of a packet.
*/
- #define TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM UINT32_C(0x1)
+ #define TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM UINT32_C(0x1)
/*
- * If set to 1, the controller replaces the IP checksum of the
+ * If set to 1, the controller replaces the IP checksum of the
* normal packets, or the inner IP checksum of the encapsulated
* packets with the hardware calculated IP checksum for the
- * packet associated with this descriptor. This bit must be
- * valid on the first BD of a packet.
+ * packet associated with this descriptor.
+ *
+ * This bit must be valid on the first BD of a packet.
*/
- #define TX_BD_LONG_LFLAGS_IP_CHKSUM UINT32_C(0x2)
+ #define TX_BD_LONG_LFLAGS_IP_CHKSUM UINT32_C(0x2)
/*
* If set to 1, the controller will not append an Ethernet CRC
- * to the end of the frame. This bit must be valid on the first
- * BD of a packet. Packet must be 64B or longer when this flag
- * is set. It is not useful to use this bit with any form of TX
- * offload such as CSO or LSO. The intent is that the packet
- * from the host already has a valid Ethernet CRC on the packet.
+ * to the end of the frame.
+ *
+ * This bit must be valid on the first BD of a packet.
+ *
+ * Packet must be 64B or longer when this flag is set. It is not
+ * useful to use this bit with any form of TX offload such as
+ * CSO or LSO. The intent is that the packet from the host already
+ * has a valid Ethernet CRC on the packet.
*/
- #define TX_BD_LONG_LFLAGS_NOCRC UINT32_C(0x4)
+ #define TX_BD_LONG_LFLAGS_NOCRC UINT32_C(0x4)
/*
- * If set to 1, the device will record the time at which the
- * packet was actually transmitted at the TX MAC. This bit must
- * be valid on the first BD of a packet.
+ * If set to 1, the device will record the time at which the packet
+ * was actually transmitted at the TX MAC.
+ *
+ * This bit must be valid on the first BD of a packet.
*/
- #define TX_BD_LONG_LFLAGS_STAMP UINT32_C(0x8)
+ #define TX_BD_LONG_LFLAGS_STAMP UINT32_C(0x8)
/*
* If set to 1, The controller replaces the tunnel IP checksum
* field with hardware calculated IP checksum for the IP header
- * of the packet associated with this descriptor. For outer UDP
- * checksum, global outer UDP checksum TE_NIC register needs to
- * be enabled. If the global outer UDP checksum TE_NIC register
- * bit is set, outer UDP checksum will be calculated for the
- * following cases: 1. Packets with tcp_udp_chksum flag set to
- * offload checksum for inner packet AND the inner packet is
- * TCP/UDP. If the inner packet is ICMP for example (non-
- * TCP/UDP), even if the tcp_udp_chksum is set, the outer UDP
- * checksum will not be calculated. 2. Packets with lso flag set
- * which implies inner TCP checksum calculation as part of LSO
- * operation.
- */
- #define TX_BD_LONG_LFLAGS_T_IP_CHKSUM UINT32_C(0x10)
- /*
- * If set to 1, the device will treat this packet with LSO(Large
+ * of the packet associated with this descriptor.
+ *
+ * For outer UDP checksum, global outer UDP checksum TE_NIC register
+ * needs to be enabled. If the global outer UDP checksum TE_NIC register
+ * bit is set, outer UDP checksum will be calculated for the following
+ * cases:
+ * 1. Packets with tcp_udp_chksum flag set to offload checksum for inner
+ * packet AND the inner packet is TCP/UDP. If the inner packet is ICMP for
+ * example (non-TCP/UDP), even if the tcp_udp_chksum is set, the outer UDP
+ * checksum will not be calculated.
+ * 2. Packets with lso flag set which implies inner TCP checksum calculation
+ * as part of LSO operation.
+ */
+ #define TX_BD_LONG_LFLAGS_T_IP_CHKSUM UINT32_C(0x10)
+ /*
+ * If set to 1, the device will treat this packet with LSO(Large
* Send Offload) processing for both normal or encapsulated
- * packets, which is a form of TCP segmentation. When this bit
+ * packets, which is a form of TCP segmentation. When this bit
* is 1, the hdr_size and mss fields must be valid. The driver
- * doesn't need to set t_ip_chksum, ip_chksum, and
- * tcp_udp_chksum flags since the controller will replace the
- * appropriate checksum fields for segmented packets. When this
- * bit is 1, the hdr_size and mss fields must be valid.
+ * doesn't need to set t_ip_chksum, ip_chksum, and tcp_udp_chksum
+ * flags since the controller will replace the appropriate
+ * checksum fields for segmented packets.
+ *
+ * When this bit is 1, the hdr_size and mss fields must be valid.
*/
- #define TX_BD_LONG_LFLAGS_LSO UINT32_C(0x20)
+ #define TX_BD_LONG_LFLAGS_LSO UINT32_C(0x20)
/*
* If set to zero when LSO is '1', then the IPID will be treated
* as a 16b number and will be wrapped if it exceeds a value of
- * 0xffff. If set to one when LSO is '1', then the IPID will be
- * treated as a 15b number and will be wrapped if it exceeds a
- * value 0f 0x7fff.
+ * 0xffff.
+ *
+ * If set to one when LSO is '1', then the IPID will be treated
+	 * as a 15b number and will be wrapped if it exceeds a value of
+ * 0x7fff.
*/
- #define TX_BD_LONG_LFLAGS_IPID_FMT UINT32_C(0x40)
+ #define TX_BD_LONG_LFLAGS_IPID_FMT UINT32_C(0x40)
/*
* If set to zero when LSO is '1', then the IPID of the tunnel
- * IP header will not be modified during LSO operations. If set
- * to one when LSO is '1', then the IPID of the tunnel IP header
- * will be incremented for each subsequent segment of an LSO
- * operation. The flag is ignored if the LSO packet is a normal
- * (non-tunneled) TCP packet.
+ * IP header will not be modified during LSO operations.
+ *
+ * If set to one when LSO is '1', then the IPID of the tunnel
+ * IP header will be incremented for each subsequent segment of an
+ * LSO operation.
+ *
+ * The flag is ignored if the LSO packet is a normal (non-tunneled)
+ * TCP packet.
*/
- #define TX_BD_LONG_LFLAGS_T_IPID UINT32_C(0x80)
+ #define TX_BD_LONG_LFLAGS_T_IPID UINT32_C(0x80)
/*
* If set to '1', then the RoCE ICRC will be appended to the
- * packet. Packet must be a valid RoCE format packet.
+ * packet. Packet must be a valid RoCE format packet.
*/
- #define TX_BD_LONG_LFLAGS_ROCE_CRC UINT32_C(0x100)
+ #define TX_BD_LONG_LFLAGS_ROCE_CRC UINT32_C(0x100)
/*
* If set to '1', then the FCoE CRC will be appended to the
- * packet. Packet must be a valid FCoE format packet.
+ * packet. Packet must be a valid FCoE format packet.
*/
- #define TX_BD_LONG_LFLAGS_FCOE_CRC UINT32_C(0x200)
- uint16_t hdr_size;
+ #define TX_BD_LONG_LFLAGS_FCOE_CRC UINT32_C(0x200)
+ uint16_t hdr_size;
/*
* When LSO is '1', this field must contain the offset of the
- * TCP payload from the beginning of the packet in as 16b words.
- * In case of encapsulated/tunneling packet, this field contains
- * the offset of the inner TCP payload from beginning of the
- * packet as 16-bit words. This value must be valid on the first
- * BD of a packet.
- */
- #define TX_BD_LONG_HDR_SIZE_MASK UINT32_C(0x1ff)
- #define TX_BD_LONG_HDR_SIZE_SFT 0
- uint32_t mss;
- /*
- * This is the MSS value that will be used to do the LSO
- * processing. The value is the length in bytes of the TCP
- * payload for each segment generated by the LSO operation. This
- * value must be valid on the first BD of a packet.
- */
- #define TX_BD_LONG_MSS_MASK UINT32_C(0x7fff)
- #define TX_BD_LONG_MSS_SFT 0
- uint16_t unused_2;
- uint16_t cfa_action;
- /*
- * This value selects a CFA action to perform on the packet. Set
- * this value to zero if no CFA action is desired. This value
- * must be valid on the first BD of a packet.
- */
- uint32_t cfa_meta;
- /*
- * This value is action meta-data that defines CFA edit
- * operations that are done in addition to any action editing.
- */
+	 * TCP payload from the beginning of the packet as 16-bit
+	 * words. In case of encapsulated/tunneling packet, this field
+ * contains the offset of the inner TCP payload from beginning of the
+ * packet as 16-bit words.
+ *
+ * This value must be valid on the first BD of a packet.
+ */
+ #define TX_BD_LONG_HDR_SIZE_MASK UINT32_C(0x1ff)
+ #define TX_BD_LONG_HDR_SIZE_SFT 0
+ uint32_t mss;
+ /*
+ * This is the MSS value that will be used to do the LSO processing.
+ * The value is the length in bytes of the TCP payload for each
+ * segment generated by the LSO operation.
+ *
+ * This value must be valid on the first BD of a packet.
+ */
+ #define TX_BD_LONG_MSS_MASK UINT32_C(0x7fff)
+ #define TX_BD_LONG_MSS_SFT 0
+ uint16_t unused2;
+ /*
+ * This value selects a CFA action to perform on the packet.
+ * Set this value to zero if no CFA action is desired.
+ *
+ * This value must be valid on the first BD of a packet.
+ */
+ uint16_t cfa_action;
+ /*
+ * This value is action meta-data that defines CFA edit operations
+ * that are done in addition to any action editing.
+ */
+ uint32_t cfa_meta;
/* When key=1, This is the VLAN tag VID value. */
- #define TX_BD_LONG_CFA_META_VLAN_VID_MASK UINT32_C(0xfff)
- #define TX_BD_LONG_CFA_META_VLAN_VID_SFT 0
+ #define TX_BD_LONG_CFA_META_VLAN_VID_MASK UINT32_C(0xfff)
+ #define TX_BD_LONG_CFA_META_VLAN_VID_SFT 0
/* When key=1, This is the VLAN tag DE value. */
- #define TX_BD_LONG_CFA_META_VLAN_DE UINT32_C(0x1000)
+ #define TX_BD_LONG_CFA_META_VLAN_DE UINT32_C(0x1000)
/* When key=1, This is the VLAN tag PRI value. */
- #define TX_BD_LONG_CFA_META_VLAN_PRI_MASK UINT32_C(0xe000)
- #define TX_BD_LONG_CFA_META_VLAN_PRI_SFT 13
+ #define TX_BD_LONG_CFA_META_VLAN_PRI_MASK UINT32_C(0xe000)
+ #define TX_BD_LONG_CFA_META_VLAN_PRI_SFT 13
/* When key=1, This is the VLAN tag TPID select value. */
- #define TX_BD_LONG_CFA_META_VLAN_TPID_MASK UINT32_C(0x70000)
- #define TX_BD_LONG_CFA_META_VLAN_TPID_SFT 16
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_MASK UINT32_C(0x70000)
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_SFT 16
/* 0x88a8 */
- #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8 (UINT32_C(0x0) << 16)
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8 (UINT32_C(0x0) << 16)
/* 0x8100 */
- #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100 (UINT32_C(0x1) << 16)
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100 (UINT32_C(0x1) << 16)
/* 0x9100 */
- #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100 (UINT32_C(0x2) << 16)
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100 (UINT32_C(0x2) << 16)
/* 0x9200 */
- #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200 (UINT32_C(0x3) << 16)
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200 (UINT32_C(0x3) << 16)
/* 0x9300 */
- #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300 (UINT32_C(0x4) << 16)
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300 (UINT32_C(0x4) << 16)
/* Value programmed in CFA VLANTPID register. */
- #define TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG (UINT32_C(0x5) << 16)
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG (UINT32_C(0x5) << 16)
#define TX_BD_LONG_CFA_META_VLAN_TPID_LAST \
TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG
 /* When key=1, these bits are reserved. */
- #define TX_BD_LONG_CFA_META_VLAN_RESERVED_MASK UINT32_C(0xff80000)
- #define TX_BD_LONG_CFA_META_VLAN_RESERVED_SFT 19
+ #define TX_BD_LONG_CFA_META_VLAN_RESERVED_MASK UINT32_C(0xff80000)
+ #define TX_BD_LONG_CFA_META_VLAN_RESERVED_SFT 19
/*
- * This field identifies the type of edit to be performed on the
- * packet. This value must be valid on the first BD of a packet.
+ * This field identifies the type of edit to be performed
+ * on the packet.
+ *
+ * This value must be valid on the first BD of a packet.
*/
- #define TX_BD_LONG_CFA_META_KEY_MASK UINT32_C(0xf0000000)
- #define TX_BD_LONG_CFA_META_KEY_SFT 28
+ #define TX_BD_LONG_CFA_META_KEY_MASK UINT32_C(0xf0000000)
+ #define TX_BD_LONG_CFA_META_KEY_SFT 28
/* No editing */
- #define TX_BD_LONG_CFA_META_KEY_NONE (UINT32_C(0x0) << 28)
+ #define TX_BD_LONG_CFA_META_KEY_NONE (UINT32_C(0x0) << 28)
/*
- * - meta[17:16] - TPID select value (0 =
- * 0x8100). - meta[15:12] - PRI/DE value. -
- * meta[11:0] - VID value.
+ * - meta[17:16] - TPID select value (0 = 0x8100).
+ * - meta[15:12] - PRI/DE value.
+ * - meta[11:0] - VID value.
*/
- #define TX_BD_LONG_CFA_META_KEY_VLAN_TAG (UINT32_C(0x1) << 28)
+ #define TX_BD_LONG_CFA_META_KEY_VLAN_TAG (UINT32_C(0x1) << 28)
#define TX_BD_LONG_CFA_META_KEY_LAST \
TX_BD_LONG_CFA_META_KEY_VLAN_TAG
} __attribute__((packed));
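
As an illustration of the cfa_meta encoding described above, the sketch below builds a VLAN-tag edit (key = VLAN_TAG, TPID 0x8100) in the second 16B half of a long TX BD. The helper name and its pri/vid parameters are invented; only the field and macro names come from this header.

/* Hypothetical helper: request insertion of an 802.1Q tag via CFA. */
static void example_set_vlan_meta(struct tx_bd_long_hi *txbd_hi,
				  uint16_t vid, uint8_t pri)
{
	txbd_hi->cfa_meta =
		TX_BD_LONG_CFA_META_KEY_VLAN_TAG |		/* edit type: VLAN tag */
		TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100 |	/* TPID 0x8100 */
		((uint32_t)pri << TX_BD_LONG_CFA_META_VLAN_PRI_SFT) |
		((uint32_t)vid << TX_BD_LONG_CFA_META_VLAN_VID_SFT);
	txbd_hi->cfa_action = 0;	/* no CFA action selected */
}
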
-/* RX Producer Packet BD (16 bytes) */
+/* tx_bd_empty (size:128b/16B) */
+struct tx_bd_empty {
+ /* This value identifies the type of buffer descriptor. */
+ uint8_t type;
+ #define TX_BD_EMPTY_TYPE_MASK UINT32_C(0x3f)
+ #define TX_BD_EMPTY_TYPE_SFT 0
+ /*
+	 * Indicates that this BD is 16B long and is an empty
+ * TX BD. Not valid for use by the driver.
+ */
+ #define TX_BD_EMPTY_TYPE_TX_BD_EMPTY UINT32_C(0x1)
+ #define TX_BD_EMPTY_TYPE_LAST TX_BD_EMPTY_TYPE_TX_BD_EMPTY
+ uint8_t unused_1[3];
+ uint8_t unused_2;
+ uint8_t unused_3[3];
+ uint8_t unused_4[8];
+} __attribute__((packed));
+
+/* rx_prod_pkt_bd (size:128b/16B) */
struct rx_prod_pkt_bd {
- uint16_t flags_type;
/* This value identifies the type of buffer descriptor. */
- #define RX_PROD_PKT_BD_TYPE_MASK UINT32_C(0x3f)
- #define RX_PROD_PKT_BD_TYPE_SFT 0
+ uint16_t flags_type;
+ /* This value identifies the type of buffer descriptor. */
+ #define RX_PROD_PKT_BD_TYPE_MASK UINT32_C(0x3f)
+ #define RX_PROD_PKT_BD_TYPE_SFT 0
/*
- * Indicates that this BD is 16B long and is an
- * RX Producer (ie. empty) buffer descriptor.
+ * Indicates that this BD is 16B long and is an RX Producer
+ * (ie. empty) buffer descriptor.
*/
- #define RX_PROD_PKT_BD_TYPE_RX_PROD_PKT UINT32_C(0x4)
+ #define RX_PROD_PKT_BD_TYPE_RX_PROD_PKT UINT32_C(0x4)
+ #define RX_PROD_PKT_BD_TYPE_LAST \
+ RX_PROD_PKT_BD_TYPE_RX_PROD_PKT
+ #define RX_PROD_PKT_BD_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_PROD_PKT_BD_FLAGS_SFT 6
/*
* If set to 1, the packet will be placed at the address plus
- * 2B. The 2 Bytes of padding will be written as zero.
+ * 2B. The 2 Bytes of padding will be written as zero.
*/
- /*
- * This is intended to be used when the host buffer is cache-
- * line aligned to produce packets that are easy to parse in
- * host memory while still allowing writes to be cache line
- * aligned.
- */
- #define RX_PROD_PKT_BD_FLAGS_SOP_PAD UINT32_C(0x40)
+ #define RX_PROD_PKT_BD_FLAGS_SOP_PAD UINT32_C(0x40)
/*
* If set to 1, the packet write will be padded out to the
* nearest cache-line with zero value padding.
*/
+ #define RX_PROD_PKT_BD_FLAGS_EOP_PAD UINT32_C(0x80)
+ /*
+ * This value is the number of additional buffers in the ring that
+	 * describe the buffer space to be consumed for this packet.
+ * If the value is zero, then the packet must fit within the
+ * space described by this BD. If this value is 1 or more, it
+ * indicates how many additional "buffer" BDs are in the ring
+ * immediately following this BD to be used for the same
+ * network packet.
+ *
+ * Even if the packet to be placed does not need all the
+ * additional buffers, they will be consumed anyway.
+ */
+ #define RX_PROD_PKT_BD_FLAGS_BUFFERS_MASK UINT32_C(0x300)
+ #define RX_PROD_PKT_BD_FLAGS_BUFFERS_SFT 8
/*
- * If receive buffers start/end on cache-line boundaries, this
- * feature will ensure that all data writes on the PCI bus
- * start/end on cache line boundaries.
+ * This is the length in Bytes of the host physical buffer where
+ * data for the packet may be placed in host memory.
*/
- #define RX_PROD_PKT_BD_FLAGS_EOP_PAD UINT32_C(0x80)
+ uint16_t len;
/*
- * This value is the number of additional buffers in the ring
- * that describe the buffer space to be consumed for the this
- * packet. If the value is zero, then the packet must fit within
- * the space described by this BD. If this value is 1 or more,
- * it indicates how many additional "buffer" BDs are in the ring
- * immediately following this BD to be used for the same network
- * packet. Even if the packet to be placed does not need all the
- * additional buffers, they will be consumed anyway.
+	 * The opaque data field is passed through to the completion and can be
+ * used for any data that the driver wants to associate with this
+ * receive buffer set.
+ */
+ uint32_t opaque;
+ /*
+ * This is the host physical address where data for the packet may
+	 * be placed in host memory.
+ */
+ uint64_t address;
+} __attribute__((packed));
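
A hedged sketch of posting one empty receive buffer through a producer packet BD, with SOP padding enabled and no additional buffer BDs; ring-index and doorbell handling are out of scope and the helper name is invented.

/* Illustrative only: the caller owns the ring index and doorbell. */
static void example_fill_rx_prod_pkt_bd(struct rx_prod_pkt_bd *rxbd,
					uint64_t buf_pa, uint16_t buf_len,
					uint32_t sw_cookie)
{
	rxbd->flags_type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT |
			   RX_PROD_PKT_BD_FLAGS_SOP_PAD;  /* place packet at address + 2B */
	/* BUFFERS field left at 0: the packet must fit in this buffer. */
	rxbd->len = buf_len;		/* size of the host buffer in bytes */
	rxbd->opaque = sw_cookie;	/* echoed back in the RX completion */
	rxbd->address = buf_pa;		/* host physical address of the buffer */
}
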
+
+/* rx_prod_bfr_bd (size:128b/16B) */
+struct rx_prod_bfr_bd {
+ /* This value identifies the type of buffer descriptor. */
+ uint16_t flags_type;
+ /* This value identifies the type of buffer descriptor. */
+ #define RX_PROD_BFR_BD_TYPE_MASK UINT32_C(0x3f)
+ #define RX_PROD_BFR_BD_TYPE_SFT 0
+ /*
+ * Indicates that this BD is 16B long and is an RX
+ * Producer Buffer BD.
*/
- #define RX_PROD_PKT_BD_FLAGS_BUFFERS_MASK UINT32_C(0x300)
- #define RX_PROD_PKT_BD_FLAGS_BUFFERS_SFT 8
- #define RX_PROD_PKT_BD_FLAGS_MASK UINT32_C(0xffc0)
- #define RX_PROD_PKT_BD_FLAGS_SFT 6
- uint16_t len;
+ #define RX_PROD_BFR_BD_TYPE_RX_PROD_BFR UINT32_C(0x5)
+ #define RX_PROD_BFR_BD_TYPE_LAST RX_PROD_BFR_BD_TYPE_RX_PROD_BFR
+ #define RX_PROD_BFR_BD_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_PROD_BFR_BD_FLAGS_SFT 6
/*
* This is the length in Bytes of the host physical buffer where
* data for the packet may be placed in host memory.
*/
+ uint16_t len;
+ /* This field is not used. */
+ uint32_t opaque;
/*
- * While this is a Byte resolution value, it is often
- * advantageous to ensure that the buffers provided end on a
- * host cache line.
+ * This is the host physical address where data for the packet may
+	 * be placed in host memory.
+ */
+ uint64_t address;
+} __attribute__((packed));
+
+/* rx_prod_agg_bd (size:128b/16B) */
+struct rx_prod_agg_bd {
+ /* This value identifies the type of buffer descriptor. */
+ uint16_t flags_type;
+ /* This value identifies the type of buffer descriptor. */
+ #define RX_PROD_AGG_BD_TYPE_MASK UINT32_C(0x3f)
+ #define RX_PROD_AGG_BD_TYPE_SFT 0
+ /*
+ * Indicates that this BD is 16B long and is an
+ * RX Producer Assembly Buffer Descriptor.
+ */
+ #define RX_PROD_AGG_BD_TYPE_RX_PROD_AGG UINT32_C(0x6)
+ #define RX_PROD_AGG_BD_TYPE_LAST \
+ RX_PROD_AGG_BD_TYPE_RX_PROD_AGG
+ #define RX_PROD_AGG_BD_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_PROD_AGG_BD_FLAGS_SFT 6
+ /*
+ * If set to 1, the packet write will be padded out to the
+ * nearest cache-line with zero value padding.
*/
- uint32_t opaque;
+ #define RX_PROD_AGG_BD_FLAGS_EOP_PAD UINT32_C(0x40)
/*
- * The opaque data field is pass through to the completion and
- * can be used for any data that the driver wants to associate
- * with this receive buffer set.
+ * This is the length in Bytes of the host physical buffer where
+ * data for the packet may be placed in host memory.
*/
- uint64_t addr;
+ uint16_t len;
/*
- * This is the host physical address where data for the packet
- * may by placed in host memory.
+	 * The opaque data field is passed through to the completion and can be
+ * used for any data that the driver wants to associate with this
+ * receive assembly buffer.
*/
+ uint32_t opaque;
/*
- * While this is a Byte resolution value, it is often
- * advantageous to ensure that the buffers provide start on a
- * host cache line.
+ * This is the host physical address where data for the packet may
+	 * be placed in host memory.
*/
+ uint64_t address;
} __attribute__((packed));
-/* Completion Ring Structures */
-/* Note: This structure is used by the HWRM to communicate HWRM Error. */
-/* Base Completion Record (16 bytes) */
+/* cmpl_base (size:128b/16B) */
struct cmpl_base {
- uint16_t type;
- /* unused is 10 b */
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define CMPL_BASE_TYPE_MASK UINT32_C(0x3f)
+ #define CMPL_BASE_TYPE_SFT 0
/*
- * This field indicates the exact type of the completion. By
- * convention, the LSB identifies the length of the record in
- * 16B units. Even values indicate 16B records. Odd values
- * indicate 32B records.
+ * TX L2 completion:
+ * Completion of TX packet. Length = 16B
*/
- #define CMPL_BASE_TYPE_MASK UINT32_C(0x3f)
- #define CMPL_BASE_TYPE_SFT 0
- /* TX L2 completion: Completion of TX packet. Length = 16B */
- #define CMPL_BASE_TYPE_TX_L2 UINT32_C(0x0)
+ #define CMPL_BASE_TYPE_TX_L2 UINT32_C(0x0)
/*
- * RX L2 completion: Completion of and L2 RX
- * packet. Length = 32B
+ * RX L2 completion:
+	 * Completion of an L2 RX packet. Length = 32B
*/
- #define CMPL_BASE_TYPE_RX_L2 UINT32_C(0x11)
+ #define CMPL_BASE_TYPE_RX_L2 UINT32_C(0x11)
/*
- * RX Aggregation Buffer completion : Completion
- * of an L2 aggregation buffer in support of
- * TPA, HDS, or Jumbo packet completion. Length
- * = 16B
+ * RX Aggregation Buffer completion :
+ * Completion of an L2 aggregation buffer in support of
+ * TPA, HDS, or Jumbo packet completion. Length = 16B
*/
- #define CMPL_BASE_TYPE_RX_AGG UINT32_C(0x12)
+ #define CMPL_BASE_TYPE_RX_AGG UINT32_C(0x12)
/*
- * RX L2 TPA Start Completion: Completion at the
- * beginning of a TPA operation. Length = 32B
+ * RX L2 TPA Start Completion:
+ * Completion at the beginning of a TPA operation.
+ * Length = 32B
*/
- #define CMPL_BASE_TYPE_RX_TPA_START UINT32_C(0x13)
+ #define CMPL_BASE_TYPE_RX_TPA_START UINT32_C(0x13)
/*
- * RX L2 TPA End Completion: Completion at the
- * end of a TPA operation. Length = 32B
+ * RX L2 TPA End Completion:
+ * Completion at the end of a TPA operation.
+ * Length = 32B
*/
- #define CMPL_BASE_TYPE_RX_TPA_END UINT32_C(0x15)
+ #define CMPL_BASE_TYPE_RX_TPA_END UINT32_C(0x15)
/*
- * Statistics Ejection Completion: Completion of
- * statistics data ejection buffer. Length = 16B
+ * Statistics Ejection Completion:
+ * Completion of statistics data ejection buffer.
+ * Length = 16B
*/
- #define CMPL_BASE_TYPE_STAT_EJECT UINT32_C(0x1a)
- /* HWRM Command Completion: Completion of an HWRM command. */
- #define CMPL_BASE_TYPE_HWRM_DONE UINT32_C(0x20)
+ #define CMPL_BASE_TYPE_STAT_EJECT UINT32_C(0x1a)
+ /*
+ * HWRM Command Completion:
+ * Completion of an HWRM command.
+ */
+ #define CMPL_BASE_TYPE_HWRM_DONE UINT32_C(0x20)
/* Forwarded HWRM Request */
- #define CMPL_BASE_TYPE_HWRM_FWD_REQ UINT32_C(0x22)
+ #define CMPL_BASE_TYPE_HWRM_FWD_REQ UINT32_C(0x22)
/* Forwarded HWRM Response */
- #define CMPL_BASE_TYPE_HWRM_FWD_RESP UINT32_C(0x24)
+ #define CMPL_BASE_TYPE_HWRM_FWD_RESP UINT32_C(0x24)
/* HWRM Asynchronous Event Information */
- #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e)
+ #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e)
/* CQ Notification */
- #define CMPL_BASE_TYPE_CQ_NOTIFICATION UINT32_C(0x30)
+ #define CMPL_BASE_TYPE_CQ_NOTIFICATION UINT32_C(0x30)
/* SRQ Threshold Event */
- #define CMPL_BASE_TYPE_SRQ_EVENT UINT32_C(0x32)
+ #define CMPL_BASE_TYPE_SRQ_EVENT UINT32_C(0x32)
/* DBQ Threshold Event */
- #define CMPL_BASE_TYPE_DBQ_EVENT UINT32_C(0x34)
+ #define CMPL_BASE_TYPE_DBQ_EVENT UINT32_C(0x34)
/* QP Async Notification */
- #define CMPL_BASE_TYPE_QP_EVENT UINT32_C(0x38)
+ #define CMPL_BASE_TYPE_QP_EVENT UINT32_C(0x38)
/* Function Async Notification */
- #define CMPL_BASE_TYPE_FUNC_EVENT UINT32_C(0x3a)
- /* unused is 10 b */
- uint16_t info1;
+ #define CMPL_BASE_TYPE_FUNC_EVENT UINT32_C(0x3a)
+ #define CMPL_BASE_TYPE_LAST CMPL_BASE_TYPE_FUNC_EVENT
/* info1 is 16 b */
- uint32_t info2;
+ uint16_t info1;
/* info2 is 32 b */
- uint32_t info3_v;
- /* info3 is 31 b */
- /*
- * This value is written by the NIC such that it will be
- * different for each pass through the completion queue. The
- * even passes will write 1. The odd passes will write 0.
- */
- #define CMPL_BASE_V UINT32_C(0x1)
- /* info3 is 31 b */
- #define CMPL_BASE_INFO3_MASK UINT32_C(0xfffffffe)
- #define CMPL_BASE_INFO3_SFT 1
- uint32_t info4;
+ uint32_t info2;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ uint32_t info3_v;
+ #define CMPL_BASE_V UINT32_C(0x1)
+ #define CMPL_BASE_INFO3_MASK UINT32_C(0xfffffffe)
+ #define CMPL_BASE_INFO3_SFT 1
/* info4 is 32 b */
+ uint32_t info4;
} __attribute__((packed));
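
The sketch below shows one common way the CMPL_BASE_V convention is used when scanning a completion ring: the expected polarity of the valid bit flips each time the consumer index wraps, and an entry is only consumed once its valid bit matches. This is a generic illustration, not the driver's actual ring code.

#include <stdbool.h>

/* Illustrative only: 'valid_polarity' starts at 1 and is toggled by the
 * caller each time the consumer index wraps around the ring.
 */
static bool example_cmpl_is_valid(const struct cmpl_base *cmp,
				  uint32_t valid_polarity)
{
	return (cmp->info3_v & CMPL_BASE_V) == valid_polarity;
}

/* Extract the completion type once the entry is known to be valid. */
static uint16_t example_cmpl_type(const struct cmpl_base *cmp)
{
	return (cmp->type & CMPL_BASE_TYPE_MASK) >> CMPL_BASE_TYPE_SFT;
}
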
-/* TX Completion Record (16 bytes) */
+/* tx_cmpl (size:128b/16B) */
struct tx_cmpl {
- uint16_t flags_type;
+ uint16_t flags_type;
/*
- * This field indicates the exact type of the completion. By
- * convention, the LSB identifies the length of the record in
- * 16B units. Even values indicate 16B records. Odd values
- * indicate 32B records.
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
*/
- #define TX_CMPL_TYPE_MASK UINT32_C(0x3f)
- #define TX_CMPL_TYPE_SFT 0
- /* TX L2 completion: Completion of TX packet. Length = 16B */
- #define TX_CMPL_TYPE_TX_L2 UINT32_C(0x0)
+ #define TX_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define TX_CMPL_TYPE_SFT 0
/*
- * When this bit is '1', it indicates a packet that has an error
- * of some type. Type of error is indicated in error_flags.
+ * TX L2 completion:
+ * Completion of TX packet. Length = 16B
*/
- #define TX_CMPL_FLAGS_ERROR UINT32_C(0x40)
+ #define TX_CMPL_TYPE_TX_L2 UINT32_C(0x0)
+ #define TX_CMPL_TYPE_LAST TX_CMPL_TYPE_TX_L2
+ #define TX_CMPL_FLAGS_MASK UINT32_C(0xffc0)
+ #define TX_CMPL_FLAGS_SFT 6
+ /*
+ * When this bit is '1', it indicates a packet that has an
+ * error of some type. Type of error is indicated in
+ * error_flags.
+ */
+ #define TX_CMPL_FLAGS_ERROR UINT32_C(0x40)
/*
* When this bit is '1', it indicates that the packet completed
- * was transmitted using the push acceleration data provided by
- * the driver. When this bit is '0', it indicates that the
+ * was transmitted using the push acceleration data provided
+ * by the driver. When this bit is '0', it indicates that the
 * packet had no push acceleration data written or was executed
* as a normal packet even though push data was provided.
*/
- #define TX_CMPL_FLAGS_PUSH UINT32_C(0x80)
- #define TX_CMPL_FLAGS_MASK UINT32_C(0xffc0)
- #define TX_CMPL_FLAGS_SFT 6
- uint16_t unused_0;
+ #define TX_CMPL_FLAGS_PUSH UINT32_C(0x80)
/* unused1 is 16 b */
- uint32_t opaque;
+ uint16_t unused_0;
/*
- * This is a copy of the opaque field from the first TX BD of
- * this transmitted packet.
+ * This is a copy of the opaque field from the first TX BD of this
+ * transmitted packet.
*/
- uint16_t errors_v;
+ uint32_t opaque;
+ uint16_t errors_v;
/*
- * This value is written by the NIC such that it will be
- * different for each pass through the completion queue. The
- * even passes will write 1. The odd passes will write 0.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- #define TX_CMPL_V UINT32_C(0x1)
+ #define TX_CMPL_V UINT32_C(0x1)
+ #define TX_CMPL_ERRORS_MASK UINT32_C(0xfffe)
+ #define TX_CMPL_ERRORS_SFT 1
/*
- * This error indicates that there was some sort of problem with
- * the BDs for the packet.
+ * This error indicates that there was some sort of problem
+ * with the BDs for the packet.
*/
- #define TX_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe)
- #define TX_CMPL_ERRORS_BUFFER_ERROR_SFT 1
+ #define TX_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe)
+ #define TX_CMPL_ERRORS_BUFFER_ERROR_SFT 1
/* No error */
- #define TX_CMPL_ERRORS_BUFFER_ERROR_NO_ERROR (UINT32_C(0x0) << 1)
- /* Bad Format: BDs were not formatted correctly. */
- #define TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT (UINT32_C(0x2) << 1)
+ #define TX_CMPL_ERRORS_BUFFER_ERROR_NO_ERROR (UINT32_C(0x0) << 1)
+ /*
+ * Bad Format:
+ * BDs were not formatted correctly.
+ */
+ #define TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT (UINT32_C(0x2) << 1)
#define TX_CMPL_ERRORS_BUFFER_ERROR_LAST \
TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT
/*
- * When this bit is '1', it indicates that the length of the
- * packet was zero. No packet was transmitted.
+ * When this bit is '1', it indicates that the length of
+ * the packet was zero. No packet was transmitted.
*/
- #define TX_CMPL_ERRORS_ZERO_LENGTH_PKT UINT32_C(0x10)
+ #define TX_CMPL_ERRORS_ZERO_LENGTH_PKT UINT32_C(0x10)
/*
- * When this bit is '1', it indicates that the packet was longer
- * than the programmed limit in TDI. No packet was transmitted.
+ * When this bit is '1', it indicates that the packet
+ * was longer than the programmed limit in TDI. No
+ * packet was transmitted.
*/
- #define TX_CMPL_ERRORS_EXCESSIVE_BD_LENGTH UINT32_C(0x20)
+ #define TX_CMPL_ERRORS_EXCESSIVE_BD_LENGTH UINT32_C(0x20)
/*
* When this bit is '1', it indicates that one or more of the
- * BDs associated with this packet generated a PCI error. This
- * probably means the address was not valid.
+ * BDs associated with this packet generated a PCI error.
+ * This probably means the address was not valid.
*/
- #define TX_CMPL_ERRORS_DMA_ERROR UINT32_C(0x40)
+ #define TX_CMPL_ERRORS_DMA_ERROR UINT32_C(0x40)
/*
* When this bit is '1', it indicates that the packet was longer
- * than indicated by the hint. No packet was transmitted.
+ * than indicated by the hint. No packet was transmitted.
*/
- #define TX_CMPL_ERRORS_HINT_TOO_SHORT UINT32_C(0x80)
+ #define TX_CMPL_ERRORS_HINT_TOO_SHORT UINT32_C(0x80)
/*
* When this bit is '1', it indicates that the packet was
- * dropped due to Poison TLP error on one or more of the TLPs in
- * the PXP completion.
+ * dropped due to Poison TLP error on one or more of the
+ * TLPs in the PXP completion.
*/
- #define TX_CMPL_ERRORS_POISON_TLP_ERROR UINT32_C(0x100)
- #define TX_CMPL_ERRORS_MASK UINT32_C(0xfffe)
- #define TX_CMPL_ERRORS_SFT 1
- uint16_t unused_1;
+ #define TX_CMPL_ERRORS_POISON_TLP_ERROR UINT32_C(0x100)
/* unused2 is 16 b */
- uint32_t unused_2;
+ uint16_t unused_1;
/* unused3 is 32 b */
+ uint32_t unused_2;
} __attribute__((packed));
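
Tying the TX completion fields together, here is a hedged sketch of how a poll loop might interpret one tx_cmpl record after its valid bit has already been checked; the helper name and the error reporting are purely illustrative.

#include <stdio.h>

/* Illustrative only: returns the software cookie carried in the BD's
 * opaque field and reports whether the transmit failed.
 */
static uint32_t example_handle_tx_cmpl(const struct tx_cmpl *txcmp,
				       int *error)
{
	*error = !!(txcmp->flags_type & TX_CMPL_FLAGS_ERROR);
	if (*error)
		printf("TX error flags: 0x%x\n",
		       (txcmp->errors_v & TX_CMPL_ERRORS_MASK) >>
		       TX_CMPL_ERRORS_SFT);
	return txcmp->opaque;	/* copy of the first TX BD's opaque field */
}
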
-/* RX Packet Completion Record (32 bytes split to 2 16-byte struct) */
+/* rx_pkt_cmpl (size:128b/16B) */
struct rx_pkt_cmpl {
- uint16_t flags_type;
+ uint16_t flags_type;
/*
- * This field indicates the exact type of the completion. By
- * convention, the LSB identifies the length of the record in
- * 16B units. Even values indicate 16B records. Odd values
- * indicate 32B records.
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
*/
- #define RX_PKT_CMPL_TYPE_MASK UINT32_C(0x3f)
- #define RX_PKT_CMPL_TYPE_SFT 0
+ #define RX_PKT_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define RX_PKT_CMPL_TYPE_SFT 0
/*
- * RX L2 completion: Completion of and L2 RX
- * packet. Length = 32B
+ * RX L2 completion:
+	 * Completion of an L2 RX packet. Length = 32B
*/
- #define RX_PKT_CMPL_TYPE_RX_L2 UINT32_C(0x11)
+ #define RX_PKT_CMPL_TYPE_RX_L2 UINT32_C(0x11)
+ #define RX_PKT_CMPL_TYPE_LAST RX_PKT_CMPL_TYPE_RX_L2
+ #define RX_PKT_CMPL_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_PKT_CMPL_FLAGS_SFT 6
/*
- * When this bit is '1', it indicates a packet that has an error
- * of some type. Type of error is indicated in error_flags.
+ * When this bit is '1', it indicates a packet that has an
+ * error of some type. Type of error is indicated in
+ * error_flags.
*/
- #define RX_PKT_CMPL_FLAGS_ERROR UINT32_C(0x40)
+ #define RX_PKT_CMPL_FLAGS_ERROR UINT32_C(0x40)
/* This field indicates how the packet was placed in the buffer. */
- #define RX_PKT_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380)
- #define RX_PKT_CMPL_FLAGS_PLACEMENT_SFT 7
- /* Normal: Packet was placed using normal algorithm. */
- #define RX_PKT_CMPL_FLAGS_PLACEMENT_NORMAL (UINT32_C(0x0) << 7)
- /* Jumbo: Packet was placed using jumbo algorithm. */
- #define RX_PKT_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7)
- /*
- * Header/Data Separation: Packet was placed
- * using Header/Data separation algorithm. The
- * separation location is indicated by the itype
- * field.
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380)
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_SFT 7
+ /*
+ * Normal:
+ * Packet was placed using normal algorithm.
+ */
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_NORMAL (UINT32_C(0x0) << 7)
+ /*
+ * Jumbo:
+ * Packet was placed using jumbo algorithm.
+ */
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7)
+ /*
+ * Header/Data Separation:
+ * Packet was placed using Header/Data separation algorithm.
+ * The separation location is indicated by the itype field.
*/
- #define RX_PKT_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7)
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7)
#define RX_PKT_CMPL_FLAGS_PLACEMENT_LAST \
RX_PKT_CMPL_FLAGS_PLACEMENT_HDS
/* This bit is '1' if the RSS field in this completion is valid. */
- #define RX_PKT_CMPL_FLAGS_RSS_VALID UINT32_C(0x400)
+ #define RX_PKT_CMPL_FLAGS_RSS_VALID UINT32_C(0x400)
/* unused is 1 b */
- #define RX_PKT_CMPL_FLAGS_UNUSED UINT32_C(0x800)
+ #define RX_PKT_CMPL_FLAGS_UNUSED UINT32_C(0x800)
/*
 * This value indicates what the inner packet was determined to
 * be.
*/
- #define RX_PKT_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000)
- #define RX_PKT_CMPL_FLAGS_ITYPE_SFT 12
- /* Not Known: Indicates that the packet type was not known. */
- #define RX_PKT_CMPL_FLAGS_ITYPE_NOT_KNOWN (UINT32_C(0x0) << 12)
+ #define RX_PKT_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000)
+ #define RX_PKT_CMPL_FLAGS_ITYPE_SFT 12
/*
- * IP Packet: Indicates that the packet was an
- * IP packet, but further classification was not
- * possible.
+ * Not Known:
+ * Indicates that the packet type was not known.
*/
- #define RX_PKT_CMPL_FLAGS_ITYPE_IP (UINT32_C(0x1) << 12)
+ #define RX_PKT_CMPL_FLAGS_ITYPE_NOT_KNOWN \
+ (UINT32_C(0x0) << 12)
/*
- * TCP Packet: Indicates that the packet was IP
- * and TCP. This indicates that the
- * payload_offset field is valid.
+ * IP Packet:
+ * Indicates that the packet was an IP packet, but further
+ * classification was not possible.
*/
- #define RX_PKT_CMPL_FLAGS_ITYPE_TCP (UINT32_C(0x2) << 12)
+ #define RX_PKT_CMPL_FLAGS_ITYPE_IP \
+ (UINT32_C(0x1) << 12)
/*
- * UDP Packet: Indicates that the packet was IP
- * and UDP. This indicates that the
- * payload_offset field is valid.
+ * TCP Packet:
+ * Indicates that the packet was IP and TCP.
+ * This indicates that the payload_offset field is valid.
*/
- #define RX_PKT_CMPL_FLAGS_ITYPE_UDP (UINT32_C(0x3) << 12)
+ #define RX_PKT_CMPL_FLAGS_ITYPE_TCP \
+ (UINT32_C(0x2) << 12)
/*
- * FCoE Packet: Indicates that the packet was
- * recognized as a FCoE. This also indicates
- * that the payload_offset field is valid.
+ * UDP Packet:
+ * Indicates that the packet was IP and UDP.
+ * This indicates that the payload_offset field is valid.
*/
- #define RX_PKT_CMPL_FLAGS_ITYPE_FCOE (UINT32_C(0x4) << 12)
+ #define RX_PKT_CMPL_FLAGS_ITYPE_UDP \
+ (UINT32_C(0x3) << 12)
/*
- * RoCE Packet: Indicates that the packet was
- * recognized as a RoCE. This also indicates
- * that the payload_offset field is valid.
+ * FCoE Packet:
+ * Indicates that the packet was recognized as a FCoE.
+ * This also indicates that the payload_offset field is valid.
*/
- #define RX_PKT_CMPL_FLAGS_ITYPE_ROCE (UINT32_C(0x5) << 12)
+ #define RX_PKT_CMPL_FLAGS_ITYPE_FCOE \
+ (UINT32_C(0x4) << 12)
/*
- * ICMP Packet: Indicates that the packet was
- * recognized as ICMP. This indicates that the
- * payload_offset field is valid.
+ * RoCE Packet:
+ * Indicates that the packet was recognized as a RoCE.
+ * This also indicates that the payload_offset field is valid.
*/
- #define RX_PKT_CMPL_FLAGS_ITYPE_ICMP (UINT32_C(0x7) << 12)
+ #define RX_PKT_CMPL_FLAGS_ITYPE_ROCE \
+ (UINT32_C(0x5) << 12)
/*
- * PtP packet wo/timestamp: Indicates that the
- * packet was recognized as a PtP packet.
+ * ICMP Packet:
+ * Indicates that the packet was recognized as ICMP.
+ * This indicates that the payload_offset field is valid.
*/
- #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_WO_TIMESTAMP (UINT32_C(0x8) << 12)
+ #define RX_PKT_CMPL_FLAGS_ITYPE_ICMP \
+ (UINT32_C(0x7) << 12)
/*
- * PtP packet w/timestamp: Indicates that the
- * packet was recognized as a PtP packet and
- * that a timestamp was taken for the packet.
+ * PtP packet wo/timestamp:
+ * Indicates that the packet was recognized as a PtP
+ * packet.
*/
- #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP (UINT32_C(0x9) << 12)
+ #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_WO_TIMESTAMP \
+ (UINT32_C(0x8) << 12)
+ /*
+ * PtP packet w/timestamp:
+ * Indicates that the packet was recognized as a PtP
+ * packet and that a timestamp was taken for the packet.
+ */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP \
+ (UINT32_C(0x9) << 12)
#define RX_PKT_CMPL_FLAGS_ITYPE_LAST \
RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP
- #define RX_PKT_CMPL_FLAGS_MASK UINT32_C(0xffc0)
- #define RX_PKT_CMPL_FLAGS_SFT 6
- uint16_t len;
/*
* This is the length of the data for the packet stored in the
- * buffer(s) identified by the opaque value. This includes the
- * packet BD and any associated buffer BDs. This does not
- * include the length of any data places in aggregation BDs.
+ * buffer(s) identified by the opaque value. This includes
+ * the packet BD and any associated buffer BDs. This does not include
+	 * the length of any data placed in aggregation BDs.
*/
- uint32_t opaque;
+ uint16_t len;
/*
- * This is a copy of the opaque field from the RX BD this
- * completion corresponds to.
+ * This is a copy of the opaque field from the RX BD this completion
+ * corresponds to.
*/
- uint8_t agg_bufs_v1;
- /* unused1 is 2 b */
+ uint32_t opaque;
+ uint8_t agg_bufs_v1;
/*
- * This value is written by the NIC such that it will be
- * different for each pass through the completion queue. The
- * even passes will write 1. The odd passes will write 0.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- #define RX_PKT_CMPL_V1 UINT32_C(0x1)
+ #define RX_PKT_CMPL_V1 UINT32_C(0x1)
/*
- * This value is the number of aggregation buffers that follow
- * this entry in the completion ring that are a part of this
- * packet. If the value is zero, then the packet is completely
- * contained in the buffer space provided for the packet in the
- * RX ring.
+ * This value is the number of aggregation buffers that follow this
+ * entry in the completion ring that are a part of this packet.
+ * If the value is zero, then the packet is completely contained
+ * in the buffer space provided for the packet in the RX ring.
*/
- #define RX_PKT_CMPL_AGG_BUFS_MASK UINT32_C(0x3e)
- #define RX_PKT_CMPL_AGG_BUFS_SFT 1
+ #define RX_PKT_CMPL_AGG_BUFS_MASK UINT32_C(0x3e)
+ #define RX_PKT_CMPL_AGG_BUFS_SFT 1
/* unused1 is 2 b */
- uint8_t rss_hash_type;
- /*
- * This is the RSS hash type for the packet. The value is packed
- * {tuple_extrac_op[1:0],rss_profile_id[4:0],tuple_extrac_op[2]}
- * . The value of tuple_extrac_op provides the information about
- * what fields the hash was computed on. * 0: The RSS hash was
- * computed over source IP address, destination IP address,
- * source port, and destination port of inner IP and TCP or UDP
- * headers. Note: For non-tunneled packets, the packet headers
- * are considered inner packet headers for the RSS hash
- * computation purpose. * 1: The RSS hash was computed over
- * source IP address and destination IP address of inner IP
- * header. Note: For non-tunneled packets, the packet headers
- * are considered inner packet headers for the RSS hash
- * computation purpose. * 2: The RSS hash was computed over
- * source IP address, destination IP address, source port, and
- * destination port of IP and TCP or UDP headers of outer tunnel
- * headers. Note: For non-tunneled packets, this value is not
- * applicable. * 3: The RSS hash was computed over source IP
- * address and destination IP address of IP header of outer
- * tunnel headers. Note: For non-tunneled packets, this value is
- * not applicable. Note that 4-tuples values listed above are
- * applicable for layer 4 protocols supported and enabled for
- * RSS in the hardware, HWRM firmware, and drivers. For example,
- * if RSS hash is supported and enabled for TCP traffic only,
- * then the values of tuple_extract_op corresponding to 4-tuples
- * are only valid for TCP traffic.
- */
- uint8_t payload_offset;
- /*
- * This value indicates the offset in bytes from the beginning
- * of the packet where the inner payload starts. This value is
- * valid for TCP, UDP, FCoE, and RoCE packets. A value of zero
- * indicates that header is 256B into the packet.
- */
- uint8_t unused_1;
+ #define RX_PKT_CMPL_UNUSED1_MASK UINT32_C(0xc0)
+ #define RX_PKT_CMPL_UNUSED1_SFT 6
+ /*
+ * This is the RSS hash type for the packet. The value is packed
+ * {tuple_extract_op[1:0],rss_profile_id[4:0],tuple_extract_op[2]}.
+ *
+ * The value of tuple_extract_op provides the information about
+ * what fields the hash was computed on.
+ * * 0: The RSS hash was computed over source IP address,
+ * destination IP address, source port, and destination port of inner
+ * IP and TCP or UDP headers. Note: For non-tunneled packets,
+ * the packet headers are considered inner packet headers for the RSS
+ * hash computation purpose.
+ * * 1: The RSS hash was computed over source IP address and destination
+ * IP address of inner IP header. Note: For non-tunneled packets,
+ * the packet headers are considered inner packet headers for the RSS
+ * hash computation purpose.
+ * * 2: The RSS hash was computed over source IP address,
+ * destination IP address, source port, and destination port of
+ * IP and TCP or UDP headers of outer tunnel headers.
+ * Note: For non-tunneled packets, this value is not applicable.
+ * * 3: The RSS hash was computed over source IP address and
+ * destination IP address of IP header of outer tunnel headers.
+ * Note: For non-tunneled packets, this value is not applicable.
+ *
+ * Note that 4-tuples values listed above are applicable
+ * for layer 4 protocols supported and enabled for RSS in the hardware,
+ * HWRM firmware, and drivers. For example, if RSS hash is supported and
+ * enabled for TCP traffic only, then the values of tuple_extract_op
+ * corresponding to 4-tuples are only valid for TCP traffic.
+ */
+ uint8_t rss_hash_type;
+ /*
+ * This value indicates the offset in bytes from the beginning of the packet
+ * where the inner payload starts. This value is valid for TCP, UDP,
+ * FCoE, and RoCE packets.
+ *
+ * A value of zero indicates that the header is 256B into the packet.
+ */
+ uint8_t payload_offset;
/* unused2 is 8 b */
- uint32_t rss_hash;
+ uint8_t unused1;
/*
* This value is the RSS hash value calculated for the packet
* based on the mode bits and key value in the VNIC.
*/
+ uint32_t rss_hash;
} __attribute__((packed));
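
The flags, mask, and shift macros above describe how several values are packed into single words of this completion record. Below is a minimal sketch of how a receive path might decode them, assuming the low half of the record is declared as struct rx_pkt_cmpl (as the RX_PKT_CMPL_* names suggest) and that the device writes the record little-endian; the helper names and the expected_v phase argument are illustrative, not part of this patch.

    #include <stdint.h>

    #include <rte_byteorder.h>

    /*
     * Returns the number of aggregation buffer entries that follow this
     * completion, or -1 when the valid bit does not yet match the phase
     * expected for the current pass through the ring.
     */
    static inline int
    rx_pkt_cmpl_agg_bufs(const struct rx_pkt_cmpl *rxcmp, uint8_t expected_v)
    {
            /* Bit 0 of agg_bufs_v1 toggles between ring passes. */
            if ((rxcmp->agg_bufs_v1 & RX_PKT_CMPL_V1) != expected_v)
                    return -1;      /* not yet written by the NIC */

            /* Bits [5:1] give the number of trailing aggregation buffers. */
            return (rxcmp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >>
                    RX_PKT_CMPL_AGG_BUFS_SFT;
    }

    /* Length of the packet data described by this completion. */
    static inline uint16_t
    rx_pkt_cmpl_data_len(const struct rx_pkt_cmpl *rxcmp)
    {
            return rte_le_to_cpu_16(rxcmp->len);
    }
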
-/* last 16 bytes of RX Packet Completion Record */
+/* rx_pkt_cmpl_hi (size:128b/16B) */
struct rx_pkt_cmpl_hi {
- uint32_t flags2;
+ uint32_t flags2;
/*
* This indicates that the ip checksum was calculated for the
- * inner packet and that the ip_cs_error field indicates if
- * there was an error.
+ * inner packet and that the ip_cs_error field indicates if there
+ * was an error.
*/
- #define RX_PKT_CMPL_FLAGS2_IP_CS_CALC UINT32_C(0x1)
+ #define RX_PKT_CMPL_FLAGS2_IP_CS_CALC UINT32_C(0x1)
/*
* This indicates that the TCP, UDP or ICMP checksum was
- * calculated for the inner packet and that the l4_cs_error
- * field indicates if there was an error.
+ * calculated for the inner packet and that the l4_cs_error field
+ * indicates if there was an error.
*/
- #define RX_PKT_CMPL_FLAGS2_L4_CS_CALC UINT32_C(0x2)
+ #define RX_PKT_CMPL_FLAGS2_L4_CS_CALC UINT32_C(0x2)
/*
* This indicates that the ip checksum was calculated for the
- * tunnel header and that the t_ip_cs_error field indicates if
- * there was an error.
+ * tunnel header and that the t_ip_cs_error field indicates if there
+ * was an error.
*/
- #define RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC UINT32_C(0x4)
+ #define RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC UINT32_C(0x4)
/*
- * This indicates that the UDP checksum was calculated for the
- * tunnel packet and that the t_l4_cs_error field indicates if
- * there was an error.
+ * This indicates that the UDP checksum was
+ * calculated for the tunnel packet and that the t_l4_cs_error field
+ * indicates if there was an error.
*/
- #define RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC UINT32_C(0x8)
+ #define RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC UINT32_C(0x8)
/* This value indicates what format the metadata field is. */
- #define RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK UINT32_C(0xf0)
- #define RX_PKT_CMPL_FLAGS2_META_FORMAT_SFT 4
- /* No metadata informtaion. Value is zero. */
- #define RX_PKT_CMPL_FLAGS2_META_FORMAT_NONE (UINT32_C(0x0) << 4)
- /*
- * The metadata field contains the VLAN tag and
- * TPID value. - metadata[11:0] contains the
- * vlan VID value. - metadata[12] contains the
- * vlan DE value. - metadata[15:13] contains the
- * vlan PRI value. - metadata[31:16] contains
- * the vlan TPID value.
- */
- #define RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN (UINT32_C(0x1) << 4)
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK UINT32_C(0xf0)
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_SFT 4
+ /* No metadata information. Value is zero. */
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_NONE (UINT32_C(0x0) << 4)
+ /*
+ * The metadata field contains the VLAN tag and TPID value.
+ * - metadata[11:0] contains the vlan VID value.
+ * - metadata[12] contains the vlan DE value.
+ * - metadata[15:13] contains the vlan PRI value.
+ * - metadata[31:16] contains the vlan TPID value.
+ */
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN (UINT32_C(0x1) << 4)
#define RX_PKT_CMPL_FLAGS2_META_FORMAT_LAST \
RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN
/*
- * This field indicates the IP type for the inner-most IP
- * header. A value of '0' indicates IPv4. A value of '1'
- * indicates IPv6. This value is only valid if itype indicates a
- * packet with an IP header.
+ * This field indicates the IP type for the inner-most IP header.
+ * A value of '0' indicates IPv4. A value of '1' indicates IPv6.
+ * This value is only valid if itype indicates a packet
+ * with an IP header.
*/
- #define RX_PKT_CMPL_FLAGS2_IP_TYPE UINT32_C(0x100)
- uint32_t metadata;
+ #define RX_PKT_CMPL_FLAGS2_IP_TYPE UINT32_C(0x100)
/*
- * This is data from the CFA block as indicated by the
- * meta_format field.
+ * This is data from the CFA block as indicated by the meta_format
+ * field.
*/
+ uint32_t metadata;
/* When meta_format=1, this value is the VLAN VID. */
- #define RX_PKT_CMPL_METADATA_VID_MASK UINT32_C(0xfff)
- #define RX_PKT_CMPL_METADATA_VID_SFT 0
+ #define RX_PKT_CMPL_METADATA_VID_MASK UINT32_C(0xfff)
+ #define RX_PKT_CMPL_METADATA_VID_SFT 0
/* When meta_format=1, this value is the VLAN DE. */
- #define RX_PKT_CMPL_METADATA_DE UINT32_C(0x1000)
+ #define RX_PKT_CMPL_METADATA_DE UINT32_C(0x1000)
/* When meta_format=1, this value is the VLAN PRI. */
- #define RX_PKT_CMPL_METADATA_PRI_MASK UINT32_C(0xe000)
- #define RX_PKT_CMPL_METADATA_PRI_SFT 13
+ #define RX_PKT_CMPL_METADATA_PRI_MASK UINT32_C(0xe000)
+ #define RX_PKT_CMPL_METADATA_PRI_SFT 13
/* When meta_format=1, this value is the VLAN TPID. */
- #define RX_PKT_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000)
- #define RX_PKT_CMPL_METADATA_TPID_SFT 16
- uint16_t errors_v2;
+ #define RX_PKT_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000)
+ #define RX_PKT_CMPL_METADATA_TPID_SFT 16
+ uint16_t errors_v2;
/*
- * This value is written by the NIC such that it will be
- * different for each pass through the completion queue. The
- * even passes will write 1. The odd passes will write 0.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- #define RX_PKT_CMPL_V2 UINT32_C(0x1)
+ #define RX_PKT_CMPL_V2 \
+ UINT32_C(0x1)
+ #define RX_PKT_CMPL_ERRORS_MASK \
+ UINT32_C(0xfffe)
+ #define RX_PKT_CMPL_ERRORS_SFT 1
/*
* This error indicates that there was some sort of problem with
* the BDs for the packet that was found after part of the
- * packet was already placed. The packet should be treated as
+ * packet was already placed. The packet should be treated as
* invalid.
*/
- #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe)
- #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT 1
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK \
+ UINT32_C(0xe)
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT 1
/* No buffer error */
- #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (UINT32_C(0x0) << 1)
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER \
+ (UINT32_C(0x0) << 1)
/*
- * Did Not Fit: Packet did not fit into packet
- * buffer provided. For regular placement, this
- * means the packet did not fit in the buffer
- * provided. For HDS and jumbo placement, this
- * means that the packet could not be placed
- * into 7 physical buffers or less.
+ * Did Not Fit:
+ * Packet did not fit into packet buffer provided.
+ * For regular placement, this means the packet did not fit
+ * in the buffer provided. For HDS and jumbo placement, this
+ * means that the packet could not be placed into 7 physical
+ * buffers or less.
*/
#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT \
(UINT32_C(0x1) << 1)
/*
- * Not On Chip: All BDs needed for the packet
- * were not on-chip when the packet arrived.
+ * Not On Chip:
+ * All BDs needed for the packet were not on-chip when
+ * the packet arrived.
*/
#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP \
(UINT32_C(0x2) << 1)
- /* Bad Format: BDs were not formatted correctly. */
+ /*
+ * Bad Format:
+ * BDs were not formatted correctly.
+ */
#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT \
(UINT32_C(0x3) << 1)
#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_LAST \
RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT
- /* This indicates that there was an error in the IP header checksum. */
- #define RX_PKT_CMPL_ERRORS_IP_CS_ERROR UINT32_C(0x10)
/*
- * This indicates that there was an error in the TCP, UDP or
- * ICMP checksum.
+ * This indicates that there was an error in the IP header
+ * checksum.
*/
- #define RX_PKT_CMPL_ERRORS_L4_CS_ERROR UINT32_C(0x20)
+ #define RX_PKT_CMPL_ERRORS_IP_CS_ERROR \
+ UINT32_C(0x10)
/*
- * This indicates that there was an error in the tunnel IP
- * header checksum.
+ * This indicates that there was an error in the TCP, UDP
+ * or ICMP checksum.
*/
- #define RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR UINT32_C(0x40)
+ #define RX_PKT_CMPL_ERRORS_L4_CS_ERROR \
+ UINT32_C(0x20)
/*
- * This indicates that there was an error in the tunnel UDP
- * checksum.
+ * This indicates that there was an error in the tunnel
+ * IP header checksum.
*/
- #define RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR UINT32_C(0x80)
+ #define RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR \
+ UINT32_C(0x40)
+ /*
+ * This indicates that there was an error in the tunnel
+ * UDP checksum.
+ */
+ #define RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR \
+ UINT32_C(0x80)
/*
* This indicates that there was a CRC error on either an FCoE
- * or RoCE packet. The itype indicates the packet type.
+ * or RoCE packet. The itype indicates the packet type.
*/
- #define RX_PKT_CMPL_ERRORS_CRC_ERROR UINT32_C(0x100)
+ #define RX_PKT_CMPL_ERRORS_CRC_ERROR \
+ UINT32_C(0x100)
/*
- * This indicates that there was an error in the tunnel portion
- * of the packet when this field is non-zero.
+ * This indicates that there was an error in the tunnel
+ * portion of the packet when this field is non-zero.
*/
- #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_MASK UINT32_C(0xe00)
- #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_SFT 9
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_MASK \
+ UINT32_C(0xe00)
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_SFT 9
/*
- * No additional error occurred on the tunnel
- * portion of the packet of the packet does not
- * have a tunnel.
+ * No additional error occurred on the tunnel portion
+ * of the packet or the packet does not have a tunnel.
*/
- #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR (UINT32_C(0x0) << 9)
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR \
+ (UINT32_C(0x0) << 9)
/*
- * Indicates that IP header version does not
- * match expectation from L2 Ethertype for IPv4
- * and IPv6 in the tunnel header.
+ * Indicates that IP header version does not match
+ * expectation from L2 Ethertype for IPv4 and IPv6
+ * in the tunnel header.
*/
#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION \
(UINT32_C(0x1) << 9)
/*
- * Indicates that header length is out of range
- * in the tunnel header. Valid for IPv4.
+ * Indicates that the header length is out of range in the
+ * tunnel header. Valid for IPv4.
*/
#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN \
(UINT32_C(0x2) << 9)
/*
- * Indicates that the physical packet is shorter
- * than that claimed by the PPPoE header length
- * for a tunnel PPPoE packet.
+ * Indicates that the physical packet is shorter than that
+ * claimed by the PPPoE header length for a tunnel PPPoE
+ * packet.
*/
#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR \
(UINT32_C(0x3) << 9)
/*
- * Indicates that physical packet is shorter
- * than that claimed by the tunnel l3 header
- * length. Valid for IPv4, or IPv6 tunnel packet
- * packets.
+ * Indicates that physical packet is shorter than that claimed
+ * by the tunnel l3 header length. Valid for IPv4, or IPv6
+ * tunnel packets.
*/
#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR \
(UINT32_C(0x4) << 9)
/*
- * Indicates that the physical packet is shorter
- * than that claimed by the tunnel UDP header
- * length for a tunnel UDP packet that is not
- * fragmented.
+ * Indicates that the physical packet is shorter than that
+ * claimed by the tunnel UDP header length for a tunnel
+ * UDP packet that is not fragmented.
*/
#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR \
(UINT32_C(0x5) << 9)
/*
- * indicates that the IPv4 TTL or IPv6 hop limit
- * check have failed (e.g. TTL = 0) in the
- * tunnel header. Valid for IPv4, and IPv6.
+ * Indicates that the IPv4 TTL or IPv6 hop limit check
+ * has failed (e.g. TTL = 0) in the tunnel header. Valid
+ * for IPv4 and IPv6.
*/
#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL \
(UINT32_C(0x6) << 9)
#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_LAST \
RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL
/*
- * This indicates that there was an error in the inner portion
- * of the packet when this field is non-zero.
+ * This indicates that there was an error in the inner
+ * portion of the packet when this field is non-zero.
*/
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_MASK UINT32_C(0xf000)
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_SFT 12
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_MASK \
+ UINT32_C(0xf000)
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_SFT 12
/*
- * No additional error occurred on the tunnel
- * portion of the packet of the packet does not
- * have a tunnel.
+ * No additional error occurred on the tunnel portion
+ * of the packet or the packet does not have a tunnel.
*/
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_NO_ERROR (UINT32_C(0x0) << 12)
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_NO_ERROR \
+ (UINT32_C(0x0) << 12)
/*
- * Indicates that IP header version does not
- * match expectation from L2 Ethertype for IPv4
- * and IPv6 or that option other than VFT was
- * parsed on FCoE packet.
+ * Indicates that IP header version does not match
+ * expectation from L2 Ethertype for IPv4 and IPv6 or that
+ * option other than VFT was parsed on
+ * FCoE packet.
*/
#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION \
(UINT32_C(0x1) << 12)
/*
- * indicates that header length is out of range.
- * Valid for IPv4 and RoCE
+ * Indicates that the header length is out of range. Valid for
+ * IPv4 and RoCE.
*/
#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN \
(UINT32_C(0x2) << 12)
/*
- * indicates that the IPv4 TTL or IPv6 hop limit
- * check have failed (e.g. TTL = 0). Valid for
- * IPv4, and IPv6
+ * Indicates that the IPv4 TTL or IPv6 hop limit check
+ * has failed (e.g. TTL = 0). Valid for IPv4 and IPv6.
*/
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL (UINT32_C(0x3) << 12)
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL \
+ (UINT32_C(0x3) << 12)
/*
- * Indicates that physical packet is shorter
- * than that claimed by the l3 header length.
- * Valid for IPv4, IPv6 packet or RoCE packets.
+ * Indicates that physical packet is shorter than that
+ * claimed by the l3 header length. Valid for IPv4,
+ * IPv6 packet or RoCE packets.
*/
#define RX_PKT_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR \
(UINT32_C(0x4) << 12)
/*
- * Indicates that the physical packet is shorter
- * than that claimed by the UDP header length
- * for a UDP packet that is not fragmented.
+ * Indicates that the physical packet is shorter than that
+ * claimed by the UDP header length for a UDP packet that is
+ * not fragmented.
*/
#define RX_PKT_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR \
(UINT32_C(0x5) << 12)
/*
- * Indicates that TCP header length > IP
- * payload. Valid for TCP packets only.
+ * Indicates that TCP header length > IP payload. Valid for
+ * TCP packets only.
*/
#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN \
(UINT32_C(0x6) << 12)
@@ -1218,1626 +2380,3129 @@ struct rx_pkt_cmpl_hi {
#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL \
(UINT32_C(0x7) << 12)
/*
- * Indicates that TCP option headers result in a
- * TCP header size that does not match data
- * offset in TCP header. Valid for TCP.
+ * Indicates that TCP option headers result in a TCP header
+ * size that does not match data offset in TCP header. Valid
+ * for TCP.
*/
#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN \
(UINT32_C(0x8) << 12)
#define RX_PKT_CMPL_ERRORS_PKT_ERROR_LAST \
RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN
- #define RX_PKT_CMPL_ERRORS_MASK UINT32_C(0xfffe)
- #define RX_PKT_CMPL_ERRORS_SFT 1
- uint16_t cfa_code;
/*
- * This field identifies the CFA action rule that was used for
- * this packet.
+ * This field identifies the CFA action rule that was used for this
+ * packet.
*/
- uint32_t reorder;
+ uint16_t cfa_code;
+ uint32_t reorder;
/*
- * This value holds the reordering sequence number for the
- * packet. If the reordering sequence is not valid, then this
- * value is zero. The reordering domain for the packet is in the
- * bottom 8 to 10b of the rss_hash value. The bottom 20b of this
- * value contain the ordering domain value for the packet.
+ * This value holds the reordering sequence number for the packet.
+ * If the reordering sequence is not valid, then this value is zero.
+ * The reordering domain for the packet is in the bottom 8 to 10b of
+ * the rss_hash value. The bottom 20b of this value contain the
+ * ordering domain value for the packet.
*/
- #define RX_PKT_CMPL_REORDER_MASK UINT32_C(0xffffff)
- #define RX_PKT_CMPL_REORDER_SFT 0
+ #define RX_PKT_CMPL_REORDER_MASK UINT32_C(0xffffff)
+ #define RX_PKT_CMPL_REORDER_SFT 0
} __attribute__((packed));
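
As a small illustration of the flags2/metadata/errors_v2 layout documented above, the following sketch checks the inner L4 checksum result and pulls the VLAN VID out of the metadata word when meta_format indicates VLAN metadata. The helper names are hypothetical and the sketch assumes little-endian records; it is not taken from the driver.

    #include <stdbool.h>
    #include <stdint.h>

    #include <rte_byteorder.h>

    /* True when the inner L4 checksum was computed and found bad. */
    static inline bool
    rx_pkt_l4_csum_bad(const struct rx_pkt_cmpl_hi *hi)
    {
            uint32_t flags2 = rte_le_to_cpu_32(hi->flags2);
            uint16_t errors = rte_le_to_cpu_16(hi->errors_v2);

            return (flags2 & RX_PKT_CMPL_FLAGS2_L4_CS_CALC) &&
                   (errors & RX_PKT_CMPL_ERRORS_L4_CS_ERROR);
    }

    /* Returns the VLAN VID, or -1 when no VLAN metadata is present. */
    static inline int
    rx_pkt_vlan_vid(const struct rx_pkt_cmpl_hi *hi)
    {
            uint32_t flags2 = rte_le_to_cpu_32(hi->flags2);
            uint32_t meta = rte_le_to_cpu_32(hi->metadata);

            if ((flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK) !=
                RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN)
                    return -1;
            return (meta & RX_PKT_CMPL_METADATA_VID_MASK) >>
                   RX_PKT_CMPL_METADATA_VID_SFT;
    }
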
-/* RX L2 TPA Start Completion Record (32 bytes split to 2 16-byte struct) */
+/* rx_tpa_start_cmpl (size:128b/16B) */
struct rx_tpa_start_cmpl {
- uint16_t flags_type;
- /*
- * This field indicates the exact type of the completion. By
- * convention, the LSB identifies the length of the record in
- * 16B units. Even values indicate 16B records. Odd values
- * indicate 32B records.
- */
- #define RX_TPA_START_CMPL_TYPE_MASK UINT32_C(0x3f)
- #define RX_TPA_START_CMPL_TYPE_SFT 0
- /*
- * RX L2 TPA Start Completion: Completion at the
- * beginning of a TPA operation. Length = 32B
- */
- #define RX_TPA_START_CMPL_TYPE_RX_TPA_START UINT32_C(0x13)
+ uint16_t flags_type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define RX_TPA_START_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define RX_TPA_START_CMPL_TYPE_SFT 0
+ /*
+ * RX L2 TPA Start Completion:
+ * Completion at the beginning of a TPA operation.
+ * Length = 32B
+ */
+ #define RX_TPA_START_CMPL_TYPE_RX_TPA_START UINT32_C(0x13)
+ #define RX_TPA_START_CMPL_TYPE_LAST \
+ RX_TPA_START_CMPL_TYPE_RX_TPA_START
+ #define RX_TPA_START_CMPL_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_TPA_START_CMPL_FLAGS_SFT 6
/* This bit will always be '0' for TPA start completions. */
- #define RX_TPA_START_CMPL_FLAGS_ERROR UINT32_C(0x40)
+ #define RX_TPA_START_CMPL_FLAGS_ERROR UINT32_C(0x40)
/* This field indicates how the packet was placed in the buffer. */
- #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380)
- #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_SFT 7
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380)
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_SFT 7
/*
- * Jumbo: TPA Packet was placed using jumbo
- * algorithm. This means that the first buffer
- * will be filled with data before moving to
- * aggregation buffers. Each aggregation buffer
- * will be filled before moving to the next
- * aggregation buffer.
+ * Jumbo:
+ * TPA Packet was placed using jumbo algorithm. This means
+ * that the first buffer will be filled with data before
+ * moving to aggregation buffers. Each aggregation buffer
+ * will be filled before moving to the next aggregation
+ * buffer.
*/
- #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7)
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_JUMBO \
+ (UINT32_C(0x1) << 7)
/*
- * Header/Data Separation: Packet was placed
- * using Header/Data separation algorithm. The
- * separation location is indicated by the itype
- * field.
+ * Header/Data Separation:
+ * Packet was placed using Header/Data separation algorithm.
+ * The separation location is indicated by the itype field.
*/
- #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7)
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_HDS \
+ (UINT32_C(0x2) << 7)
/*
- * GRO/Jumbo: Packet will be placed using
- * GRO/Jumbo where the first packet is filled
- * with data. Subsequent packets will be placed
- * such that any one packet does not span two
- * aggregation buffers unless it starts at the
- * beginning of an aggregation buffer.
+ * GRO/Jumbo:
+ * Packet will be placed using GRO/Jumbo where the first
+ * packet is filled with data. Subsequent packets will be
+ * placed such that any one packet does not span two
+ * aggregation buffers unless it starts at the beginning of
+ * an aggregation buffer.
*/
#define RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_JUMBO \
(UINT32_C(0x5) << 7)
/*
- * GRO/Header-Data Separation: Packet will be
- * placed using GRO/HDS where the header is in
- * the first packet. Payload of each packet will
- * be placed such that any one packet does not
- * span two aggregation buffers unless it starts
- * at the beginning of an aggregation buffer.
+ * GRO/Header-Data Separation:
+ * Packet will be placed using GRO/HDS where the header
+ * is in the first packet.
+ * Payload of each packet will be
+ * placed such that any one packet does not span two
+ * aggregation buffers unless it starts at the beginning of
+ * an aggregation buffer.
*/
- #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_HDS (UINT32_C(0x6) << 7)
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_HDS \
+ (UINT32_C(0x6) << 7)
#define RX_TPA_START_CMPL_FLAGS_PLACEMENT_LAST \
RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_HDS
/* This bit is '1' if the RSS field in this completion is valid. */
- #define RX_TPA_START_CMPL_FLAGS_RSS_VALID UINT32_C(0x400)
+ #define RX_TPA_START_CMPL_FLAGS_RSS_VALID UINT32_C(0x400)
/* unused is 1 b */
- #define RX_TPA_START_CMPL_FLAGS_UNUSED UINT32_C(0x800)
+ #define RX_TPA_START_CMPL_FLAGS_UNUSED UINT32_C(0x800)
/*
* This value indicates what the inner packet determined for the
* packet was.
*/
- #define RX_TPA_START_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000)
- #define RX_TPA_START_CMPL_FLAGS_ITYPE_SFT 12
- /* TCP Packet: Indicates that the packet was IP and TCP. */
- #define RX_TPA_START_CMPL_FLAGS_ITYPE_TCP (UINT32_C(0x2) << 12)
+ #define RX_TPA_START_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000)
+ #define RX_TPA_START_CMPL_FLAGS_ITYPE_SFT 12
+ /*
+ * TCP Packet:
+ * Indicates that the packet was IP and TCP.
+ */
+ #define RX_TPA_START_CMPL_FLAGS_ITYPE_TCP \
+ (UINT32_C(0x2) << 12)
#define RX_TPA_START_CMPL_FLAGS_ITYPE_LAST \
RX_TPA_START_CMPL_FLAGS_ITYPE_TCP
- #define RX_TPA_START_CMPL_FLAGS_MASK UINT32_C(0xffc0)
- #define RX_TPA_START_CMPL_FLAGS_SFT 6
- uint16_t len;
/*
* This value indicates the amount of packet data written to the
* buffer the opaque field in this completion corresponds to.
*/
- uint32_t opaque;
- /*
- * This is a copy of the opaque field from the RX BD this
- * completion corresponds to.
- */
- uint8_t v1;
- /* unused1 is 7 b */
- /*
- * This value is written by the NIC such that it will be
- * different for each pass through the completion queue. The
- * even passes will write 1. The odd passes will write 0.
- */
- #define RX_TPA_START_CMPL_V1 UINT32_C(0x1)
- /* unused1 is 7 b */
- uint8_t rss_hash_type;
- /*
- * This is the RSS hash type for the packet. The value is packed
- * {tuple_extrac_op[1:0],rss_profile_id[4:0],tuple_extrac_op[2]}
- * . The value of tuple_extrac_op provides the information about
- * what fields the hash was computed on. * 0: The RSS hash was
- * computed over source IP address, destination IP address,
- * source port, and destination port of inner IP and TCP or UDP
- * headers. Note: For non-tunneled packets, the packet headers
- * are considered inner packet headers for the RSS hash
- * computation purpose. * 1: The RSS hash was computed over
- * source IP address and destination IP address of inner IP
- * header. Note: For non-tunneled packets, the packet headers
- * are considered inner packet headers for the RSS hash
- * computation purpose. * 2: The RSS hash was computed over
- * source IP address, destination IP address, source port, and
- * destination port of IP and TCP or UDP headers of outer tunnel
- * headers. Note: For non-tunneled packets, this value is not
- * applicable. * 3: The RSS hash was computed over source IP
- * address and destination IP address of IP header of outer
- * tunnel headers. Note: For non-tunneled packets, this value is
- * not applicable. Note that 4-tuples values listed above are
- * applicable for layer 4 protocols supported and enabled for
- * RSS in the hardware, HWRM firmware, and drivers. For example,
- * if RSS hash is supported and enabled for TCP traffic only,
- * then the values of tuple_extract_op corresponding to 4-tuples
- * are only valid for TCP traffic.
- */
- uint16_t agg_id;
+ uint16_t len;
+ /*
+ * This is a copy of the opaque field from the RX BD this completion
+ * corresponds to.
+ */
+ uint32_t opaque;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ uint8_t v1;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define RX_TPA_START_CMPL_V1 UINT32_C(0x1)
+ #define RX_TPA_START_CMPL_LAST RX_TPA_START_CMPL_V1
+ /*
+ * This is the RSS hash type for the packet. The value is packed
+ * {tuple_extract_op[1:0],rss_profile_id[4:0],tuple_extract_op[2]}.
+ *
+ * The value of tuple_extract_op provides the information about
+ * what fields the hash was computed on.
+ * * 0: The RSS hash was computed over source IP address,
+ * destination IP address, source port, and destination port of inner
+ * IP and TCP or UDP headers. Note: For non-tunneled packets,
+ * the packet headers are considered inner packet headers for the RSS
+ * hash computation purpose.
+ * * 1: The RSS hash was computed over source IP address and destination
+ * IP address of inner IP header. Note: For non-tunneled packets,
+ * the packet headers are considered inner packet headers for the RSS
+ * hash computation purpose.
+ * * 2: The RSS hash was computed over source IP address,
+ * destination IP address, source port, and destination port of
+ * IP and TCP or UDP headers of outer tunnel headers.
+ * Note: For non-tunneled packets, this value is not applicable.
+ * * 3: The RSS hash was computed over source IP address and
+ * destination IP address of IP header of outer tunnel headers.
+ * Note: For non-tunneled packets, this value is not applicable.
+ *
+ * Note that 4-tuples values listed above are applicable
+ * for layer 4 protocols supported and enabled for RSS in the hardware,
+ * HWRM firmware, and drivers. For example, if RSS hash is supported and
+ * enabled for TCP traffic only, then the values of tuple_extract_op
+ * corresponding to 4-tuples are only valid for TCP traffic.
+ */
+ uint8_t rss_hash_type;
/*
* This is the aggregation ID that the completion is associated
- * with. Use this number to correlate the TPA start completion
+ * with. Use this number to correlate the TPA start completion
* with the TPA end completion.
*/
+ uint16_t agg_id;
/* unused2 is 9 b */
+ #define RX_TPA_START_CMPL_UNUSED2_MASK UINT32_C(0x1ff)
+ #define RX_TPA_START_CMPL_UNUSED2_SFT 0
/*
* This is the aggregation ID that the completion is associated
- * with. Use this number to correlate the TPA start completion
+ * with. Use this number to correlate the TPA start completion
* with the TPA end completion.
*/
- #define RX_TPA_START_CMPL_AGG_ID_MASK UINT32_C(0xfe00)
- #define RX_TPA_START_CMPL_AGG_ID_SFT 9
- uint32_t rss_hash;
+ #define RX_TPA_START_CMPL_AGG_ID_MASK UINT32_C(0xfe00)
+ #define RX_TPA_START_CMPL_AGG_ID_SFT 9
/*
* This value is the RSS hash value calculated for the packet
* based on the mode bits and key value in the VNIC.
*/
+ uint32_t rss_hash;
} __attribute__((packed));
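
The agg_id comment above notes that the same ID appears again in the matching TPA end completion. A one-function sketch of the extraction implied by the mask and shift (helper name is illustrative):

    #include <stdint.h>

    #include <rte_byteorder.h>

    /* Aggregation ID in bits [15:9]; pairs the start with the later end. */
    static inline uint16_t
    rx_tpa_start_agg_id(const struct rx_tpa_start_cmpl *tpa_start)
    {
            return (rte_le_to_cpu_16(tpa_start->agg_id) &
                    RX_TPA_START_CMPL_AGG_ID_MASK) >>
                   RX_TPA_START_CMPL_AGG_ID_SFT;
    }

A driver would typically use this value to index a per-ring table of in-progress aggregations until the corresponding TPA end completion arrives.
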
-/* last 16 bytes of RX L2 TPA Start Completion Record */
+/* rx_tpa_start_cmpl_hi (size:128b/16B) */
struct rx_tpa_start_cmpl_hi {
- uint32_t flags2;
+ uint32_t flags2;
/*
* This indicates that the ip checksum was calculated for the
* inner packet and that the sum passed for all segments
* included in the aggregation.
*/
- #define RX_TPA_START_CMPL_FLAGS2_IP_CS_CALC UINT32_C(0x1)
+ #define RX_TPA_START_CMPL_FLAGS2_IP_CS_CALC UINT32_C(0x1)
/*
* This indicates that the TCP, UDP or ICMP checksum was
- * calculated for the inner packet and that the sum passed for
- * all segments included in the aggregation.
+ * calculated for the inner packet and that the sum passed
+ * for all segments included in the aggregation.
*/
- #define RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC UINT32_C(0x2)
+ #define RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC UINT32_C(0x2)
/*
* This indicates that the ip checksum was calculated for the
* tunnel header and that the sum passed for all segments
* included in the aggregation.
*/
- #define RX_TPA_START_CMPL_FLAGS2_T_IP_CS_CALC UINT32_C(0x4)
+ #define RX_TPA_START_CMPL_FLAGS2_T_IP_CS_CALC UINT32_C(0x4)
/*
- * This indicates that the UDP checksum was calculated for the
- * tunnel packet and that the sum passed for all segments
- * included in the aggregation.
+ * This indicates that the UDP checksum was
+ * calculated for the tunnel packet and that the sum passed for
+ * all segments included in the aggregation.
*/
- #define RX_TPA_START_CMPL_FLAGS2_T_L4_CS_CALC UINT32_C(0x8)
+ #define RX_TPA_START_CMPL_FLAGS2_T_L4_CS_CALC UINT32_C(0x8)
/* This value indicates what format the metadata field is. */
#define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_MASK UINT32_C(0xf0)
- #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_SFT 4
- /* No metadata informtaion. Value is zero. */
- #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_NONE (UINT32_C(0x0) << 4)
- /*
- * The metadata field contains the VLAN tag and
- * TPID value. - metadata[11:0] contains the
- * vlan VID value. - metadata[12] contains the
- * vlan DE value. - metadata[15:13] contains the
- * vlan PRI value. - metadata[31:16] contains
- * the vlan TPID value.
- */
- #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN (UINT32_C(0x1) << 4)
+ #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_SFT 4
+ /* No metadata information. Value is zero. */
+ #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_NONE \
+ (UINT32_C(0x0) << 4)
+ /*
+ * The metadata field contains the VLAN tag and TPID value.
+ * - metadata[11:0] contains the vlan VID value.
+ * - metadata[12] contains the vlan DE value.
+ * - metadata[15:13] contains the vlan PRI value.
+ * - metadata[31:16] contains the vlan TPID value.
+ */
+ #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN \
+ (UINT32_C(0x1) << 4)
#define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_LAST \
RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN
/*
- * This field indicates the IP type for the inner-most IP
- * header. A value of '0' indicates IPv4. A value of '1'
- * indicates IPv6.
+ * This field indicates the IP type for the inner-most IP header.
+ * A value of '0' indicates IPv4. A value of '1' indicates IPv6.
*/
- #define RX_TPA_START_CMPL_FLAGS2_IP_TYPE UINT32_C(0x100)
- uint32_t metadata;
+ #define RX_TPA_START_CMPL_FLAGS2_IP_TYPE UINT32_C(0x100)
/*
- * This is data from the CFA block as indicated by the
- * meta_format field.
+ * This is data from the CFA block as indicated by the meta_format
+ * field.
*/
+ uint32_t metadata;
/* When meta_format=1, this value is the VLAN VID. */
- #define RX_TPA_START_CMPL_METADATA_VID_MASK UINT32_C(0xfff)
- #define RX_TPA_START_CMPL_METADATA_VID_SFT 0
+ #define RX_TPA_START_CMPL_METADATA_VID_MASK UINT32_C(0xfff)
+ #define RX_TPA_START_CMPL_METADATA_VID_SFT 0
/* When meta_format=1, this value is the VLAN DE. */
- #define RX_TPA_START_CMPL_METADATA_DE UINT32_C(0x1000)
+ #define RX_TPA_START_CMPL_METADATA_DE UINT32_C(0x1000)
/* When meta_format=1, this value is the VLAN PRI. */
#define RX_TPA_START_CMPL_METADATA_PRI_MASK UINT32_C(0xe000)
- #define RX_TPA_START_CMPL_METADATA_PRI_SFT 13
+ #define RX_TPA_START_CMPL_METADATA_PRI_SFT 13
/* When meta_format=1, this value is the VLAN TPID. */
- #define RX_TPA_START_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000)
- #define RX_TPA_START_CMPL_METADATA_TPID_SFT 16
- uint16_t v2;
- /* unused4 is 15 b */
+ #define RX_TPA_START_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000)
+ #define RX_TPA_START_CMPL_METADATA_TPID_SFT 16
+ uint16_t v2;
/*
- * This value is written by the NIC such that it will be
- * different for each pass through the completion queue. The
- * even passes will write 1. The odd passes will write 0.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- #define RX_TPA_START_CMPL_V2 UINT32_C(0x1)
- /* unused4 is 15 b */
- uint16_t cfa_code;
+ #define RX_TPA_START_CMPL_V2 UINT32_C(0x1)
/*
- * This field identifies the CFA action rule that was used for
- * this packet.
+ * This field identifies the CFA action rule that was used for this
+ * packet.
*/
- uint32_t inner_l4_size_inner_l3_offset_inner_l2_offset_outer_l3_offset;
+ uint16_t cfa_code;
/*
- * This is the size in bytes of the inner most L4 header. This
- * can be subtracted from the payload_offset to determine the
- * start of the inner most L4 header.
+ * This is the size in bytes of the inner most L4 header.
+ * This can be subtracted from the payload_offset to determine
+ * the start of the inner most L4 header.
*/
+ uint32_t inner_l4_size_inner_l3_offset_inner_l2_offset_outer_l3_offset;
/*
- * This is the offset from the beginning of the packet in bytes
- * for the outer L3 header. If there is no outer L3 header, then
- * this value is zero.
+ * This is the offset from the beginning of the packet in bytes for
+ * the outer L3 header. If there is no outer L3 header, then this
+ * value is zero.
*/
- #define RX_TPA_START_CMPL_OUTER_L3_OFFSET_MASK UINT32_C(0x1ff)
- #define RX_TPA_START_CMPL_OUTER_L3_OFFSET_SFT 0
+ #define RX_TPA_START_CMPL_OUTER_L3_OFFSET_MASK UINT32_C(0x1ff)
+ #define RX_TPA_START_CMPL_OUTER_L3_OFFSET_SFT 0
/*
- * This is the offset from the beginning of the packet in bytes
- * for the inner most L2 header.
+ * This is the offset from the beginning of the packet in bytes for
+ * the inner most L2 header.
*/
- #define RX_TPA_START_CMPL_INNER_L2_OFFSET_MASK UINT32_C(0x3fe00)
- #define RX_TPA_START_CMPL_INNER_L2_OFFSET_SFT 9
+ #define RX_TPA_START_CMPL_INNER_L2_OFFSET_MASK UINT32_C(0x3fe00)
+ #define RX_TPA_START_CMPL_INNER_L2_OFFSET_SFT 9
/*
- * This is the offset from the beginning of the packet in bytes
- * for the inner most L3 header.
+ * This is the offset from the beginning of the packet in bytes for
+ * the inner most L3 header.
*/
- #define RX_TPA_START_CMPL_INNER_L3_OFFSET_MASK UINT32_C(0x7fc0000)
- #define RX_TPA_START_CMPL_INNER_L3_OFFSET_SFT 18
+ #define RX_TPA_START_CMPL_INNER_L3_OFFSET_MASK UINT32_C(0x7fc0000)
+ #define RX_TPA_START_CMPL_INNER_L3_OFFSET_SFT 18
/*
- * This is the size in bytes of the inner most L4 header. This
- * can be subtracted from the payload_offset to determine the
- * start of the inner most L4 header.
+ * This is the size in bytes of the inner most L4 header.
+ * This can be subtracted from the payload_offset to determine
+ * the start of the inner most L4 header.
*/
- #define RX_TPA_START_CMPL_INNER_L4_SIZE_MASK UINT32_C(0xf8000000)
- #define RX_TPA_START_CMPL_INNER_L4_SIZE_SFT 27
+ #define RX_TPA_START_CMPL_INNER_L4_SIZE_MASK UINT32_C(0xf8000000)
+ #define RX_TPA_START_CMPL_INNER_L4_SIZE_SFT 27
} __attribute__((packed));
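
The single 32-bit word above carries four header-geometry fields. A sketch of unpacking it with the masks and shifts just defined; the result struct and helper name are illustrative assumptions, and the record is assumed to be little-endian.

    #include <stdint.h>

    #include <rte_byteorder.h>

    struct tpa_start_offsets {
            uint16_t outer_l3_off;   /* bits  [8:0]  */
            uint16_t inner_l2_off;   /* bits  [17:9] */
            uint16_t inner_l3_off;   /* bits [26:18] */
            uint8_t  inner_l4_size;  /* bits [31:27] */
    };

    static inline struct tpa_start_offsets
    rx_tpa_start_offsets(const struct rx_tpa_start_cmpl_hi *hi)
    {
            uint32_t w = rte_le_to_cpu_32(
                hi->inner_l4_size_inner_l3_offset_inner_l2_offset_outer_l3_offset);
            struct tpa_start_offsets o;

            o.outer_l3_off = (w & RX_TPA_START_CMPL_OUTER_L3_OFFSET_MASK) >>
                             RX_TPA_START_CMPL_OUTER_L3_OFFSET_SFT;
            o.inner_l2_off = (w & RX_TPA_START_CMPL_INNER_L2_OFFSET_MASK) >>
                             RX_TPA_START_CMPL_INNER_L2_OFFSET_SFT;
            o.inner_l3_off = (w & RX_TPA_START_CMPL_INNER_L3_OFFSET_MASK) >>
                             RX_TPA_START_CMPL_INNER_L3_OFFSET_SFT;
            o.inner_l4_size = (w & RX_TPA_START_CMPL_INNER_L4_SIZE_MASK) >>
                              RX_TPA_START_CMPL_INNER_L4_SIZE_SFT;
            return o;
    }
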
-/* RX TPA End Completion Record (32 bytes split to 2 16-byte struct) */
+/* rx_tpa_end_cmpl (size:128b/16B) */
struct rx_tpa_end_cmpl {
- uint16_t flags_type;
+ uint16_t flags_type;
/*
- * This field indicates the exact type of the completion. By
- * convention, the LSB identifies the length of the record in
- * 16B units. Even values indicate 16B records. Odd values
- * indicate 32B records.
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
*/
- #define RX_TPA_END_CMPL_TYPE_MASK UINT32_C(0x3f)
- #define RX_TPA_END_CMPL_TYPE_SFT 0
+ #define RX_TPA_END_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define RX_TPA_END_CMPL_TYPE_SFT 0
/*
- * RX L2 TPA End Completion: Completion at the
- * end of a TPA operation. Length = 32B
+ * RX L2 TPA End Completion:
+ * Completion at the end of a TPA operation.
+ * Length = 32B
*/
- #define RX_TPA_END_CMPL_TYPE_RX_TPA_END UINT32_C(0x15)
+ #define RX_TPA_END_CMPL_TYPE_RX_TPA_END UINT32_C(0x15)
+ #define RX_TPA_END_CMPL_TYPE_LAST \
+ RX_TPA_END_CMPL_TYPE_RX_TPA_END
+ #define RX_TPA_END_CMPL_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_TPA_END_CMPL_FLAGS_SFT 6
/*
- * When this bit is '1', it indicates a packet that has an error
- * of some type. Type of error is indicated in error_flags.
+ * When this bit is '1', it indicates a packet that has an
+ * error of some type. Type of error is indicated in
+ * error_flags.
*/
- #define RX_TPA_END_CMPL_FLAGS_ERROR UINT32_C(0x40)
+ #define RX_TPA_END_CMPL_FLAGS_ERROR UINT32_C(0x40)
/* This field indicates how the packet was placed in the buffer. */
- #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380)
- #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_SFT 7
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380)
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_SFT 7
+ /*
+ * Jumbo:
+ * TPA Packet was placed using jumbo algorithm. This means
+ * that the first buffer will be filled with data before
+ * moving to aggregation buffers. Each aggregation buffer
+ * will be filled before moving to the next aggregation
+ * buffer.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_JUMBO \
+ (UINT32_C(0x1) << 7)
+ /*
+ * Header/Data Separation:
+ * Packet was placed using Header/Data separation algorithm.
+ * The separation location is indicated by the itype field.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_HDS \
+ (UINT32_C(0x2) << 7)
+ /*
+ * GRO/Jumbo:
+ * Packet will be placed using GRO/Jumbo where the first
+ * packet is filled with data. Subsequent packets will be
+ * placed such that any one packet does not span two
+ * aggregation buffers unless it starts at the beginning of
+ * an aggregation buffer.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_JUMBO \
+ (UINT32_C(0x5) << 7)
/*
- * Jumbo: TPA Packet was placed using jumbo
- * algorithm. This means that the first buffer
- * will be filled with data before moving to
- * aggregation buffers. Each aggregation buffer
- * will be filled before moving to the next
- * aggregation buffer.
+ * GRO/Header-Data Separation:
+ * Packet will be placed using GRO/HDS where the header
+ * is in the first packet.
+ * Payload of each packet will be
+ * placed such that any one packet does not span two
+ * aggregation buffers unless it starts at the beginning of
+ * an aggregation buffer.
*/
- #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7)
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_HDS \
+ (UINT32_C(0x6) << 7)
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_LAST \
+ RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_HDS
+ /* unused is 2 b */
+ #define RX_TPA_END_CMPL_FLAGS_UNUSED_MASK UINT32_C(0xc00)
+ #define RX_TPA_END_CMPL_FLAGS_UNUSED_SFT 10
/*
- * Header/Data Separation: Packet was placed
- * using Header/Data separation algorithm. The
- * separation location is indicated by the itype
- * field.
+ * This value indicates what the inner packet determined for the
+ * packet was.
+ * - 2 TCP Packet
+ * Indicates that the packet was IP and TCP. This indicates
+ * that the ip_cs field is valid and that the tcp_udp_cs
+ * field is valid and contains the TCP checksum.
+ * This also indicates that the payload_offset field is valid.
*/
- #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7)
+ #define RX_TPA_END_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000)
+ #define RX_TPA_END_CMPL_FLAGS_ITYPE_SFT 12
/*
- * GRO/Jumbo: Packet will be placed using
- * GRO/Jumbo where the first packet is filled
- * with data. Subsequent packets will be placed
- * such that any one packet does not span two
- * aggregation buffers unless it starts at the
- * beginning of an aggregation buffer.
+ * This value is zero for TPA End completions.
+ * There is no data in the buffer that corresponds to the opaque
+ * value in this completion.
*/
- #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_JUMBO (UINT32_C(0x5) << 7)
+ uint16_t len;
/*
- * GRO/Header-Data Separation: Packet will be
- * placed using GRO/HDS where the header is in
- * the first packet. Payload of each packet will
- * be placed such that any one packet does not
- * span two aggregation buffers unless it starts
- * at the beginning of an aggregation buffer.
+ * This is a copy of the opaque field from the RX BD this completion
+ * corresponds to.
*/
- #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_HDS (UINT32_C(0x6) << 7)
- #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_LAST \
- RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_HDS
- /* unused is 2 b */
- #define RX_TPA_END_CMPL_FLAGS_UNUSED_MASK UINT32_C(0xc00)
- #define RX_TPA_END_CMPL_FLAGS_UNUSED_SFT 10
+ uint32_t opaque;
/*
- * This value indicates what the inner packet determined for the
- * packet was. - 2 TCP Packet Indicates that the packet was IP
- * and TCP. This indicates that the ip_cs field is valid and
- * that the tcp_udp_cs field is valid and contains the TCP
- * checksum. This also indicates that the payload_offset field
- * is valid.
- */
- #define RX_TPA_END_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000)
- #define RX_TPA_END_CMPL_FLAGS_ITYPE_SFT 12
- #define RX_TPA_END_CMPL_FLAGS_MASK UINT32_C(0xffc0)
- #define RX_TPA_END_CMPL_FLAGS_SFT 6
- uint16_t len;
- /*
- * This value is zero for TPA End completions. There is no data
- * in the buffer that corresponds to the opaque value in this
- * completion.
- */
- uint32_t opaque;
- /*
- * This is a copy of the opaque field from the RX BD this
- * completion corresponds to.
- */
- uint8_t agg_bufs_v1;
- /* unused1 is 1 b */
- /*
- * This value is written by the NIC such that it will be
- * different for each pass through the completion queue. The
- * even passes will write 1. The odd passes will write 0.
- */
- #define RX_TPA_END_CMPL_V1 UINT32_C(0x1)
- /*
- * This value is the number of aggregation buffers that follow
- * this entry in the completion ring that are a part of this
- * aggregation packet. If the value is zero, then the packet is
- * completely contained in the buffer space provided in the
- * aggregation start completion.
- */
- #define RX_TPA_END_CMPL_AGG_BUFS_MASK UINT32_C(0x7e)
- #define RX_TPA_END_CMPL_AGG_BUFS_SFT 1
- /* unused1 is 1 b */
- uint8_t tpa_segs;
- /* This value is the number of segments in the TPA operation. */
- uint8_t payload_offset;
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ uint8_t agg_bufs_v1;
/*
- * This value indicates the offset in bytes from the beginning
- * of the packet where the inner payload starts. This value is
- * valid for TCP, UDP, FCoE, and RoCE packets. A value of zero
- * indicates an offset of 256 bytes.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t agg_id;
+ #define RX_TPA_END_CMPL_V1 UINT32_C(0x1)
/*
- * This is the aggregation ID that the completion is associated
- * with. Use this number to correlate the TPA start completion
- * with the TPA end completion.
+ * This value is the number of aggregation buffers that follow this
+ * entry in the completion ring that are a part of this aggregation
+ * packet.
+ * If the value is zero, then the packet is completely contained
+ * in the buffer space provided in the aggregation start completion.
*/
+ #define RX_TPA_END_CMPL_AGG_BUFS_MASK UINT32_C(0x7e)
+ #define RX_TPA_END_CMPL_AGG_BUFS_SFT 1
+ /* This value is the number of segments in the TPA operation. */
+ uint8_t tpa_segs;
+ /*
+ * This value indicates the offset in bytes from the beginning of the packet
+ * where the inner payload starts. This value is valid for TCP, UDP,
+ * FCoE, and RoCE packets.
+ *
+ * A value of zero indicates an offset of 256 bytes.
+ */
+ uint8_t payload_offset;
+ uint8_t agg_id;
/* unused2 is 1 b */
+ #define RX_TPA_END_CMPL_UNUSED2 UINT32_C(0x1)
/*
* This is the aggregation ID that the completion is associated
- * with. Use this number to correlate the TPA start completion
+ * with. Use this number to correlate the TPA start completion
* with the TPA end completion.
*/
- #define RX_TPA_END_CMPL_AGG_ID_MASK UINT32_C(0xfe)
- #define RX_TPA_END_CMPL_AGG_ID_SFT 1
- uint32_t tsdelta;
+ #define RX_TPA_END_CMPL_AGG_ID_MASK UINT32_C(0xfe)
+ #define RX_TPA_END_CMPL_AGG_ID_SFT 1
/*
- * For non-GRO packets, this value is the timestamp delta
- * between earliest and latest timestamp values for TPA packet.
- * If packets were not time stamped, then delta will be zero.
+ * For non-GRO packets, this value is the
+ * timestamp delta between earliest and latest timestamp values for
+ * TPA packet. If packets were not time stamped, then delta will be
+ * zero.
+ *
* For GRO packets, this field is zero except for the following
- * sub-fields. - tsdelta[31] Timestamp present indication. When
- * '0', no Timestamp option is in the packet. When '1', then a
- * Timestamp option is present in the packet.
+ * sub-fields.
+ * - tsdelta[31]
+ * Timestamp present indication. When '0', no Timestamp
+ * option is in the packet. When '1', then a Timestamp
+ * option is present in the packet.
*/
+ uint32_t tsdelta;
} __attribute__((packed));
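
Several small fields in the TPA end completion above are again bit-packed. A minimal sketch of reading them, assuming little-endian records; the helper names are illustrative only.

    #include <stdbool.h>
    #include <stdint.h>

    #include <rte_byteorder.h>

    /* Number of aggregation buffer entries that follow this completion. */
    static inline unsigned int
    rx_tpa_end_agg_bufs(const struct rx_tpa_end_cmpl *tpa_end)
    {
            return (tpa_end->agg_bufs_v1 & RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
                   RX_TPA_END_CMPL_AGG_BUFS_SFT;
    }

    /* Aggregation ID, matched against the earlier TPA start completion. */
    static inline unsigned int
    rx_tpa_end_agg_id(const struct rx_tpa_end_cmpl *tpa_end)
    {
            return (tpa_end->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK) >>
                   RX_TPA_END_CMPL_AGG_ID_SFT;
    }

    /* For GRO, tsdelta[31] reports whether a TCP Timestamp option was seen. */
    static inline bool
    rx_tpa_end_gro_ts_present(const struct rx_tpa_end_cmpl *tpa_end)
    {
            return (rte_le_to_cpu_32(tpa_end->tsdelta) &
                    (UINT32_C(1) << 31)) != 0;
    }
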
-/* last 16 bytes of RX TPA End Completion Record */
+/* rx_tpa_end_cmpl_hi (size:128b/16B) */
struct rx_tpa_end_cmpl_hi {
- uint32_t tpa_dup_acks;
- /* unused3 is 28 b */
/*
* This value is the number of duplicate ACKs that have been
* received as part of the TPA operation.
*/
- #define RX_TPA_END_CMPL_TPA_DUP_ACKS_MASK UINT32_C(0xf)
- #define RX_TPA_END_CMPL_TPA_DUP_ACKS_SFT 0
- /* unused3 is 28 b */
- uint16_t tpa_seg_len;
- /*
- * This value is the valid when TPA completion is active. It
- * indicates the length of the longest segment of the TPA
- * operation for LRO mode and the length of the first segment in
- * GRO mode. This value may be used by GRO software to re-
- * construct the original packet stream from the TPA packet.
- * This is the length of all but the last segment for GRO. In
- * LRO mode this value may be used to indicate MSS size to the
- * stack.
- */
- uint16_t unused_3;
+ uint32_t tpa_dup_acks;
+ /*
+ * This value is the number of duplicate ACKs that have been
+ * received as part of the TPA operation.
+ */
+ #define RX_TPA_END_CMPL_TPA_DUP_ACKS_MASK UINT32_C(0xf)
+ #define RX_TPA_END_CMPL_TPA_DUP_ACKS_SFT 0
+ /*
+ * This value is valid when TPA completion is active. It
+ * indicates the length of the longest segment of the TPA operation
+ * for LRO mode and the length of the first segment in GRO mode.
+ *
+ * This value may be used by GRO software to re-construct the original
+ * packet stream from the TPA packet. This is the length of all
+ * but the last segment for GRO. In LRO mode this value may be used
+ * to indicate MSS size to the stack.
+ */
+ uint16_t tpa_seg_len;
/* unused4 is 16 b */
- uint16_t errors_v2;
+ uint16_t unused3;
+ uint16_t errors_v2;
/*
- * This value is written by the NIC such that it will be
- * different for each pass through the completion queue. The
- * even passes will write 1. The odd passes will write 0.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- #define RX_TPA_END_CMPL_V2 UINT32_C(0x1)
+ #define RX_TPA_END_CMPL_V2 UINT32_C(0x1)
+ #define RX_TPA_END_CMPL_ERRORS_MASK UINT32_C(0xfffe)
+ #define RX_TPA_END_CMPL_ERRORS_SFT 1
/*
* This error indicates that there was some sort of problem with
* the BDs for the packet that was found after part of the
- * packet was already placed. The packet should be treated as
+ * packet was already placed. The packet should be treated as
* invalid.
*/
- #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe)
- #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_SFT 1
+ #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe)
+ #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_SFT 1
/*
- * This error occurs when there is a fatal HW
- * problem in the chip only. It indicates that
- * there were not BDs on chip but that there was
- * adequate reservation. provided by the TPA
- * block.
+ * This error occurs when there is a fatal HW problem in
+ * the chip only. It indicates that there were no
+ * BDs on chip but that there was adequate reservation
+ * provided by the TPA block.
*/
#define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP \
(UINT32_C(0x2) << 1)
/*
- * This error occurs when TPA block was not
- * configured to reserve adequate BDs for TPA
- * operations on this RX ring. All data for the
- * TPA operation was not placed. This error can
- * also be generated when the number of segments
- * is not programmed correctly in TPA and the 33
- * total aggregation buffers allowed for the TPA
+ * This error occurs when TPA block was not configured to
+ * reserve adequate BDs for TPA operations on this RX
+ * ring. All data for the TPA operation was not placed.
+ *
+ * This error can also be generated when the number of
+ * segments is not programmed correctly in TPA and the
+ * 33 total aggregation buffers allowed for the TPA
* operation has been exceeded.
*/
#define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_RSV_ERROR \
(UINT32_C(0x4) << 1)
#define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_LAST \
RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_RSV_ERROR
- #define RX_TPA_END_CMPL_ERRORS_MASK UINT32_C(0xfffe)
- #define RX_TPA_END_CMPL_ERRORS_SFT 1
- uint16_t unused_4;
/* unused5 is 16 b */
- uint32_t start_opaque;
+ uint16_t unused_4;
/*
* This is the opaque value that was completed for the TPA start
* completion that corresponds to this TPA end completion.
*/
+ uint32_t start_opaque;
} __attribute__((packed));
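
The duplicate ACK count and segment length described above are what an LRO/GRO consumer would report back to the stack. A short sketch of reading them, with illustrative helper names:

    #include <stdint.h>

    #include <rte_byteorder.h>

    /* Duplicate ACKs seen during the aggregation (low four bits). */
    static inline unsigned int
    rx_tpa_end_dup_acks(const struct rx_tpa_end_cmpl_hi *hi)
    {
            return rte_le_to_cpu_32(hi->tpa_dup_acks) &
                   RX_TPA_END_CMPL_TPA_DUP_ACKS_MASK;
    }

    /* Longest (LRO) or first (GRO) segment length; usable as an MSS hint. */
    static inline uint16_t
    rx_tpa_end_seg_len(const struct rx_tpa_end_cmpl_hi *hi)
    {
            return rte_le_to_cpu_16(hi->tpa_seg_len);
    }
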
-/* HWRM Forwarded Request (16 bytes) */
+/* rx_abuf_cmpl (size:128b/16B) */
+struct rx_abuf_cmpl {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define RX_ABUF_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define RX_ABUF_CMPL_TYPE_SFT 0
+ /*
+ * RX Aggregation Buffer completion :
+ * Completion of an L2 aggregation buffer in support of
+ * TPA, HDS, or Jumbo packet completion. Length = 16B
+ */
+ #define RX_ABUF_CMPL_TYPE_RX_AGG UINT32_C(0x12)
+ #define RX_ABUF_CMPL_TYPE_LAST RX_ABUF_CMPL_TYPE_RX_AGG
+ /*
+ * This is the length of the data for the packet stored in this
+ * aggregation buffer identified by the opaque value. This does not
+ * include the length of any
+ * data placed in other aggregation BDs or in the packet or buffer
+ * BDs. This length does not include any space added due to
+ * hdr_offset register during HDS placement mode.
+ */
+ uint16_t len;
+ /*
+ * This is a copy of the opaque field from the RX BD this aggregation
+ * buffer corresponds to.
+ */
+ uint32_t opaque;
+ uint32_t v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define RX_ABUF_CMPL_V UINT32_C(0x1)
+ /* unused3 is 32 b */
+ uint32_t unused_2;
+} __attribute__((packed));
+
+/* eject_cmpl (size:128b/16B) */
+struct eject_cmpl {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define EJECT_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define EJECT_CMPL_TYPE_SFT 0
+ /*
+ * Statistics Ejection Completion:
+ * Completion of statistics data ejection buffer.
+ * Length = 16B
+ */
+ #define EJECT_CMPL_TYPE_STAT_EJECT UINT32_C(0x1a)
+ #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT
+ /*
+ * This is the length of the statistics data stored in this
+ * buffer.
+ */
+ uint16_t len;
+ /*
+ * This is a copy of the opaque field from the RX BD this ejection
+ * buffer corresponds to.
+ */
+ uint32_t opaque;
+ uint32_t v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define EJECT_CMPL_V UINT32_C(0x1)
+ /* unused3 is 32 b */
+ uint32_t unused_2;
+} __attribute__((packed));
+
+/* hwrm_cmpl (size:128b/16B) */
+struct hwrm_cmpl {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define HWRM_CMPL_TYPE_SFT 0
+ /*
+ * HWRM Command Completion:
+ * Completion of an HWRM command.
+ */
+ #define HWRM_CMPL_TYPE_HWRM_DONE UINT32_C(0x20)
+ #define HWRM_CMPL_TYPE_LAST HWRM_CMPL_TYPE_HWRM_DONE
+ /* This is the sequence_id of the HWRM command that has completed. */
+ uint16_t sequence_id;
+ /* unused2 is 32 b */
+ uint32_t unused_1;
+ uint32_t v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_CMPL_V UINT32_C(0x1)
+ /* unused4 is 32 b */
+ uint32_t unused_3;
+} __attribute__((packed));
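
Every completion type above carries the same valid-bit convention: the NIC writes 1 on even passes through the completion queue and 0 on odd passes. The following sketch shows the polling pattern that convention implies, using hwrm_cmpl as the example; the ring-state bookkeeping below is an illustration under that assumption, not the driver's actual implementation.

    #include <stdbool.h>
    #include <stdint.h>

    #include <rte_byteorder.h>

    /* Illustrative ring state: valid_phase flips each time the consumer wraps. */
    struct cmpl_ring_state {
            uint32_t cons;        /* consumer index       */
            uint32_t ring_mask;   /* ring size minus one  */
            bool valid_phase;     /* expected V bit value */
    };

    static inline bool
    hwrm_cmpl_ready(const struct hwrm_cmpl *cmpl,
                    const struct cmpl_ring_state *s)
    {
            bool v = (rte_le_to_cpu_32(cmpl->v) & HWRM_CMPL_V) != 0;

            return v == s->valid_phase;
    }

    static inline void
    cmpl_ring_advance(struct cmpl_ring_state *s)
    {
            s->cons = (s->cons + 1) & s->ring_mask;
            if (s->cons == 0)
                    s->valid_phase = !s->valid_phase; /* wrapped: phase flips */
    }
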
+
+/* hwrm_fwd_req_cmpl (size:128b/16B) */
struct hwrm_fwd_req_cmpl {
- uint16_t req_len_type;
- /* Length of forwarded request in bytes. */
/*
- * This field indicates the exact type of the completion. By
- * convention, the LSB identifies the length of the record in
- * 16B units. Even values indicate 16B records. Odd values
- * indicate 32B records.
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
*/
- #define HWRM_FWD_INPUT_CMPL_TYPE_MASK UINT32_C(0x3f)
- #define HWRM_FWD_INPUT_CMPL_TYPE_SFT 0
+ uint16_t req_len_type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_FWD_REQ_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define HWRM_FWD_REQ_CMPL_TYPE_SFT 0
/* Forwarded HWRM Request */
- #define HWRM_FWD_INPUT_CMPL_TYPE_HWRM_FWD_INPUT UINT32_C(0x22)
+ #define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ UINT32_C(0x22)
+ #define HWRM_FWD_REQ_CMPL_TYPE_LAST \
+ HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ
/* Length of forwarded request in bytes. */
- #define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK UINT32_C(0xffc0)
- #define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT 6
- uint16_t source_id;
+ #define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK UINT32_C(0xffc0)
+ #define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT 6
/*
- * Source ID of this request. Typically used in forwarding
- * requests and responses. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF -
- * HWRM
+ * Source ID of this request.
+ * Typically used in forwarding requests and responses.
+ * 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
*/
- uint32_t unused_0;
+ uint16_t source_id;
/* unused1 is 32 b */
- uint32_t req_buf_addr_v[2];
+ uint32_t unused0;
/* Address of forwarded request. */
+ uint32_t req_buf_addr_v[2];
/*
- * This value is written by the NIC such that it will be
- * different for each pass through the completion queue. The
- * even passes will write 1. The odd passes will write 0.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- #define HWRM_FWD_INPUT_CMPL_V UINT32_C(0x1)
+ #define HWRM_FWD_REQ_CMPL_V UINT32_C(0x1)
/* Address of forwarded request. */
- #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_MASK UINT32_C(0xfffffffe)
- #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
+ #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_MASK UINT32_C(0xfffffffe)
+ #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
} __attribute__((packed));
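The *_MASK/*_SFT macro pairs used throughout this header follow a single convention: AND with the mask, then shift right by the _SFT amount to recover the field. A hedged sketch for the forwarded-request length packed into req_len_type (the helper name is illustrative, not an existing API):

/* Illustrative sketch: extract the forwarded request length in bytes. */
static inline uint16_t
fwd_req_cmpl_req_len(const struct hwrm_fwd_req_cmpl *cmpl)
{
	return (cmpl->req_len_type & HWRM_FWD_REQ_CMPL_REQ_LEN_MASK) >>
		HWRM_FWD_REQ_CMPL_REQ_LEN_SFT;
}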
-/* HWRM Asynchronous Event Completion Record (16 bytes) */
+/* hwrm_fwd_resp_cmpl (size:128b/16B) */
+struct hwrm_fwd_resp_cmpl {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_FWD_RESP_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define HWRM_FWD_RESP_CMPL_TYPE_SFT 0
+ /* Forwarded HWRM Response */
+ #define HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP UINT32_C(0x24)
+ #define HWRM_FWD_RESP_CMPL_TYPE_LAST \
+ HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP
+ /*
+ * Source ID of this response.
+ * Typically used in forwarding requests and responses.
+ * 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
+ */
+ uint16_t source_id;
+ /* Length of forwarded response in bytes. */
+ uint16_t resp_len;
+	/* unused_1 is 16 b */
+ uint16_t unused_1;
+	/* Address of forwarded response. */
+ uint32_t resp_buf_addr_v[2];
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_FWD_RESP_CMPL_V UINT32_C(0x1)
+	/* Address of forwarded response. */
+ #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_MASK UINT32_C(0xfffffffe)
+ #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl (size:128b/16B) */
struct hwrm_async_event_cmpl {
- uint16_t type;
- /* unused1 is 10 b */
+ uint16_t type;
/*
- * This field indicates the exact type of the completion. By
- * convention, the LSB identifies the length of the record in
- * 16B units. Even values indicate 16B records. Odd values
- * indicate 32B records.
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
*/
- #define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK UINT32_C(0x3f)
- #define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT 0
/* HWRM Asynchronous Event Information */
- #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e)
- /* unused1 is 10 b */
- uint16_t event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT
/* Identifiers of events. */
+ uint16_t event_id;
/* Link status changed */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE UINT32_C(0x0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE \
+ UINT32_C(0x0)
/* Link MTU changed */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE UINT32_C(0x1)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE \
+ UINT32_C(0x1)
/* Link speed changed */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE UINT32_C(0x2)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE \
+ UINT32_C(0x2)
/* DCB Configuration changed */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE UINT32_C(0x3)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE \
+ UINT32_C(0x3)
/* Port connection not allowed */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED UINT32_C(0x4)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED \
+ UINT32_C(0x4)
/* Link speed configuration was not allowed */
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED \
UINT32_C(0x5)
/* Link speed configuration change */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE UINT32_C(0x6)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE \
+ UINT32_C(0x6)
/* Port PHY configuration change */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE UINT32_C(0x7)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE \
+ UINT32_C(0x7)
/* Function driver unloaded */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD UINT32_C(0x10)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD \
+ UINT32_C(0x10)
/* Function driver loaded */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD UINT32_C(0x11)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD \
+ UINT32_C(0x11)
/* Function FLR related processing has completed */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT UINT32_C(0x12)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT \
+ UINT32_C(0x12)
/* PF driver unloaded */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD UINT32_C(0x20)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD \
+ UINT32_C(0x20)
/* PF driver loaded */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD UINT32_C(0x21)
- /* VF Function Level Reset (FLR) */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR UINT32_C(0x30)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD \
+ UINT32_C(0x21)
+ /* VF Function Level Reset (FLR) */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR \
+ UINT32_C(0x30)
/* VF MAC Address Change */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE UINT32_C(0x31)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE \
+ UINT32_C(0x31)
/* PF-VF communication channel status change. */
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE \
UINT32_C(0x32)
/* VF Configuration Change */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE UINT32_C(0x33)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE \
+ UINT32_C(0x33)
/* LLFC/PFC Configuration Change */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE UINT32_C(0x34)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE \
+ UINT32_C(0x34)
/* HWRM Error */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR UINT32_C(0xff)
- uint32_t event_data2;
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR \
+ UINT32_C(0xff)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
/* Event specific data */
- uint8_t opaque_v;
- /* opaque is 7 b */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This value is written by the NIC such that it will be
- * different for each pass through the completion queue. The
- * even passes will write 1. The odd passes will write 0.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- #define HWRM_ASYNC_EVENT_CMPL_V UINT32_C(0x1)
+ #define HWRM_ASYNC_EVENT_CMPL_V UINT32_C(0x1)
/* opaque is 7 b */
- #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK UINT32_C(0xfe)
- #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT 1
- uint8_t timestamp_lo;
- /* 8-lsb timestamp from POR (100-msec resolution) */
- uint16_t timestamp_hi;
- /* 16-lsb timestamp from POR (100-msec resolution) */
- uint32_t event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
/* Event specific data */
+ uint32_t event_data1;
} __attribute__((packed));
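The generic hwrm_async_event_cmpl record is the entry point for all asynchronous events: event_id selects which of the event-specific layouts defined below (link status change, MTU change, driver load/unload, and so on) should be used to reinterpret the same 16 bytes. A minimal dispatch sketch, with hypothetical handler bodies:

/* Illustrative sketch: dispatch on event_id before reinterpreting the
 * record as one of the event-specific structures that follow.
 */
static void
handle_async_event(const struct hwrm_async_event_cmpl *ae)
{
	switch (ae->event_id) {
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
		/* treat as struct hwrm_async_event_cmpl_link_status_change */
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
		/* treat as struct hwrm_async_event_cmpl_pf_drvr_unload */
		break;
	default:
		break;
	}
}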
-/* hwrm_ver_get */
-/*
- * Description: This function is called by a driver to determine the HWRM
- * interface version supported by the HWRM firmware, the version of HWRM
- * firmware implementation, the name of HWRM firmware, the versions of other
- * embedded firmwares, and the names of other embedded firmwares, etc. Any
- * interface or firmware version with major = 0, minor = 0, and update = 0 shall
- * be considered an invalid version.
- */
-/* Input (24 bytes) */
-struct hwrm_ver_get_input {
- uint16_t req_type;
+/* hwrm_async_event_cmpl_link_status_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_link_status_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Link status changed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE \
+ UINT32_C(0x0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint16_t cmpl_ring;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Indicates link status change */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE \
+ UINT32_C(0x1)
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+	 * If this bit is set to 0, it indicates that the link
+	 * was up and has gone down.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN \
+ UINT32_C(0x0)
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+	 * If this bit is set to 1, it indicates that the link
+	 * was down and has come up.
*/
- uint64_t resp_addr;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP \
+ UINT32_C(0x1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
+	/* Indicates the physical port this link status change occurred on */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK \
+ UINT32_C(0xe)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT \
+ 1
+ /* PORT ID */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK \
+ UINT32_C(0xffff0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT \
+ 4
+} __attribute__((packed));
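event_data1 of the link status change event packs the new link state (bit 0), the physical port (bits 1-3) and the port ID (bits 4-19) into one 32-bit word. A hedged decoding sketch using only the masks defined above (the function name and local variables are illustrative):

/* Illustrative sketch: decode event_data1 of a link status change. */
static void
decode_link_status_change(
	const struct hwrm_async_event_cmpl_link_status_change *ae)
{
	uint32_t data1 = ae->event_data1;
	int link_up = (data1 &
	    HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE) ==
	    HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP;
	uint32_t port_id = (data1 &
	    HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK) >>
	    HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT;

	(void)link_up;	/* a real consumer would update its link state here */
	(void)port_id;
}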
+
+/* hwrm_async_event_cmpl_link_mtu_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_link_mtu_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Link MTU changed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE \
+ UINT32_C(0x1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t hwrm_intf_maj;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* The new MTU of the link in bytes. */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_link_speed_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_link_speed_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Link speed changed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE \
+ UINT32_C(0x2)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This field represents the major version of HWRM interface
- * specification supported by the driver HWRM implementation.
- * The interface major version is intended to change only when
- * non backward compatible changes are made to the HWRM
- * interface specification.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t hwrm_intf_min;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
/*
- * This field represents the minor version of HWRM interface
- * specification supported by the driver HWRM implementation. A
- * change in interface minor version is used to reflect
- * significant backward compatible modification to HWRM
- * interface specification. This can be due to addition or
- * removal of functionality. HWRM interface specifications with
- * the same major version but different minor versions are
- * compatible.
+ * When this bit is '1', the link was forced to the
+ * force_link_speed value.
*/
- uint8_t hwrm_intf_upd;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE \
+ UINT32_C(0x1)
+ /* The new link speed in 100 Mbps units. */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK \
+ UINT32_C(0xfffe)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT \
+ 1
+ /* 100Mb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB \
+ (UINT32_C(0x1) << 1)
+ /* 1Gb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB \
+ (UINT32_C(0xa) << 1)
+ /* 2Gb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB \
+ (UINT32_C(0x14) << 1)
+	/* 2.5Gb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB \
+ (UINT32_C(0x19) << 1)
+ /* 10Gb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB \
+ (UINT32_C(0x64) << 1)
+	/* 20Gb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB \
+ (UINT32_C(0xc8) << 1)
+ /* 25Gb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB \
+ (UINT32_C(0xfa) << 1)
+ /* 40Gb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB \
+ (UINT32_C(0x190) << 1)
+ /* 50Gb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB \
+ (UINT32_C(0x1f4) << 1)
+ /* 100Gb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB \
+ (UINT32_C(0x3e8) << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB
+ /* PORT ID */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK \
+ UINT32_C(0xffff0000)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT \
+ 16
+} __attribute__((packed));
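For the link speed change event, bits 1-15 of event_data1 hold the new speed in 100 Mbps units, so converting to Mbps is one mask, one shift, and a multiply. A hedged sketch (the helper name is illustrative):

/* Illustrative sketch: recover the new link speed in Mbps. */
static inline uint32_t
link_speed_change_mbps(
	const struct hwrm_async_event_cmpl_link_speed_change *ae)
{
	uint32_t units = (ae->event_data1 &
	    HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK) >>
	    HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT;

	return units * 100;	/* reported in 100 Mbps units */
}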
+
+/* hwrm_async_event_cmpl_dcb_config_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_dcb_config_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* DCB Configuration changed */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE \
+ UINT32_C(0x3)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ /* ETS configuration change */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_ETS \
+ UINT32_C(0x1)
+ /* PFC configuration change */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_PFC \
+ UINT32_C(0x2)
+ /* APP configuration change */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_APP \
+ UINT32_C(0x4)
+ uint8_t opaque_v;
/*
- * This field represents the update version of HWRM interface
- * specification supported by the driver HWRM implementation.
- * The interface update version is used to reflect minor changes
- * or bug fixes to a released HWRM interface specification.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t unused_0[5];
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* PORT ID */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT \
+ 0
+ /* Priority recommended for RoCE traffic */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_MASK \
+ UINT32_C(0xff0000)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_SFT \
+ 16
+ /* none is 255 */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE \
+ (UINT32_C(0xff) << 16)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_LAST \
+ HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE
+ /* Priority recommended for L2 traffic */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_MASK \
+ UINT32_C(0xff000000)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_SFT \
+ 24
+ /* none is 255 */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE \
+ (UINT32_C(0xff) << 24)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_LAST \
+ HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE
} __attribute__((packed));
-/* Output (128 bytes) */
-struct hwrm_ver_get_output {
- uint16_t error_code;
+/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */
+struct hwrm_async_event_cmpl_port_conn_not_allowed {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT \
+ 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Port connection not allowed */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED \
+ UINT32_C(0x4)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* PORT ID */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT \
+ 0
+ /*
+ * This value indicates the current port level enforcement policy
+ * for the optics module when there is an optical module mismatch
+	 * and the port is not connected.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK \
+ UINT32_C(0xff0000)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT \
+ 16
+ /* No enforcement */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE \
+ (UINT32_C(0x0) << 16)
+ /* Disable Transmit side Laser. */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX \
+ (UINT32_C(0x1) << 16)
+ /* Raise a warning message. */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG \
+ (UINT32_C(0x2) << 16)
+ /* Power down the module. */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN \
+ (UINT32_C(0x3) << 16)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_link_speed_cfg_not_allowed (size:128b/16B) */
+struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT \
+ 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Link speed configuration was not allowed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED \
+ UINT32_C(0x5)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t hwrm_intf_maj;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* PORT ID */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT \
+ 0
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_link_speed_cfg_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT \
+ 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Link speed configuration change */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE \
+ UINT32_C(0x6)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This field represents the major version of HWRM interface
- * specification supported by the HWRM implementation. The
- * interface major version is intended to change only when non
- * backward compatible changes are made to the HWRM interface
- * specification. A HWRM implementation that is compliant with
- * this specification shall provide value of 1 in this field.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t hwrm_intf_min;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* PORT ID */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT \
+ 0
/*
- * This field represents the minor version of HWRM interface
- * specification supported by the HWRM implementation. A change
- * in interface minor version is used to reflect significant
- * backward compatible modification to HWRM interface
- * specification. This can be due to addition or removal of
- * functionality. HWRM interface specifications with the same
- * major version but different minor versions are compatible. A
- * HWRM implementation that is compliant with this specification
- * shall provide value of 2 in this field.
+ * If set to 1, it indicates that the supported link speeds
+ * configuration on the port has changed.
+ * If set to 0, then there is no change in supported link speeds
+ * configuration.
*/
- uint8_t hwrm_intf_upd;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE \
+ UINT32_C(0x10000)
/*
- * This field represents the update version of HWRM interface
- * specification supported by the HWRM implementation. The
- * interface update version is used to reflect minor changes or
- * bug fixes to a released HWRM interface specification. A HWRM
- * implementation that is compliant with this specification
- * shall provide value of 2 in this field.
+ * If set to 1, it indicates that the link speed configuration
+ * on the port has become illegal or invalid.
+ * If set to 0, then the link speed configuration on the port is
+ * legal or valid.
*/
- uint8_t hwrm_intf_rsvd;
- uint8_t hwrm_fw_maj;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG \
+ UINT32_C(0x20000)
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_port_phy_cfg_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_port_phy_cfg_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_SFT \
+ 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Port PHY configuration change */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_PORT_PHY_CFG_CHANGE \
+ UINT32_C(0x7)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_PORT_PHY_CFG_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This field represents the major version of HWRM firmware. A
- * change in firmware major version represents a major firmware
- * release.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t hwrm_fw_min;
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* PORT ID */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT \
+ 0
/*
- * This field represents the minor version of HWRM firmware. A
- * change in firmware minor version represents significant
- * firmware functionality changes.
+ * If set to 1, it indicates that the FEC
+ * configuration on the port has changed.
+ * If set to 0, then there is no change in FEC configuration.
*/
- uint8_t hwrm_fw_bld;
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_FEC_CFG_CHANGE \
+ UINT32_C(0x10000)
/*
- * This field represents the build version of HWRM firmware. A
- * change in firmware build version represents bug fixes to a
- * released firmware.
+ * If set to 1, it indicates that the EEE configuration
+ * on the port has changed.
+ * If set to 0, then there is no change in EEE configuration
+ * on the port.
*/
- uint8_t hwrm_fw_rsvd;
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_EEE_CFG_CHANGE \
+ UINT32_C(0x20000)
/*
- * This field is a reserved field. This field can be used to
- * represent firmware branches or customer specific releases
- * tied to a specific (major,minor,update) version of the HWRM
- * firmware.
+ * If set to 1, it indicates that the pause configuration
+ * on the PHY has changed.
+ * If set to 0, then there is no change in the pause
+ * configuration on the PHY.
*/
- uint8_t mgmt_fw_maj;
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PAUSE_CFG_CHANGE \
+ UINT32_C(0x40000)
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_func_drvr_unload (size:128b/16B) */
+struct hwrm_async_event_cmpl_func_drvr_unload {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Function driver unloaded */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD \
+ UINT32_C(0x10)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This field represents the major version of mgmt firmware. A
- * change in major version represents a major release.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t mgmt_fw_min;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Function ID */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT \
+ 0
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_func_drvr_load (size:128b/16B) */
+struct hwrm_async_event_cmpl_func_drvr_load {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Function driver loaded */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD \
+ UINT32_C(0x11)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This field represents the minor version of mgmt firmware. A
- * change in minor version represents significant functionality
- * changes.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t mgmt_fw_bld;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Function ID */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_func_flr_proc_cmplt (size:128b/16B) */
+struct hwrm_async_event_cmpl_func_flr_proc_cmplt {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_SFT \
+ 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Function FLR related processing has completed */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT \
+ UINT32_C(0x12)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This field represents the build version of mgmt firmware. A
- * change in update version represents bug fixes.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t mgmt_fw_rsvd;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Function ID */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_SFT \
+ 0
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_pf_drvr_unload (size:128b/16B) */
+struct hwrm_async_event_cmpl_pf_drvr_unload {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* PF driver unloaded */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD \
+ UINT32_C(0x20)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This field is a reserved field. This field can be used to
- * represent firmware branches or customer specific releases
- * tied to a specific (major,minor,update) version
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t netctrl_fw_maj;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* PF ID */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
+	/* Indicates the physical port this PF belongs to */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK \
+ UINT32_C(0x70000)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_pf_drvr_load (size:128b/16B) */
+struct hwrm_async_event_cmpl_pf_drvr_load {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* PF driver loaded */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD \
+ UINT32_C(0x21)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This field represents the major version of network control
- * firmware. A change in major version represents a major
- * release.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t netctrl_fw_min;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* PF ID */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+	/* Indicates the physical port this PF belongs to */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_MASK \
+ UINT32_C(0x70000)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_SFT 16
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_vf_flr (size:128b/16B) */
+struct hwrm_async_event_cmpl_vf_flr {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* VF Function Level Reset (FLR) */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR UINT32_C(0x30)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This field represents the minor version of network control
- * firmware. A change in minor version represents significant
- * functionality changes.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t netctrl_fw_bld;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* VF ID */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_vf_mac_addr_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_vf_mac_addr_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* VF MAC Address Change */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE \
+ UINT32_C(0x31)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This field represents the build version of network control
- * firmware. A change in update version represents bug fixes.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t netctrl_fw_rsvd;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* VF ID */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT \
+ 0
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_pf_vf_comm_status_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_pf_vf_comm_status_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT \
+ 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* PF-VF communication channel status change. */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE \
+ UINT32_C(0x32)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This field is a reserved field. This field can be used to
- * represent firmware branches or customer specific releases
- * tied to a specific (major,minor,update) version
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint32_t dev_caps_cfg;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
/*
- * This field is used to indicate device's capabilities and
- * configurations.
+	 * If this bit is set to 1, it indicates that PF-VF
+	 * communication was lost and has now been re-established.
+	 * If this bit is set to 0, it indicates that PF-VF
+	 * communication was established and has now been lost.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED \
+ UINT32_C(0x1)
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_vf_cfg_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* VF Configuration Change */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE \
+ UINT32_C(0x33)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /*
+ * Each flag provided in this field indicates a specific VF
+ * configuration change. At least one of these flags shall be set to 1
+ * when an asynchronous event completion of this type is provided
+ * by the HWRM.
+ */
+ uint32_t event_data1;
/*
- * If set to 1, then secure firmware update behavior is
- * supported. If set to 0, then secure firmware update behavior
- * is not supported.
+ * If this bit is set to 1, then the value of MTU
+ * was changed on this VF.
+ * If set to 0, then this bit should be ignored.
*/
- #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED \
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE \
UINT32_C(0x1)
/*
- * If set to 1, then firmware based DCBX agent is supported. If
- * set to 0, then firmware based DCBX agent capability is not
- * supported on this device.
+ * If this bit is set to 1, then the value of MRU
+ * was changed on this VF.
+ * If set to 0, then this bit should be ignored.
*/
- #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED \
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE \
UINT32_C(0x2)
/*
- * If set to 1, then HWRM short command format is supported. If
- * set to 0, then HWRM short command format is not supported.
+ * If this bit is set to 1, then the value of default MAC
+ * address was changed on this VF.
+ * If set to 0, then this bit should be ignored.
*/
- #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED \
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE \
UINT32_C(0x4)
/*
- * If set to 1, then HWRM short command format is required. If
- * set to 0, then HWRM short command format is not required.
+ * If this bit is set to 1, then the value of default VLAN
+ * was changed on this VF.
+ * If set to 0, then this bit should be ignored.
*/
- #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_INPUTUIRED \
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE \
UINT32_C(0x8)
- uint8_t roce_fw_maj;
- /*
- * This field represents the major version of RoCE firmware. A
- * change in major version represents a major release.
- */
- uint8_t roce_fw_min;
+} __attribute__((packed));
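
For illustration only, a minimal sketch of how a VF driver might decode the change flags carried in event_data1 of this completion. The helper name (bnxt_ex_*) is hypothetical; only the structure and mask names come from the definitions above, and rte_le_to_cpu_32() is DPDK's little-endian conversion helper.

    #include <stdio.h>
    #include <rte_byteorder.h>
    #include "hsi_struct_def_dpdk.h"

    /* Hypothetical helper: report which VF settings changed (sketch only). */
    static void
    bnxt_ex_handle_vf_cfg_change(const struct hwrm_async_event_cmpl_vf_cfg_change *ev)
    {
            uint32_t data1 = rte_le_to_cpu_32(ev->event_data1);

            if (data1 & HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE)
                    printf("VF MTU changed\n");
            if (data1 & HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE)
                    printf("VF MRU changed\n");
            if (data1 & HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE)
                    printf("VF default MAC address changed\n");
            if (data1 & HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE)
                    printf("VF default VLAN changed\n");
    }
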
+
+/* hwrm_async_event_cmpl_llfc_pfc_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_llfc_pfc_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* unused1 is 10 b */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_UNUSED1_MASK \
+ UINT32_C(0xffc0)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_UNUSED1_SFT 6
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* LLFC/PFC Configuration Change */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LLFC_PFC_CHANGE \
+ UINT32_C(0x34)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LLFC_PFC_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This field represents the minor version of RoCE firmware. A
- * change in minor version represents significant functionality
- * changes.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t roce_fw_bld;
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Indicates llfc pfc status change */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_MASK \
+ UINT32_C(0x3)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_SFT \
+ 0
/*
- * This field represents the build version of RoCE firmware. A
- * change in update version represents bug fixes.
+ * If this field is set to 1, then it indicates that llfc is
+ * enabled.
*/
- uint8_t roce_fw_rsvd;
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_LLFC \
+ UINT32_C(0x1)
/*
- * This field is a reserved field. This field can be used to
- * represent firmware branches or customer specific releases
- * tied to a specific (major,minor,update) version
+ * If this field is set to 2, then it indicates that pfc
+ * is enabled.
*/
- char hwrm_fw_name[16];
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_PFC \
+ UINT32_C(0x2)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_PFC
+ /* Indicates the physical port on which this llfc pfc change occurred */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_MASK \
+ UINT32_C(0x1c)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_SFT \
+ 2
+ /* PORT ID */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_ID_MASK \
+ UINT32_C(0x1fffe0)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_ID_SFT \
+ 5
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */
+struct hwrm_async_event_cmpl_hwrm_error {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* HWRM Error */
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR \
+ UINT32_C(0xff)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR
+ /* Event specific data */
+ uint32_t event_data2;
+ /* Severity of HWRM Error */
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK \
+ UINT32_C(0xff)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
+ /* Warning */
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING \
+ UINT32_C(0x0)
+ /* Non-fatal Error */
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL \
+ UINT32_C(0x1)
+ /* Fatal Error */
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL \
+ UINT32_C(0x2)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST \
+ HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
+ uint8_t opaque_v;
/*
- * This field represents the name of HWRM FW (ASCII chars with
- * NULL at the end).
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- char mgmt_fw_name[16];
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Time stamp for error event */
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP \
+ UINT32_C(0x1)
+} __attribute__((packed));
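
The opaque_v comments above describe the toggling valid-bit scheme used by all of these completion records; below is a minimal sketch of checking that bit and extracting the severity field with its MASK/SFT pair. The expected_valid parameter models the ring parity a consumer would track, and the helper name is hypothetical.

    #include <rte_byteorder.h>
    #include "hsi_struct_def_dpdk.h"

    /* Sketch: accept the record only when its valid bit matches the current
     * ring parity, then pull the severity field out of event_data2.
     */
    static int
    bnxt_ex_hwrm_error_severity(const struct hwrm_async_event_cmpl_hwrm_error *ev,
                                uint8_t expected_valid)
    {
            uint32_t data2;

            if ((ev->opaque_v & HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V) != expected_valid)
                    return -1;      /* record not yet written for this pass */

            data2 = rte_le_to_cpu_32(ev->event_data2);
            return (data2 & HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK) >>
                    HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT;
    }
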
+
+/*******************
+ * hwrm_func_reset *
+ *******************/
+
+
+/* hwrm_func_reset_input (size:192b/24B) */
+struct hwrm_func_reset_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This field represents the name of mgmt FW (ASCII chars with
- * NULL at the end).
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- char netctrl_fw_name[16];
+ uint16_t cmpl_ring;
/*
- * This field represents the name of network control firmware
- * (ASCII chars with NULL at the end).
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint32_t reserved2[4];
+ uint16_t seq_id;
/*
- * This field is reserved for future use. The responder should
- * set it to 0. The requester should ignore this field.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- char roce_fw_name[16];
+ uint16_t target_id;
/*
- * This field represents the name of RoCE FW (ASCII chars with
- * NULL at the end).
+ * A physical address pointer pointing to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t chip_num;
- /* This field returns the chip number. */
- uint8_t chip_rev;
- /* This field returns the revision of chip. */
- uint8_t chip_metal;
- /* This field returns the chip metal number. */
- uint8_t chip_bond_id;
- /* This field returns the bond id of the chip. */
- uint8_t chip_platform_type;
+ uint64_t resp_addr;
+ uint32_t enables;
/*
- * This value indicates the type of platform used for chip
- * implementation.
+ * This bit must be '1' for the vf_id_valid field to be
+ * configured.
*/
- /* ASIC */
- #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_ASIC UINT32_C(0x0)
- /* FPGA platform of the chip. */
- #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_FPGA UINT32_C(0x1)
- /* Palladium platform of the chip. */
- #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_PALLADIUM UINT32_C(0x2)
- uint16_t max_req_win_len;
+ #define HWRM_FUNC_RESET_INPUT_ENABLES_VF_ID_VALID UINT32_C(0x1)
/*
- * This field returns the maximum value of request window that
- * is supported by the HWRM. The request window is mapped into
- * device address space using MMIO.
+ * The ID of the VF that this PF is trying to reset.
+ * Only the parent PF shall be allowed to reset a child VF.
+ *
+ * A parent PF driver shall use this field only when a specific child VF
+ * is requested to be reset.
*/
- uint16_t max_resp_len;
- /* This field returns the maximum value of response buffer in bytes. */
- uint16_t def_req_timeout;
+ uint16_t vf_id;
+ /* This value indicates the level of a function reset. */
+ uint8_t func_reset_level;
/*
- * This field returns the default request timeout value in
- * milliseconds.
+ * Reset the caller function and its children VFs (if any). If no
+ * children functions exist, then reset the caller function only.
*/
- uint8_t init_pending;
+ #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETALL \
+ UINT32_C(0x0)
+ /* Reset the caller function only */
+ #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETME \
+ UINT32_C(0x1)
/*
- * This field will indicate if any subsystems is not fully
- * initialized.
+ * Reset all children VFs of the caller function driver if the
+ * caller is a PF driver.
+ * It is an error to specify this level by a VF driver.
+ * It is an error to specify this level by a PF driver with
+ * no children VFs.
*/
+ #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETCHILDREN \
+ UINT32_C(0x2)
/*
- * If set to 1, device is not ready. If set to 0, device is
- * ready to accept all HWRM commands.
+ * Reset a specific VF of the caller function driver if the caller
+ * is the parent PF driver.
+ * It is an error to specify this level by a VF driver.
+ * It is an error to specify this level by a PF driver that is not
+ * the parent of the VF that is being requested to reset.
*/
- #define HWRM_VER_GET_OUTPUT_INIT_PENDING_DEV_NOT_RDY UINT32_C(0x1)
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t valid;
+ #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETVF \
+ UINT32_C(0x3)
+ #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_LAST \
+ HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETVF
+ uint8_t unused_0;
+} __attribute__((packed));
+
+/* hwrm_func_reset_output (size:128b/16B) */
+struct hwrm_func_reset_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
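
As a hedged sketch of how the fields above fit together, a parent PF driver might prepare a request to reset one child VF roughly as follows. The helper name is hypothetical, HWRM_FUNC_RESET is the request-type constant defined elsewhere in this header, and the cmpl_ring/seq_id/resp_addr header fields are assumed to be filled by the driver's generic HWRM send path.

    #include <string.h>
    #include <rte_byteorder.h>
    #include "hsi_struct_def_dpdk.h"

    /* Sketch: reset child VF `vf_id`; remaining header fields are left to
     * the generic HWRM request path used by the driver.
     */
    static void
    bnxt_ex_prep_vf_reset(struct hwrm_func_reset_input *req, uint16_t vf_id)
    {
            memset(req, 0, sizeof(*req));
            req->req_type = rte_cpu_to_le_16(HWRM_FUNC_RESET);
            req->enables = rte_cpu_to_le_32(HWRM_FUNC_RESET_INPUT_ENABLES_VF_ID_VALID);
            req->vf_id = rte_cpu_to_le_16(vf_id);
            req->func_reset_level = HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETVF;
    }
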
-/* hwrm_func_reset */
-/*
- * Description: This command resets a hardware function (PCIe function) and
- * frees any resources used by the function. This command shall be initiated by
- * the driver after an FLR has occurred to prepare the function for re-use. This
- * command may also be initiated by a driver prior to doing it's own
- * configuration. This command puts the function into the reset state. In the
- * reset state, global and port related features of the chip are not available.
- */
-/*
- * Note: This command will reset a function that has already been disabled or
- * idled. The command returns all the resources owned by the function so a new
- * driver may allocate and configure resources normally.
- */
-/* Input (24 bytes) */
-struct hwrm_func_reset_input {
- uint16_t req_type;
+/********************
+ * hwrm_func_getfid *
+ ********************/
+
+
+/* hwrm_func_getfid_input (size:192b/24B) */
+struct hwrm_func_getfid_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t enables;
- /* This bit must be '1' for the vf_id_valid field to be configured. */
- #define HWRM_FUNC_RESET_INPUT_ENABLES_VF_ID_VALID UINT32_C(0x1)
- uint16_t vf_id;
+ uint64_t resp_addr;
+ uint32_t enables;
/*
- * The ID of the VF that this PF is trying to reset. Only the
- * parent PF shall be allowed to reset a child VF. A parent PF
- * driver shall use this field only when a specific child VF is
- * requested to be reset.
+ * This bit must be '1' for the pci_id field to be
+ * configured.
*/
- uint8_t func_reset_level;
- /* This value indicates the level of a function reset. */
+ #define HWRM_FUNC_GETFID_INPUT_ENABLES_PCI_ID UINT32_C(0x1)
/*
- * Reset the caller function and its children
- * VFs (if any). If no children functions exist,
- * then reset the caller function only.
+ * This value is the PCI ID of the queried function.
+ * If ARI is enabled, then it is
+ * Bus Number (8b):Function Number(8b). Otherwise, it is
+ * Bus Number (8b):Device Number (5b):Function Number(3b).
*/
- #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETALL UINT32_C(0x0)
- /* Reset the caller function only */
- #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETME UINT32_C(0x1)
+ uint16_t pci_id;
+ uint8_t unused_0[2];
+} __attribute__((packed));
+
+/* hwrm_func_getfid_output (size:128b/16B) */
+struct hwrm_func_getfid_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/*
- * Reset all children VFs of the caller function
- * driver if the caller is a PF driver. It is an
- * error to specify this level by a VF driver.
- * It is an error to specify this level by a PF
- * driver with no children VFs.
+ * FID value. This value is used to identify operations on the PCI
+ * bus as belonging to a particular PCI function.
*/
- #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETCHILDREN \
- UINT32_C(0x2)
+ uint16_t fid;
+ uint8_t unused_0[5];
/*
- * Reset a specific VF of the caller function
- * driver if the caller is the parent PF driver.
- * It is an error to specify this level by a VF
- * driver. It is an error to specify this level
- * by a PF driver that is not the parent of the
- * VF that is being requested to reset.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETVF UINT32_C(0x3)
- uint8_t unused_0;
+ uint8_t valid;
} __attribute__((packed));
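
A short sketch of composing the pci_id value according to the two layouts described above (ARI: bus[8]:function[8]; otherwise bus[8]:device[5]:function[3]). The helper name is hypothetical.

    #include <stdint.h>

    /* Sketch: build the pci_id field for hwrm_func_getfid from B/D/F. */
    static uint16_t
    bnxt_ex_make_pci_id(uint8_t bus, uint8_t dev, uint8_t func, int ari_enabled)
    {
            if (ari_enabled)
                    return (uint16_t)((bus << 8) | func);
            return (uint16_t)((bus << 8) | ((dev & 0x1f) << 3) | (func & 0x7));
    }
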
-/* Output (16 bytes) */
-struct hwrm_func_reset_output {
- uint16_t error_code;
+/**********************
+ * hwrm_func_vf_alloc *
+ **********************/
+
+
+/* hwrm_func_vf_alloc_input (size:192b/24B) */
+struct hwrm_func_vf_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * A physical address pointer pointing to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint64_t resp_addr;
+ uint32_t enables;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This bit must be '1' for the first_vf_id field to be
+ * configured.
*/
+ #define HWRM_FUNC_VF_ALLOC_INPUT_ENABLES_FIRST_VF_ID UINT32_C(0x1)
+ /*
+ * This value is used to identify a Virtual Function (VF).
+ * The scope of VF ID is local within a PF.
+ */
+ uint16_t first_vf_id;
+ /* The number of virtual functions requested. */
+ uint16_t num_vfs;
} __attribute__((packed));
-/* hwrm_func_vf_cfg */
-/*
- * Description: This command allows configuration of a VF by its driver. If this
- * function is called by a PF driver, then the HWRM shall fail this command. If
- * guest VLAN and/or MAC address are provided in this command, then the HWRM
- * shall set up appropriate MAC/VLAN filters for the VF that is being
- * configured. A VF driver should set VF MTU/MRU using this command prior to
- * allocating RX VNICs or TX rings for the corresponding VF.
- */
-/* Input (32 bytes) */
-struct hwrm_func_vf_cfg_input {
- uint16_t req_type;
+/* hwrm_func_vf_alloc_output (size:128b/16B) */
+struct hwrm_func_vf_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* The ID of the first VF allocated. */
+ uint16_t first_vf_id;
+ uint8_t unused_0[5];
/*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t cmpl_ring;
+ uint8_t valid;
+} __attribute__((packed));
+
+/*********************
+ * hwrm_func_vf_free *
+ *********************/
+
+
+/* hwrm_func_vf_free_input (size:192b/24B) */
+struct hwrm_func_vf_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t cmpl_ring;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint64_t resp_addr;
+ uint16_t seq_id;
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint32_t enables;
- /* This bit must be '1' for the mtu field to be configured. */
- #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_MTU UINT32_C(0x1)
- /* This bit must be '1' for the guest_vlan field to be configured. */
- #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_GUEST_VLAN UINT32_C(0x2)
+ uint16_t target_id;
/*
- * This bit must be '1' for the async_event_cr field to be configured.
+ * A physical address pointer pointing to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR UINT32_C(0x4)
- /* This bit must be '1' for the dflt_mac_addr field to be configured. */
- #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR UINT32_C(0x8)
- uint16_t mtu;
+ uint64_t resp_addr;
+ uint32_t enables;
/*
- * The maximum transmission unit requested on the function. The HWRM
- * should make sure that the mtu of the function does not exceed the mtu
- * of the physical port that this function is associated with. In
- * addition to requesting mtu per function, it is possible to configure
- * mtu per transmit ring. By default, the mtu of each transmit ring
- * associated with a function is equal to the mtu of the function. The
- * HWRM should make sure that the mtu of each transmit ring that is
- * assigned to a function has a valid mtu.
+ * This bit must be '1' for the first_vf_id field to be
+ * configured.
*/
- uint16_t guest_vlan;
+ #define HWRM_FUNC_VF_FREE_INPUT_ENABLES_FIRST_VF_ID UINT32_C(0x1)
/*
- * The guest VLAN for the function being configured. This field's format
- * is same as 802.1Q Tag's Tag Control Information (TCI) format that
- * includes both Priority Code Point (PCP) and VLAN Identifier (VID).
+ * This value is used to identify a Virtual Function (VF).
+ * The scope of VF ID is local within a PF.
*/
- uint16_t async_event_cr;
+ uint16_t first_vf_id;
/*
- * ID of the target completion ring for receiving asynchronous event
- * completions. If this field is not valid, then the HWRM shall use the
- * default completion ring of the function that is being configured as
- * the target completion ring for providing any asynchronous event
- * completions for that function. If this field is valid, then the HWRM
- * shall use the completion ring identified by this ID as the target
- * completion ring for providing any asynchronous event completions for
- * the function that is being configured.
+ * The number of virtual functions requested.
+ * 0xFFFF - Cleanup all children of this PF.
*/
- uint8_t dflt_mac_addr[6];
+ uint16_t num_vfs;
+} __attribute__((packed));
+
+/* hwrm_func_vf_free_output (size:128b/16B) */
+struct hwrm_func_vf_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This value is the current MAC address requested by the VF driver to
- * be configured on this VF. A value of 00-00-00-00-00-00 indicates no
- * MAC address configuration is requested by the VF driver. The parent
- * PF driver may reject or overwrite this MAC address.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
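
Per the num_vfs description above, a value of 0xFFFF asks the firmware to clean up all children of the calling PF. A minimal sketch follows; the helper name is hypothetical and HWRM_FUNC_VF_FREE is the request-type constant defined elsewhere in this header, with the remaining header fields left to the generic send path.

    #include <string.h>
    #include <rte_byteorder.h>
    #include "hsi_struct_def_dpdk.h"

    /* Sketch: free every child VF of this PF in a single request. */
    static void
    bnxt_ex_prep_vf_free_all(struct hwrm_func_vf_free_input *req)
    {
            memset(req, 0, sizeof(*req));
            req->req_type = rte_cpu_to_le_16(HWRM_FUNC_VF_FREE);
            /* enables left at 0: first_vf_id is not supplied */
            req->num_vfs = rte_cpu_to_le_16(0xFFFF);
    }
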
-/* Output (16 bytes) */
-struct hwrm_func_vf_cfg_output {
- uint16_t error_code;
+/********************
+ * hwrm_func_vf_cfg *
+ ********************/
+
+/* hwrm_func_vf_cfg_input (size:448b/56B) */
+struct hwrm_func_vf_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint16_t cmpl_ring;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t seq_id;
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
-} __attribute__((packed));
-
-/* hwrm_func_qcaps */
-/*
- * Description: This command returns capabilities of a function. The input FID
- * value is used to indicate what function is being queried. This allows a
- * physical function driver to query virtual functions that are children of the
- * physical function. The output FID value is needed to configure Rings and
- * MSI-X vectors so their DMA operations appear correctly on the PCI bus.
- */
-/* Input (24 bytes) */
-struct hwrm_func_qcaps_input {
- uint16_t req_type;
+ uint16_t target_id;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * A physical address pointer pointing to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t cmpl_ring;
+ uint64_t resp_addr;
+ uint32_t enables;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * This bit must be '1' for the mtu field to be
+ * configured.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_MTU \
+ UINT32_C(0x1)
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * This bit must be '1' for the guest_vlan field to be
+ * configured.
*/
- uint64_t resp_addr;
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_GUEST_VLAN \
+ UINT32_C(0x2)
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * This bit must be '1' for the async_event_cr field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the dflt_mac_addr field to be
+ * configured.
*/
- uint16_t fid;
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the num_rsscos_ctxs field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS \
+ UINT32_C(0x10)
/*
- * Function ID of the function that is being queried. 0xFF...
- * (All Fs) if the query is for the requesting function.
+ * This bit must be '1' for the num_cmpl_rings field to be
+ * configured.
*/
- uint16_t unused_0[3];
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the num_tx_rings field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the num_rx_rings field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the num_l2_ctxs field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS \
+ UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the num_vnics field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS \
+ UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the num_stat_ctxs field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS \
+ UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the num_hw_ring_grps field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS \
+ UINT32_C(0x800)
+ /*
+ * The maximum transmission unit requested on the function.
+ * The HWRM should make sure that the mtu of
+ * the function does not exceed the mtu of the physical
+ * port that this function is associated with.
+ *
+ * In addition to requesting mtu per function, it is
+ * possible to configure mtu per transmit ring.
+ * By default, the mtu of each transmit ring associated
+ * with a function is equal to the mtu of the function.
+ * The HWRM should make sure that the mtu of each transmit
+ * ring that is assigned to a function has a valid mtu.
+ */
+ uint16_t mtu;
+ /*
+ * The guest VLAN for the function being configured.
+ * This field's format is same as 802.1Q Tag's
+ * Tag Control Information (TCI) format that includes both
+ * Priority Code Point (PCP) and VLAN Identifier (VID).
+ */
+ uint16_t guest_vlan;
+ /*
+ * ID of the target completion ring for receiving asynchronous
+ * event completions. If this field is not valid, then the
+ * HWRM shall use the default completion ring of the function
+ * that is being configured as the target completion ring for
+ * providing any asynchronous event completions for that
+ * function.
+ * If this field is valid, then the HWRM shall use the
+ * completion ring identified by this ID as the target
+ * completion ring for providing any asynchronous event
+ * completions for the function that is being configured.
+ */
+ uint16_t async_event_cr;
+ /*
+ * This value is the current MAC address requested by the VF
+ * driver to be configured on this VF. A value of
+ * 00-00-00-00-00-00 indicates no MAC address configuration
+ * is requested by the VF driver.
+ * The parent PF driver may reject or overwrite this
+ * MAC address.
+ */
+ uint8_t dflt_mac_addr[6];
+ uint32_t flags;
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of TX rings) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST \
+ UINT32_C(0x1)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of RX rings) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST \
+ UINT32_C(0x2)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of CMPL rings) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST \
+ UINT32_C(0x4)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of RSS ctx) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_RSSCOS_CTX_ASSETS_TEST \
+ UINT32_C(0x8)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of ring groups) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST \
+ UINT32_C(0x10)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of stat ctx) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST \
+ UINT32_C(0x20)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of VNICs) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST \
+ UINT32_C(0x40)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of L2 ctx) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_L2_CTX_ASSETS_TEST \
+ UINT32_C(0x80)
+ /* The number of RSS/COS contexts requested for the VF. */
+ uint16_t num_rsscos_ctxs;
+ /* The number of completion rings requested for the VF. */
+ uint16_t num_cmpl_rings;
+ /* The number of transmit rings requested for the VF. */
+ uint16_t num_tx_rings;
+ /* The number of receive rings requested for the VF. */
+ uint16_t num_rx_rings;
+ /* The number of L2 contexts requested for the VF. */
+ uint16_t num_l2_ctxs;
+ /* The number of vnics requested for the VF. */
+ uint16_t num_vnics;
+ /* The number of statistic contexts requested for the VF. */
+ uint16_t num_stat_ctxs;
+ /* The number of HW ring groups requested for the VF. */
+ uint16_t num_hw_ring_grps;
+ uint8_t unused_0[4];
} __attribute__((packed));
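
The *_ASSETS_TEST flags above let a VF driver ask whether a set of resources is available without reserving them. A hedged sketch of a request that only probes TX/RX ring availability follows; the helper name is hypothetical, HWRM_FUNC_VF_CFG is the request-type constant defined elsewhere in this header, and the remaining header fields are assumed to be filled by the generic send path.

    #include <string.h>
    #include <rte_byteorder.h>
    #include "hsi_struct_def_dpdk.h"

    /* Sketch: test (but do not reserve) TX/RX ring availability for a VF. */
    static void
    bnxt_ex_prep_vf_ring_probe(struct hwrm_func_vf_cfg_input *req,
                               uint16_t tx_rings, uint16_t rx_rings)
    {
            memset(req, 0, sizeof(*req));
            req->req_type = rte_cpu_to_le_16(HWRM_FUNC_VF_CFG);
            req->enables = rte_cpu_to_le_32(
                    HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS |
                    HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS);
            req->flags = rte_cpu_to_le_32(
                    HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
                    HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST);
            req->num_tx_rings = rte_cpu_to_le_16(tx_rings);
            req->num_rx_rings = rte_cpu_to_le_16(rx_rings);
    }
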
-/* Output (80 bytes) */
-struct hwrm_func_qcaps_output {
- uint16_t error_code;
+/* hwrm_func_vf_cfg_output (size:128b/16B) */
+struct hwrm_func_vf_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
+/*******************
+ * hwrm_func_qcaps *
+ *******************/
+
+
+/* hwrm_func_qcaps_input (size:192b/24B) */
+struct hwrm_func_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Function ID of the function that is being queried.
+ * 0xFF... (All Fs) if the query is for the requesting
+ * function.
*/
- uint16_t fid;
+ uint16_t fid;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_func_qcaps_output (size:640b/80B) */
+struct hwrm_func_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/*
- * FID value. This value is used to identify operations on the
- * PCI bus as belonging to a particular PCI function.
+ * FID value. This value is used to identify operations on the PCI
+ * bus as belonging to a particular PCI function.
*/
- uint16_t port_id;
+ uint16_t fid;
/*
- * Port ID of port that this function is associated with. Valid
- * only for the PF. 0xFF... (All Fs) if this function is not
- * associated with any port. 0xFF... (All Fs) if this function
- * is called from a VF.
+ * Port ID of port that this function is associated with.
+ * Valid only for the PF.
+ * 0xFF... (All Fs) if this function is not associated with
+ * any port.
+ * 0xFF... (All Fs) if this function is called from a VF.
*/
- uint32_t flags;
+ uint16_t port_id;
+ uint32_t flags;
/* If 1, then Push mode is supported on this function. */
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PUSH_MODE_SUPPORTED UINT32_C(0x1)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PUSH_MODE_SUPPORTED \
+ UINT32_C(0x1)
/*
* If 1, then the global MSI-X auto-masking is enabled for the
* device.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GLOBAL_MSIX_AUTOMASKING \
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GLOBAL_MSIX_AUTOMASKING \
UINT32_C(0x2)
/*
- * If 1, then the Precision Time Protocol (PTP) processing is
- * supported on this function. The HWRM should enable PTP on
- * only a single Physical Function (PF) per port.
+ * If 1, then the Precision Time Protocol (PTP) processing
+ * is supported on this function.
+ * The HWRM should enable PTP on only a single Physical
+ * Function (PF) per port.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED UINT32_C(0x4)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED \
+ UINT32_C(0x4)
/*
- * If 1, then RDMA over Converged Ethernet (RoCE) v1 is
- * supported on this function.
+ * If 1, then RDMA over Converged Ethernet (RoCE) v1
+ * is supported on this function.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V1_SUPPORTED UINT32_C(0x8)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V1_SUPPORTED \
+ UINT32_C(0x8)
/*
- * If 1, then RDMA over Converged Ethernet (RoCE) v2 is
- * supported on this function.
+ * If 1, then RDMA over Converged Ethernet (RoCE) v2
+ * is supported on this function.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V2_SUPPORTED UINT32_C(0x10)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V2_SUPPORTED \
+ UINT32_C(0x10)
/*
- * If 1, then control and configuration of WoL magic packet are
- * supported on this function.
+ * If 1, then control and configuration of WoL magic packet
+ * are supported on this function.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED \
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED \
UINT32_C(0x20)
/*
- * If 1, then control and configuration of bitmap pattern packet
- * are supported on this function.
+ * If 1, then control and configuration of bitmap pattern
+ * packet are supported on this function.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_BMP_SUPPORTED UINT32_C(0x40)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_BMP_SUPPORTED \
+ UINT32_C(0x40)
/*
* If set to 1, then the control and configuration of rate limit
* of an allocated TX ring on the queried function is supported.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_RING_RL_SUPPORTED UINT32_C(0x80)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_RING_RL_SUPPORTED \
+ UINT32_C(0x80)
/*
- * If 1, then control and configuration of minimum and maximum
- * bandwidths are supported on the queried function.
+ * If 1, then control and configuration of minimum and
+ * maximum bandwidths are supported on the queried function.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_BW_CFG_SUPPORTED UINT32_C(0x100)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_BW_CFG_SUPPORTED \
+ UINT32_C(0x100)
/*
- * If the query is for a VF, then this flag shall be ignored. If
- * this query is for a PF and this flag is set to 1, then the PF
- * has the capability to set the rate limits on the TX rings of
- * its children VFs. If this query is for a PF and this flag is
- * set to 0, then the PF does not have the capability to set the
- * rate limits on the TX rings of its children VFs.
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to set the rate limits
+ * on the TX rings of its children VFs.
+ * If this query is for a PF and this flag is set to 0, then
+ * the PF does not have the capability to set the rate limits
+ * on the TX rings of its children VFs.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_TX_RING_RL_SUPPORTED \
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_TX_RING_RL_SUPPORTED \
UINT32_C(0x200)
/*
- * If the query is for a VF, then this flag shall be ignored. If
- * this query is for a PF and this flag is set to 1, then the PF
- * has the capability to set the minimum and/or maximum
- * bandwidths for its children VFs. If this query is for a PF
- * and this flag is set to 0, then the PF does not have the
- * capability to set the minimum or maximum bandwidths for its
- * children VFs.
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to set the minimum and/or
+ * maximum bandwidths for its children VFs.
+ * If this query is for a PF and this flag is set to 0, then
+ * the PF does not have the capability to set the minimum or
+ * maximum bandwidths for its children VFs.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_BW_CFG_SUPPORTED UINT32_C(0x400)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_BW_CFG_SUPPORTED \
+ UINT32_C(0x400)
/*
* Standard TX Ring mode is used for the allocation of TX ring
* and underlying scheduling resources that allow bandwidth
- * reservation and limit settings on the queried function. If
- * set to 1, then standard TX ring mode is supported on the
- * queried function. If set to 0, then standard TX ring mode is
- * not available on the queried function.
+ * reservation and limit settings on the queried function.
+ * If set to 1, then standard TX ring mode is supported
+ * on the queried function.
+ * If set to 0, then standard TX ring mode is not available
+ * on the queried function.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_STD_TX_RING_MODE_SUPPORTED \
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_STD_TX_RING_MODE_SUPPORTED \
UINT32_C(0x800)
- uint8_t mac_address[6];
+ /*
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to detect GENEVE tunnel
+ * flags.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED \
+ UINT32_C(0x1000)
+ /*
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to detect NVGRE tunnel
+ * flags.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED \
+ UINT32_C(0x2000)
+ /*
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to detect GRE tunnel
+ * flags.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GRE_TUN_FLAGS_SUPPORTED \
+ UINT32_C(0x4000)
+ /*
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to detect MPLS tunnel
+ * flags.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_MPLS_TUN_FLAGS_SUPPORTED \
+ UINT32_C(0x8000)
+ /*
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to support pcie stats.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PCIE_STATS_SUPPORTED \
+ UINT32_C(0x10000)
/*
* This value is current MAC address configured for this
- * function. A value of 00-00-00-00-00-00 indicates no MAC
- * address is currently configured.
+ * function. A value of 00-00-00-00-00-00 indicates no
+ * MAC address is currently configured.
*/
- uint16_t max_rsscos_ctx;
+ uint8_t mac_address[6];
/*
- * The maximum number of RSS/COS contexts that can be allocated
- * to the function.
+ * The maximum number of RSS/COS contexts that can be
+ * allocated to the function.
*/
- uint16_t max_cmpl_rings;
+ uint16_t max_rsscos_ctx;
/*
- * The maximum number of completion rings that can be allocated
- * to the function.
+ * The maximum number of completion rings that can be
+ * allocated to the function.
*/
- uint16_t max_tx_rings;
+ uint16_t max_cmpl_rings;
/*
- * The maximum number of transmit rings that can be allocated to
- * the function.
+ * The maximum number of transmit rings that can be
+ * allocated to the function.
*/
- uint16_t max_rx_rings;
+ uint16_t max_tx_rings;
/*
- * The maximum number of receive rings that can be allocated to
- * the function.
+ * The maximum number of receive rings that can be
+ * allocated to the function.
*/
- uint16_t max_l2_ctxs;
+ uint16_t max_rx_rings;
/*
- * The maximum number of L2 contexts that can be allocated to
- * the function.
+ * The maximum number of L2 contexts that can be
+ * allocated to the function.
*/
- uint16_t max_vnics;
+ uint16_t max_l2_ctxs;
/*
- * The maximum number of VNICs that can be allocated to the
- * function.
+ * The maximum number of VNICs that can be
+ * allocated to the function.
*/
- uint16_t first_vf_id;
+ uint16_t max_vnics;
/*
- * The identifier for the first VF enabled on a PF. This is
- * valid only on the PF with SR-IOV enabled. 0xFF... (All Fs) if
- * this command is called on a PF with SR-IOV disabled or on a
- * VF.
+ * The identifier for the first VF enabled on a PF. This
+ * is valid only on the PF with SR-IOV enabled.
+ * 0xFF... (All Fs) if this command is called on a PF with
+ * SR-IOV disabled or on a VF.
*/
- uint16_t max_vfs;
+ uint16_t first_vf_id;
/*
- * The maximum number of VFs that can be allocated to the
- * function. This is valid only on the PF with SR-IOV enabled.
- * 0xFF... (All Fs) if this command is called on a PF with SR-
- * IOV disabled or on a VF.
+ * The maximum number of VFs that can be
+ * allocated to the function. This is valid only on the
+ * PF with SR-IOV enabled. 0xFF... (All Fs) if this
+ * command is called on a PF with SR-IOV disabled or
+ * on a VF.
*/
- uint16_t max_stat_ctx;
+ uint16_t max_vfs;
/*
* The maximum number of statistic contexts that can be
* allocated to the function.
*/
- uint32_t max_encap_records;
+ uint16_t max_stat_ctx;
/*
* The maximum number of Encapsulation records that can be
* offloaded by this function.
*/
- uint32_t max_decap_records;
+ uint32_t max_encap_records;
/*
- * The maximum number of decapsulation records that can be
- * offloaded by this function.
+ * The maximum number of decapsulation records that can
+ * be offloaded by this function.
*/
- uint32_t max_tx_em_flows;
+ uint32_t max_decap_records;
/*
- * The maximum number of Exact Match (EM) flows that can be
+ * The maximum number of Exact Match (EM) flows that can be
* offloaded by this function on the TX side.
*/
- uint32_t max_tx_wm_flows;
+ uint32_t max_tx_em_flows;
/*
- * The maximum number of Wildcard Match (WM) flows that can be
- * offloaded by this function on the TX side.
+ * The maximum number of Wildcard Match (WM) flows that can
+ * be offloaded by this function on the TX side.
*/
- uint32_t max_rx_em_flows;
+ uint32_t max_tx_wm_flows;
/*
- * The maximum number of Exact Match (EM) flows that can be
+ * The maximum number of Exact Match (EM) flows that can be
* offloaded by this function on the RX side.
*/
- uint32_t max_rx_wm_flows;
+ uint32_t max_rx_em_flows;
/*
- * The maximum number of Wildcard Match (WM) flows that can be
- * offloaded by this function on the RX side.
+ * The maximum number of Wildcard Match (WM) flows that can
+ * be offloaded by this function on the RX side.
*/
- uint32_t max_mcast_filters;
+ uint32_t max_rx_wm_flows;
/*
- * The maximum number of multicast filters that can be supported
- * by this function on the RX side.
+ * The maximum number of multicast filters that can
+ * be supported by this function on the RX side.
*/
- uint32_t max_flow_id;
+ uint32_t max_mcast_filters;
/*
- * The maximum value of flow_id that can be supported in
- * completion records.
+ * The maximum value of flow_id that can be supported
+ * in completion records.
*/
- uint32_t max_hw_ring_grps;
+ uint32_t max_flow_id;
/*
- * The maximum number of HW ring groups that can be supported on
- * this function.
+ * The maximum number of HW ring groups that can be
+ * supported on this function.
*/
- uint16_t max_sp_tx_rings;
+ uint32_t max_hw_ring_grps;
/*
- * The maximum number of strict priority transmit rings that can
- * be allocated to the function. This number indicates the
- * maximum number of TX rings that can be assigned strict
- * priorities out of the maximum number of TX rings that can be
- * allocated (max_tx_rings) to the function.
+ * The maximum number of strict priority transmit rings
+ * that can be allocated to the function.
+ * This number indicates the maximum number of TX rings
+ * that can be assigned strict priorities out of the
+ * maximum number of TX rings that can be allocated
+ * (max_tx_rings) to the function.
*/
- uint8_t unused_0;
- uint8_t valid;
+ uint16_t max_sp_tx_rings;
+ uint8_t unused_0;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
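/*
 * Illustrative sketch (not taken from this patch): every HWRM output record
 * ends with the `valid` byte documented above, and the firmware writes that
 * byte last. A consumer therefore polls it before trusting any other field.
 * The iteration bound and the bare busy-wait are assumptions kept for
 * brevity; a real driver would add a short delay and a read barrier between
 * the check and the subsequent field reads.
 */
#include <errno.h>
#include <stdint.h>

static int example_hwrm_wait_valid(const volatile uint8_t *valid_byte,
				   unsigned int max_polls)
{
	unsigned int i;

	for (i = 0; i < max_polls; i++) {
		if (*valid_byte == 1)
			return 0;	/* response completely written to RAM */
	}
	return -ETIMEDOUT;
}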
-/* hwrm_func_qcfg */
-/*
- * Description: This command returns the current configuration of a function.
- * The input FID value is used to indicate what function is being queried. This
- * allows a physical function driver to query virtual functions that are
- * children of the physical function. The output FID value is needed to
- * configure Rings and MSI-X vectors so their DMA operations appear correctly on
- * the PCI bus. This command should be called by every driver after
- * 'hwrm_func_cfg' to get the actual number of resources allocated by the HWRM.
- * The values returned by hwrm_func_qcfg are the values the driver shall use.
- * These values may be different than what was originally requested in the
- * 'hwrm_func_cfg' command.
- */
-/* Input (24 bytes) */
+/******************
+ * hwrm_func_qcfg *
+ ******************/
+
+
+/* hwrm_func_qcfg_input (size:192b/24B) */
struct hwrm_func_qcfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t fid;
+ uint64_t resp_addr;
/*
- * Function ID of the function that is being queried. 0xFF...
- * (All Fs) if the query is for the requesting function.
+ * Function ID of the function that is being queried.
+ * 0xFF... (All Fs) if the query is for the requesting
+ * function.
*/
- uint16_t unused_0[3];
+ uint16_t fid;
+ uint8_t unused_0[6];
} __attribute__((packed));
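/*
 * Illustrative sketch (not taken from this patch): how a caller might fill
 * the common request header documented above before posting the message.
 * HWRM_FUNC_QCFG is the request type constant defined elsewhere in this
 * header; the sequence number, DMA address and eventual send routine are
 * left to the caller and are assumptions here. HWRM fields are little-endian
 * on the wire, so a big-endian host would additionally convert each value
 * (DPDK drivers typically use rte_cpu_to_le_16()/rte_cpu_to_le_64()).
 */
#include <string.h>

static void example_build_func_qcfg(struct hwrm_func_qcfg_input *req,
				    uint16_t seq, uint64_t resp_dma)
{
	memset(req, 0, sizeof(*req));
	req->req_type = HWRM_FUNC_QCFG;	/* command type */
	req->cmpl_ring = (uint16_t)-1;	/* no completion ring event */
	req->seq_id = seq;		/* opaque, echoed back in the response */
	req->target_id = 0xffff;	/* 0xFFFF addresses the HWRM itself */
	req->resp_addr = resp_dma;	/* buffer for hwrm_func_qcfg_output */
	req->fid = 0xffff;		/* all Fs: query the requesting function */
}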
-/* Output (72 bytes) */
+/* hwrm_func_qcfg_output (size:640b/80B) */
struct hwrm_func_qcfg_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint16_t fid;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/*
- * FID value. This value is used to identify operations on the
- * PCI bus as belonging to a particular PCI function.
+ * FID value. This value is used to identify operations on the PCI
+ * bus as belonging to a particular PCI function.
*/
- uint16_t port_id;
+ uint16_t fid;
/*
* Port ID of port that this function is associated with.
- * 0xFF... (All Fs) if this function is not associated with any
- * port.
+ * 0xFF... (All Fs) if this function is not associated with
+ * any port.
*/
- uint16_t vlan;
+ uint16_t port_id;
/*
- * This value is the current VLAN setting for this function. The
- * value of 0 for this field indicates no priority tagging or
- * VLAN is used. This field's format is same as 802.1Q Tag's Tag
- * Control Information (TCI) format that includes both Priority
- * Code Point (PCP) and VLAN Identifier (VID).
+ * This value is the current VLAN setting for this
+ * function. The value of 0 for this field indicates
+ * no priority tagging or VLAN is used.
+ * This field's format is same as 802.1Q Tag's
+ * Tag Control Information (TCI) format that includes both
+ * Priority Code Point (PCP) and VLAN Identifier (VID).
*/
- uint16_t flags;
+ uint16_t vlan;
+ uint16_t flags;
/*
* If 1, then magic packet based Out-Of-Box WoL is enabled on
* the port associated with this function.
*/
- #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_OOB_WOL_MAGICPKT_ENABLED \
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_OOB_WOL_MAGICPKT_ENABLED \
UINT32_C(0x1)
/*
- * If 1, then bitmap pattern based Out-Of-Box WoL packet is
- * enabled on the port associated with this function.
+ * If 1, then bitmap pattern based Out-Of-Box WoL packet is enabled
+ * on the port associated with this function.
*/
- #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_OOB_WOL_BMP_ENABLED UINT32_C(0x2)
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_OOB_WOL_BMP_ENABLED \
+ UINT32_C(0x2)
/*
- * If set to 1, then FW based DCBX agent is enabled and running
- * on the port associated with this function. If set to 0, then
- * DCBX agent is not running in the firmware.
+ * If set to 1, then FW based DCBX agent is enabled and running on
+ * the port associated with this function.
+ * If set to 0, then DCBX agent is not running in the firmware.
*/
#define HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_DCBX_AGENT_ENABLED \
UINT32_C(0x4)
/*
* Standard TX Ring mode is used for the allocation of TX ring
* and underlying scheduling resources that allow bandwidth
- * reservation and limit settings on the queried function. If
- * set to 1, then standard TX ring mode is enabled on the
- * queried function. If set to 0, then the standard TX ring mode
- * is disabled on the queried function. In this extended TX ring
- * resource mode, the minimum and maximum bandwidth settings are
- * not supported to allow the allocation of TX rings to span
- * multiple scheduler nodes.
- */
- #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_STD_TX_RING_MODE_ENABLED \
+ * reservation and limit settings on the queried function.
+ * If set to 1, then standard TX ring mode is enabled
+ * on the queried function.
+ * If set to 0, then the standard TX ring mode is disabled
+ * on the queried function. In this extended TX ring resource
+ * mode, the minimum and maximum bandwidth settings are not
+ * supported to allow the allocation of TX rings to span multiple
+ * scheduler nodes.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_STD_TX_RING_MODE_ENABLED \
UINT32_C(0x8)
/*
- * If set to 1 then FW based LLDP agent is enabled and running
- * on the port associated with this function. If set to 0 then
- * the LLDP agent is not running in the firmware.
+ * If set to 1 then FW based LLDP agent is enabled and running on
+ * the port associated with this function.
+ * If set to 0 then the LLDP agent is not running in the firmware.
*/
- #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_LLDP_AGENT_ENABLED UINT32_C(0x10)
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_LLDP_AGENT_ENABLED \
+ UINT32_C(0x10)
/*
- * If set to 1, then multi-host mode is active for this
- * function. If set to 0, then multi-host mode is inactive for
- * this function or not applicable for this device.
+ * If set to 1, then multi-host mode is active for this function.
+ * If set to 0, then multi-host mode is inactive for this function
+ * or not applicable for this device.
*/
- #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST UINT32_C(0x20)
- uint8_t mac_address[6];
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST \
+ UINT32_C(0x20)
/*
* This value is current MAC address configured for this
- * function. A value of 00-00-00-00-00-00 indicates no MAC
- * address is currently configured.
+ * function. A value of 00-00-00-00-00-00 indicates no
+ * MAC address is currently configured.
*/
- uint16_t pci_id;
+ uint8_t mac_address[6];
/*
- * This value is current PCI ID of this function. If ARI is
- * enabled, then it is Bus Number (8b):Function Number(8b).
- * Otherwise, it is Bus Number (8b):Device Number (4b):Function
- * Number(4b).
+ * This value is the current PCI ID of this
+ * function. If ARI is enabled, then it is
+ * Bus Number (8b):Function Number(8b). Otherwise, it is
+ * Bus Number (8b):Device Number (4b):Function Number(4b).
+ * If multi-host mode is active, the 4 lsb will indicate
+ * the PF index for this function.
*/
- uint16_t alloc_rsscos_ctx;
+ uint16_t pci_id;
/*
- * The number of RSS/COS contexts currently allocated to the
- * function.
+ * The number of RSS/COS contexts currently
+ * allocated to the function.
*/
- uint16_t alloc_cmpl_rings;
+ uint16_t alloc_rsscos_ctx;
/*
- * The number of completion rings currently allocated to the
- * function. This does not include the rings allocated to any
- * children functions if any.
+ * The number of completion rings currently allocated to
+ * the function. This does not include the rings allocated
+ * to any children functions if any.
*/
- uint16_t alloc_tx_rings;
+ uint16_t alloc_cmpl_rings;
/*
- * The number of transmit rings currently allocated to the
- * function. This does not include the rings allocated to any
- * children functions if any.
+ * The number of transmit rings currently allocated to
+ * the function. This does not include the rings allocated
+ * to any children functions if any.
*/
- uint16_t alloc_rx_rings;
+ uint16_t alloc_tx_rings;
/*
- * The number of receive rings currently allocated to the
- * function. This does not include the rings allocated to any
- * children functions if any.
+ * The number of receive rings currently allocated to
+ * the function. This does not include the rings allocated
+ * to any children functions if any.
*/
- uint16_t alloc_l2_ctx;
+ uint16_t alloc_rx_rings;
/* The allocated number of L2 contexts to the function. */
- uint16_t alloc_vnics;
+ uint16_t alloc_l2_ctx;
/* The allocated number of vnics to the function. */
- uint16_t mtu;
+ uint16_t alloc_vnics;
/*
- * The maximum transmission unit of the function. For rings
- * allocated on this function, this default value is used if
- * ring MTU is not specified.
+ * The maximum transmission unit of the function.
+ * For rings allocated on this function, this default
+ * value is used if ring MTU is not specified.
*/
- uint16_t mru;
+ uint16_t mtu;
/*
- * The maximum receive unit of the function. For vnics allocated
- * on this function, this default value is used if vnic MRU is
- * not specified.
+ * The maximum receive unit of the function.
+ * For vnics allocated on this function, this default
+ * value is used if vnic MRU is not specified.
*/
- uint16_t stat_ctx_id;
+ uint16_t mru;
/* The statistics context assigned to a function. */
- uint8_t port_partition_type;
+ uint16_t stat_ctx_id;
/*
- * The HWRM shall return Unknown value for this field when this
- * command is used to query VF's configuration.
+ * The HWRM shall return Unknown value for this field
+ * when this command is used to query VF's configuration.
*/
+ uint8_t port_partition_type;
/* Single physical function */
- #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_SPF UINT32_C(0x0)
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_SPF UINT32_C(0x0)
/* Multiple physical functions */
- #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_MPFS UINT32_C(0x1)
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_MPFS UINT32_C(0x1)
/* Network Partitioning 1.0 */
#define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0 UINT32_C(0x2)
/* Network Partitioning 1.5 */
@@ -2845,62 +5510,64 @@ struct hwrm_func_qcfg_output {
/* Network Partitioning 2.0 */
#define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0 UINT32_C(0x4)
/* Unknown */
- #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_UNKNOWN UINT32_C(0xff)
- uint8_t port_pf_cnt;
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_UNKNOWN \
+ UINT32_C(0xff)
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_LAST \
+ HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_UNKNOWN
/*
- * This field will indicate number of physical functions on this
- * port_partition. HWRM shall return unavail (i.e. value of 0)
- * for this field when this command is used to query VF's
- * configuration or from older firmware that doesn't support
- * this field.
+ * This field will indicate the number of physical functions on this port_partition.
+ * HWRM shall return unavail (i.e. value of 0) for this field
+ * when this command is used to query VF's configuration or
+ * from older firmware that doesn't support this field.
*/
+ uint8_t port_pf_cnt;
/* number of PFs is not available */
- #define HWRM_FUNC_QCFG_OUTPUT_PORT_PF_CNT_UNAVAIL UINT32_C(0x0)
- uint16_t dflt_vnic_id;
- /* The default VNIC ID assigned to a function that is being queried. */
- uint16_t max_mtu_configured;
- /*
- * This value specifies the MAX MTU that can be configured by
- * host drivers. This 'max_mtu_configure' can be HW max MTU or
- * OEM applications specified value. Host drivers can't
- * configure the MTU greater than this value. Host drivers
- * should read this value prior to configuring the MTU. FW will
- * fail the host request with MTU greater than
- * 'max_mtu_configured'.
- */
- uint32_t min_bw;
- /*
- * Minimum BW allocated for this function. The HWRM will
- * translate this value into byte counter and time interval used
- * for the scheduler inside the device. A value of 0 indicates
- * the minimum bandwidth is not configured.
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PF_CNT_UNAVAIL UINT32_C(0x0)
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PF_CNT_LAST \
+ HWRM_FUNC_QCFG_OUTPUT_PORT_PF_CNT_UNAVAIL
+ /*
+ * The default VNIC ID assigned to a function that is
+ * being queried.
*/
+ uint16_t dflt_vnic_id;
+ uint16_t max_mtu_configured;
+ /*
+ * Minimum BW allocated for this function.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for the scheduler inside the device.
+ * A value of 0 indicates the minimum bandwidth is not
+ * configured.
+ */
+ uint32_t min_bw;
/* The bandwidth value. */
- #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_MASK UINT32_C(0xfffffff)
- #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_SFT 0
- /* The granularity of the value (bits or bytes). */
- #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE UINT32_C(0x10000000)
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_SFT 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
/* Value is in bits. */
- #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BITS (UINT32_C(0x0) << 28)
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
/* Value is in bytes. */
#define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BYTES \
(UINT32_C(0x1) << 28)
#define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_LAST \
- FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BYTES
+ HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BYTES
/* bw_value_unit is 3 b */
- #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MASK \
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MASK \
UINT32_C(0xe0000000)
- #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_SFT 29
- /* Value is in Mb or MB (base 10). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_SFT 29
+ /* Value is in Mb or MB (base 10). */
#define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MEGA \
(UINT32_C(0x0) << 29)
- /* Value is in Kb or KB (base 10). */
+ /* Value is in Kb or KB (base 10). */
#define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_KILO \
(UINT32_C(0x2) << 29)
/* Value is in bits or bytes. */
#define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_BASE \
(UINT32_C(0x4) << 29)
- /* Value is in Gb or GB (base 10). */
+ /* Value is in Gb or GB (base 10). */
#define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_GIGA \
(UINT32_C(0x6) << 29)
/* Value is in 1/100th of a percentage of total bandwidth. */
@@ -2910,40 +5577,44 @@ struct hwrm_func_qcfg_output {
#define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_INVALID \
(UINT32_C(0x7) << 29)
#define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_LAST \
- FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_INVALID
- uint32_t max_bw;
+ HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_INVALID
/*
- * Maximum BW allocated for this function. The HWRM will
- * translate this value into byte counter and time interval used
- * for the scheduler inside the device. A value of 0 indicates
- * that the maximum bandwidth is not configured.
+ * Maximum BW allocated for this function.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for the scheduler inside the device.
+ * A value of 0 indicates that the maximum bandwidth is not
+ * configured.
*/
+ uint32_t max_bw;
/* The bandwidth value. */
- #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_MASK UINT32_C(0xfffffff)
- #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_SFT 0
- /* The granularity of the value (bits or bytes). */
- #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE UINT32_C(0x10000000)
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_SFT 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
/* Value is in bits. */
- #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BITS (UINT32_C(0x0) << 28)
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
/* Value is in bytes. */
#define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BYTES \
(UINT32_C(0x1) << 28)
#define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_LAST \
- FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BYTES
+ HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BYTES
/* bw_value_unit is 3 b */
- #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MASK \
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MASK \
UINT32_C(0xe0000000)
- #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_SFT 29
- /* Value is in Mb or MB (base 10). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_SFT 29
+ /* Value is in Mb or MB (base 10). */
#define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MEGA \
(UINT32_C(0x0) << 29)
- /* Value is in Kb or KB (base 10). */
+ /* Value is in Kb or KB (base 10). */
#define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_KILO \
(UINT32_C(0x2) << 29)
/* Value is in bits or bytes. */
#define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_BASE \
(UINT32_C(0x4) << 29)
- /* Value is in Gb or GB (base 10). */
+ /* Value is in Gb or GB (base 10). */
#define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_GIGA \
(UINT32_C(0x6) << 29)
/* Value is in 1/100th of a percentage of total bandwidth. */
@@ -2953,566 +5624,722 @@ struct hwrm_func_qcfg_output {
#define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_INVALID \
(UINT32_C(0x7) << 29)
#define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_LAST \
- FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_INVALID
- uint8_t evb_mode;
+ HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_INVALID
/*
* This value indicates the Edge virtual bridge mode for the
* domain that this function belongs to.
*/
- /* No Edge Virtual Bridging (EVB) */
- #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_NO_EVB UINT32_C(0x0)
- /* Virtual Ethernet Bridge (VEB) */
- #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEB UINT32_C(0x1)
- /* Virtual Ethernet Port Aggregator (VEPA) */
- #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEPA UINT32_C(0x2)
- uint8_t unused_0;
- uint16_t alloc_vfs;
+ uint8_t evb_mode;
+ /* No Edge Virtual Bridging (EVB) */
+ #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_NO_EVB UINT32_C(0x0)
+ /* Virtual Ethernet Bridge (VEB) */
+ #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEB UINT32_C(0x1)
+ /* Virtual Ethernet Port Aggregator (VEPA) */
+ #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEPA UINT32_C(0x2)
+ #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_LAST \
+ HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEPA
+ uint8_t options;
+ /*
+ * This value indicates the PCIE device cache line size.
+ * The cache line size allows the DMA writes to terminate and
+ * start at the cache boundary.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_MASK \
+ UINT32_C(0x3)
+ #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SFT 0
+ /* Cache Line Size 64 bytes */
+ #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SIZE_64 \
+ UINT32_C(0x0)
+ /* Cache Line Size 128 bytes */
+ #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SIZE_128 \
+ UINT32_C(0x1)
+ #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_LAST \
+ HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SIZE_128
+ /* Reserved for future. */
+ #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_RSVD_MASK \
+ UINT32_C(0xfc)
+ #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_RSVD_SFT 2
+ /*
+ * The number of VFs that are allocated to the function.
+ * This is valid only on the PF with SR-IOV enabled.
+ * 0xFF... (All Fs) if this command is called on a PF with
+ * SR-IOV disabled or on a VF.
+ */
+ uint16_t alloc_vfs;
/*
- * The number of VFs that are allocated to the function. This is
- * valid only on the PF with SR-IOV enabled. 0xFF... (All Fs) if
- * this command is called on a PF with SR-IOV disabled or on a
- * VF.
+ * The number of allocated multicast filters for this
+ * function on the RX side.
*/
- uint32_t alloc_mcast_filters;
+ uint32_t alloc_mcast_filters;
/*
- * The number of allocated multicast filters for this function
- * on the RX side.
+ * The number of allocated HW ring groups for this
+ * function.
+ */
+ uint32_t alloc_hw_ring_grps;
+ /*
+ * The number of strict priority transmit rings out of
+ * currently allocated TX rings to the function
+ * (alloc_tx_rings).
+ */
+ uint16_t alloc_sp_tx_rings;
+ /*
+ * The number of statistics contexts
+ * currently reserved for the function.
*/
- uint32_t alloc_hw_ring_grps;
- /* The number of allocated HW ring groups for this function. */
- uint16_t alloc_sp_tx_rings;
+ uint16_t alloc_stat_ctx;
/*
- * The number of strict priority transmit rings out of currently
- * allocated TX rings to the function (alloc_tx_rings).
+ * This field specifies how many NQs are reserved for the PF.
+ * Remaining NQs that belong to the PF are available for VFs.
+ * Once a PF has created VFs, it cannot change how many NQs are
+ * reserved for itself (since the NQs must be contiguous in HW).
*/
- uint8_t unused_1;
- uint8_t valid;
+ uint16_t alloc_msix;
+ uint8_t unused_2[5];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
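/*
 * Illustrative sketch (not taken from this patch): decoding the min_bw word
 * with the MASK/SFT/SCALE defines above. Bits 0-27 carry the bandwidth
 * value, bit 28 selects bits vs. bytes and bits 29-31 select the unit; the
 * same layout applies to max_bw through the MAX_BW_* defines.
 */
static inline uint32_t example_min_bw_value(uint32_t min_bw)
{
	return (min_bw & HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_MASK) >>
		HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_SFT;
}

static inline int example_min_bw_in_bytes(uint32_t min_bw)
{
	return (min_bw & HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE) ==
		HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BYTES;
}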
-/* hwrm_func_vlan_qcfg */
-/*
- * Description: This command should be called by PF driver to get the current
- * C-TAG, S-TAG and correcponsing PCP and TPID values configured for the
- * function.
- */
-/* Input (24 bytes) */
+/***********************
+ * hwrm_func_vlan_qcfg *
+ ***********************/
+
+
+/* hwrm_func_vlan_qcfg_input (size:192b/24B) */
struct hwrm_func_vlan_qcfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t fid;
+ uint64_t resp_addr;
/*
- * Function ID of the function that is being configured. If set
- * to 0xFF... (All Fs), then the configuration is for the
- * requesting function.
+ * Function ID of the function that is being
+ * configured.
+ * If set to 0xFF... (All Fs), then the configuration is
+ * for the requesting function.
*/
- uint16_t unused_0[3];
-};
+ uint16_t fid;
+ uint8_t unused_0[6];
+} __attribute__((packed));
-/* Output (40 bytes) */
+/* hwrm_func_vlan_qcfg_output (size:320b/40B) */
struct hwrm_func_vlan_qcfg_output {
- uint16_t error_code;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
- /*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
- */
- uint16_t stag_vid;
+ uint8_t valid;
/* S-TAG VLAN identifier configured for the function. */
- uint8_t stag_pcp;
+ uint16_t stag_vid;
/* S-TAG PCP value configured for the function. */
- uint8_t unused_4;
- uint16_t stag_tpid;
+ uint8_t stag_pcp;
+ uint8_t unused_1;
/*
- * S-TAG TPID value configured for the function. This field is
- * specified in network byte order.
+ * S-TAG TPID value configured for the function. This field is specified in
+ * network byte order.
*/
- uint16_t ctag_vid;
+ uint16_t stag_tpid;
/* C-TAG VLAN identifier configured for the function. */
- uint8_t ctag_pcp;
+ uint16_t ctag_vid;
/* C-TAG PCP value configured for the function. */
- uint8_t unused_5;
- uint16_t ctag_tpid;
+ uint8_t ctag_pcp;
+ uint8_t unused_2;
/*
- * C-TAG TPID value configured for the function. This field is
- * specified in network byte order.
+ * C-TAG TPID value configured for the function. This field is specified in
+ * network byte order.
*/
- uint32_t rsvd2;
+ uint16_t ctag_tpid;
/* Future use. */
- uint32_t rsvd3;
+ uint32_t rsvd2;
/* Future use. */
- uint32_t unused_6;
-};
+ uint32_t rsvd3;
+ uint32_t unused_3;
+} __attribute__((packed));
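/*
 * Illustrative sketch (not taken from this patch): per the comments above,
 * only the TPID fields of this response are carried in network byte order,
 * so a consumer converts just those. rte_be_to_cpu_16() from
 * <rte_byteorder.h> is used here as the usual DPDK helper; that choice is an
 * assumption, not something this structure mandates.
 */
#include <rte_byteorder.h>

static uint16_t example_stag_tpid(const struct hwrm_func_vlan_qcfg_output *resp)
{
	return rte_be_to_cpu_16(resp->stag_tpid);	/* e.g. 0x8100 or 0x88a8 */
}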
-/* hwrm_func_vlan_cfg */
-/*
- * Description: This command allows PF driver to configure C-TAG, S-TAG and
- * corresponding PCP and TPID values for a function.
- */
-/* Input (48 bytes) */
+/**********************
+ * hwrm_func_vlan_cfg *
+ **********************/
+
+
+/* hwrm_func_vlan_cfg_input (size:384b/48B) */
struct hwrm_func_vlan_cfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Function ID of the function that is being
+ * configured.
+ * If set to 0xFF... (All Fs), then the configuration is
+ * for the requesting function.
+ */
+ uint16_t fid;
+ uint8_t unused_0[2];
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the stag_vid field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_VID UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the ctag_vid field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_VID UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the stag_pcp field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_PCP UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the ctag_pcp field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_PCP UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the stag_tpid field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_TPID UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the ctag_tpid field to be
+ * configured.
*/
- uint16_t fid;
- /*
- * Function ID of the function that is being configured. If set
- * to 0xFF... (All Fs), then the configuration is for the
- * requesting function.
- */
- uint8_t unused_0;
- uint8_t unused_1;
- uint32_t enables;
- /* This bit must be '1' for the stag_vid field to be configured. */
- #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_VID UINT32_C(0x1)
- /* This bit must be '1' for the ctag_vid field to be configured. */
- #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_VID UINT32_C(0x2)
- /* This bit must be '1' for the stag_pcp field to be configured. */
- #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_PCP UINT32_C(0x4)
- /* This bit must be '1' for the ctag_pcp field to be configured. */
- #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_PCP UINT32_C(0x8)
- /* This bit must be '1' for the stag_tpid field to be configured. */
- #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_TPID UINT32_C(0x10)
- /* This bit must be '1' for the ctag_tpid field to be configured. */
- #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_TPID UINT32_C(0x20)
- uint16_t stag_vid;
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_TPID UINT32_C(0x20)
/* S-TAG VLAN identifier configured for the function. */
- uint8_t stag_pcp;
+ uint16_t stag_vid;
/* S-TAG PCP value configured for the function. */
- uint8_t unused_2;
- uint16_t stag_tpid;
+ uint8_t stag_pcp;
+ uint8_t unused_1;
/*
- * S-TAG TPID value configured for the function. This field is
- * specified in network byte order.
+ * S-TAG TPID value configured for the function. This field is specified in
+ * network byte order.
*/
- uint16_t ctag_vid;
+ uint16_t stag_tpid;
/* C-TAG VLAN identifier configured for the function. */
- uint8_t ctag_pcp;
+ uint16_t ctag_vid;
/* C-TAG PCP value configured for the function. */
- uint8_t unused_3;
- uint16_t ctag_tpid;
+ uint8_t ctag_pcp;
+ uint8_t unused_2;
/*
- * C-TAG TPID value configured for the function. This field is
- * specified in network byte order.
+ * C-TAG TPID value configured for the function. This field is specified in
+ * network byte order.
*/
- uint32_t rsvd1;
+ uint16_t ctag_tpid;
/* Future use. */
- uint32_t rsvd2;
+ uint32_t rsvd1;
/* Future use. */
- uint32_t unused_4;
-};
+ uint32_t rsvd2;
+ uint8_t unused_3[4];
+} __attribute__((packed));
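/*
 * Illustrative sketch (not taken from this patch): the enables-bit pattern
 * documented above means a field is only honoured by the firmware when its
 * matching ENABLES bit is set. The helper below only stages the S-TAG fields
 * of the request; building the header and sending the message are omitted
 * and would follow the same pattern as the earlier sketches.
 */
static void example_stage_stag(struct hwrm_func_vlan_cfg_input *req,
			       uint16_t fid, uint16_t vid, uint8_t pcp)
{
	req->fid = fid;
	req->stag_vid = vid;
	req->stag_pcp = pcp;
	req->enables |= HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_VID |
			HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_PCP;
}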
-/* Output (16 bytes) */
+/* hwrm_func_vlan_cfg_output (size:128b/16B) */
struct hwrm_func_vlan_cfg_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
-};
+ uint8_t valid;
+} __attribute__((packed));
-/* hwrm_func_cfg */
-/*
- * Description: This command allows configuration of a PF by the corresponding
- * PF driver. This command also allows configuration of a child VF by its parent
- * PF driver. The input FID value is used to indicate what function is being
- * configured. This allows a PF driver to configure the PF owned by itself or a
- * virtual function that is a child of the PF. This command allows to reserve
- * resources for a VF by its parent PF. To reverse the process, the command
- * should be called with all enables flags cleared for resources. This will free
- * allocated resources for the VF and return them to the resource pool. If this
- * command is requested by a VF driver to configure or reserve resources, then
- * the HWRM shall fail this command. If default MAC address and/or VLAN are
- * provided in this command, then the HWRM shall set up appropriate MAC/VLAN
- * filters for the function that is being configured. If source properties
- * checks are enabled and default MAC address and/or IP address are provided in
- * this command, then the HWRM shall set appropriate source property checks
- * based on provided MAC and/or IP addresses. The parent PF driver should not
- * set MTU/MRU for a VF using this command. This is to allow MTU/MRU setting by
- * the VF driver. If the MTU or MRU for a VF is set by the PF driver, then the
- * HWRM should ignore it. A function's MTU/MRU should be set prior to allocating
- * RX VNICs or TX rings. A PF driver calls hwrm_func_cfg to allocate resources
- * for itself or its children VFs. All function drivers shall call hwrm_func_cfg
- * to reserve resources. A request to hwrm_func_cfg may not be fully granted;
- * that is, a request for resources may be larger than what can be supported by
- * the device and the HWRM will allocate the best set of resources available,
- * but that may be less than requested. If all the amounts requested could not
- * be fulfilled, the HWRM shall allocate what it could and return a status code
- * of success. A function driver should call hwrm_func_qcfg immediately after
- * hwrm_func_cfg to determine what resources were assigned to the configured
- * function. A call by a PF driver to hwrm_func_cfg to allocate resources for
- * itself shall only allocate resources for the PF driver to use, not for its
- * children VFs. Likewise, a call to hwrm_func_qcfg shall return the resources
- * available for the PF driver to use, not what is available to its children
- * VFs.
- */
-/* Input (88 bytes) */
+/*****************
+ * hwrm_func_cfg *
+ *****************/
+
+
+/* hwrm_func_cfg_input (size:704b/88B) */
struct hwrm_func_cfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t fid;
+ uint64_t resp_addr;
/*
- * Function ID of the function that is being configured. If set
- * to 0xFF... (All Fs), then the configuration is for the
- * requesting function.
+ * Function ID of the function that is being
+ * configured.
+ * If set to 0xFF... (All Fs), then the configuration is
+ * for the requesting function.
*/
- uint8_t unused_0;
- uint8_t unused_1;
- uint32_t flags;
+ uint16_t fid;
/*
- * When this bit is '1', the function is disabled with source
- * MAC address check. This is an anti-spoofing check. If this
- * flag is set, then the function shall be configured to
- * disallow transmission of frames with the source MAC address
- * that is configured for this function.
+ * This field specifies how many NQs will be reserved for the PF.
+ * Remaining NQs that belong to the PF become available for VFs.
+ * Once a PF has created VFs, it cannot change how many NQs are
+ * reserved for itself (since the NQs must be contiguous in HW).
*/
- #define HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE \
+ uint16_t num_msix;
+ uint32_t flags;
+ /*
+ * When this bit is '1', the function is disabled with
+ * source MAC address check.
+ * This is an anti-spoofing check. If this flag is set,
+ * then the function shall be configured to disallow
+ * transmission of frames with the source MAC address that
+ * is configured for this function.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE \
UINT32_C(0x1)
/*
- * When this bit is '1', the function is enabled with source MAC
- * address check. This is an anti-spoofing check. If this flag
- * is set, then the function shall be configured to allow
- * transmission of frames with the source MAC address that is
- * configured for this function.
+ * When this bit is '1', the function is enabled with
+ * source MAC address check.
+ * This is an anti-spoofing check. If this flag is set,
+ * then the function shall be configured to allow
+ * transmission of frames with the source MAC address that
+ * is configured for this function.
*/
- #define HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE \
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE \
UINT32_C(0x2)
- /* reserved */
- #define HWRM_FUNC_CFG_INPUT_FLAGS_RSVD_MASK UINT32_C(0x1fc)
- #define HWRM_FUNC_CFG_INPUT_FLAGS_RSVD_SFT 2
+ /* reserved. */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_RSVD_MASK \
+ UINT32_C(0x1fc)
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_RSVD_SFT 2
/*
* Standard TX Ring mode is used for the allocation of TX ring
* and underlying scheduling resources that allow bandwidth
- * reservation and limit settings on the queried function. If
- * set to 1, then standard TX ring mode is requested to be
+ * reservation and limit settings on the queried function.
+ * If set to 1, then standard TX ring mode is requested to be
* enabled on the function being configured.
*/
- #define HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE \
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE \
UINT32_C(0x200)
/*
* Standard TX Ring mode is used for the allocation of TX ring
* and underlying scheduling resources that allow bandwidth
- * reservation and limit settings on the queried function. If
- * set to 1, then the standard TX ring mode is requested to be
- * disabled on the function being configured. In this extended
- * TX ring resource mode, the minimum and maximum bandwidth
- * settings are not supported to allow the allocation of TX
- * rings to span multiple scheduler nodes.
- */
- #define HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE \
+ * reservation and limit settings on the queried function.
+ * If set to 1, then the standard TX ring mode is requested to
+ * be disabled on the function being configured. In this extended
+ * TX ring resource mode, the minimum and maximum bandwidth settings
+ * are not supported to allow the allocation of TX rings to
+ * span multiple scheduler nodes.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE \
UINT32_C(0x400)
/*
- * If this bit is set, virtual mac address configured in this
- * command will be persistent over warm boot.
+ * If this bit is set, virtual mac address configured
+ * in this command will be persistent over warm boot.
*/
- #define HWRM_FUNC_CFG_INPUT_FLAGS_VIRT_MAC_PERSIST UINT32_C(0x800)
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_VIRT_MAC_PERSIST \
+ UINT32_C(0x800)
/*
- * This bit only applies to the VF. If this bit is set, the
- * statistic context counters will not be cleared when the
- * statistic context is freed or a function reset is called on
- * VF. This bit will be cleared when the PF is unloaded or a
- * function reset is called on the PF.
+ * This bit only applies to the VF. If this bit is set, the statistic
+ * context counters will not be cleared when the statistic context is freed
+ * or a function reset is called on VF. This bit will be cleared when the PF
+ * is unloaded or a function reset is called on the PF.
*/
- #define HWRM_FUNC_CFG_INPUT_FLAGS_NO_AUTOCLEAR_STATISTIC \
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_NO_AUTOCLEAR_STATISTIC \
UINT32_C(0x1000)
/*
- * This bit requests that the firmware test to see if all the
- * assets requested in this command (i.e. number of TX rings)
- * are available. The firmware will return an error if the
- * requested assets are not available. The firwmare will NOT
- * reserve the assets if they are available.
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of TX rings) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_TX_ASSETS_TEST \
+ UINT32_C(0x2000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of RX rings) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_RX_ASSETS_TEST \
+ UINT32_C(0x4000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of CMPL rings) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST \
+ UINT32_C(0x8000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of RSS ctx) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_RSSCOS_CTX_ASSETS_TEST \
+ UINT32_C(0x10000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of ring groups) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST \
+ UINT32_C(0x20000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of stat ctx) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST \
+ UINT32_C(0x40000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of VNICs) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST \
+ UINT32_C(0x80000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of L2 ctx) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
*/
- #define HWRM_FUNC_CFG_INPUT_FLAGS_TX_ASSETS_TEST UINT32_C(0x2000)
- uint32_t enables;
- /* This bit must be '1' for the mtu field to be configured. */
- #define HWRM_FUNC_CFG_INPUT_ENABLES_MTU UINT32_C(0x1)
- /* This bit must be '1' for the mru field to be configured. */
- #define HWRM_FUNC_CFG_INPUT_ENABLES_MRU UINT32_C(0x2)
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_L2_CTX_ASSETS_TEST \
+ UINT32_C(0x100000)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the mtu field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_MTU \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the mru field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_MRU \
+ UINT32_C(0x2)
/*
* This bit must be '1' for the num_rsscos_ctxs field to be
* configured.
*/
- #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS UINT32_C(0x4)
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS \
+ UINT32_C(0x4)
/*
* This bit must be '1' for the num_cmpl_rings field to be
* configured.
*/
- #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS UINT32_C(0x8)
- /* This bit must be '1' for the num_tx_rings field to be configured. */
- #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS UINT32_C(0x10)
- /* This bit must be '1' for the num_rx_rings field to be configured. */
- #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS UINT32_C(0x20)
- /* This bit must be '1' for the num_l2_ctxs field to be configured. */
- #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS UINT32_C(0x40)
- /* This bit must be '1' for the num_vnics field to be configured. */
- #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS UINT32_C(0x80)
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the num_tx_rings field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the num_rx_rings field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the num_l2_ctxs field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the num_vnics field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS \
+ UINT32_C(0x80)
/*
* This bit must be '1' for the num_stat_ctxs field to be
* configured.
*/
- #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS UINT32_C(0x100)
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS \
+ UINT32_C(0x100)
/*
* This bit must be '1' for the dflt_mac_addr field to be
* configured.
*/
- #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR UINT32_C(0x200)
- /* This bit must be '1' for the dflt_vlan field to be configured. */
- #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN UINT32_C(0x400)
- /* This bit must be '1' for the dflt_ip_addr field to be configured. */
- #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_IP_ADDR UINT32_C(0x800)
- /* This bit must be '1' for the min_bw field to be configured. */
- #define HWRM_FUNC_CFG_INPUT_ENABLES_MIN_BW UINT32_C(0x1000)
- /* This bit must be '1' for the max_bw field to be configured. */
- #define HWRM_FUNC_CFG_INPUT_ENABLES_MAX_BW UINT32_C(0x2000)
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR \
+ UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the dflt_vlan field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN \
+ UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the dflt_ip_addr field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_IP_ADDR \
+ UINT32_C(0x800)
+ /*
+ * This bit must be '1' for the min_bw field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_MIN_BW \
+ UINT32_C(0x1000)
+ /*
+ * This bit must be '1' for the max_bw field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_MAX_BW \
+ UINT32_C(0x2000)
/*
* This bit must be '1' for the async_event_cr field to be
* configured.
*/
- #define HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR UINT32_C(0x4000)
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR \
+ UINT32_C(0x4000)
/*
* This bit must be '1' for the vlan_antispoof_mode field to be
* configured.
*/
- #define HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE UINT32_C(0x8000)
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE \
+ UINT32_C(0x8000)
/*
* This bit must be '1' for the allowed_vlan_pris field to be
* configured.
*/
- #define HWRM_FUNC_CFG_INPUT_ENABLES_ALLOWED_VLAN_PRIS UINT32_C(0x10000)
- /* This bit must be '1' for the evb_mode field to be configured. */
- #define HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE UINT32_C(0x20000)
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_ALLOWED_VLAN_PRIS \
+ UINT32_C(0x10000)
+ /*
+ * This bit must be '1' for the evb_mode field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE \
+ UINT32_C(0x20000)
/*
* This bit must be '1' for the num_mcast_filters field to be
* configured.
*/
- #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MCAST_FILTERS UINT32_C(0x40000)
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MCAST_FILTERS \
+ UINT32_C(0x40000)
/*
* This bit must be '1' for the num_hw_ring_grps field to be
* configured.
*/
- #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS UINT32_C(0x80000)
- uint16_t mtu;
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS \
+ UINT32_C(0x80000)
+ /*
+ * This bit must be '1' for the cache_linesize field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_CACHE_LINESIZE \
+ UINT32_C(0x100000)
+ /*
+ * This bit must be '1' for the num_msix field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX \
+ UINT32_C(0x200000)
+ /*
+ * The maximum transmission unit of the function.
+ * The HWRM should make sure that the mtu of
+ * the function does not exceed the mtu of the physical
+ * port that this function is associated with.
+ *
+ * In addition to configuring mtu per function, it is
+ * possible to configure mtu per transmit ring.
+ * By default, the mtu of each transmit ring associated
+ * with a function is equal to the mtu of the function.
+ * The HWRM should make sure that the mtu of each transmit
+ * ring that is assigned to a function has a valid mtu.
+ */
+ uint16_t mtu;
/*
- * The maximum transmission unit of the function. The HWRM
- * should make sure that the mtu of the function does not exceed
- * the mtu of the physical port that this function is associated
- * with. In addition to configuring mtu per function, it is
- * possible to configure mtu per transmit ring. By default, the
- * mtu of each transmit ring associated with a function is equal
- * to the mtu of the function. The HWRM should make sure that
- * the mtu of each transmit ring that is assigned to a function
- * has a valid mtu.
+ * The maximum receive unit of the function.
+ * The HWRM should make sure that the mru of
+ * the function does not exceed the mru of the physical
+ * port that this function is associated with.
+ *
+ * In addition to configuring mru per function, it is
+ * possible to configure mru per vnic.
+ * By default, the mru of each vnic associated
+ * with a function is equal to the mru of the function.
+ * The HWRM should make sure that the mru of each vnic
+ * that is assigned to a function has a valid mru.
*/
- uint16_t mru;
+ uint16_t mru;
/*
- * The maximum receive unit of the function. The HWRM should
- * make sure that the mru of the function does not exceed the
- * mru of the physical port that this function is associated
- * with. In addition to configuring mru per function, it is
- * possible to configure mru per vnic. By default, the mru of
- * each vnic associated with a function is equal to the mru of
- * the function. The HWRM should make sure that the mru of each
- * vnic that is assigned to a function has a valid mru.
+ * The number of RSS/COS contexts requested for the
+ * function.
*/
- uint16_t num_rsscos_ctxs;
- /* The number of RSS/COS contexts requested for the function. */
- uint16_t num_cmpl_rings;
+ uint16_t num_rsscos_ctxs;
/*
- * The number of completion rings requested for the function.
- * This does not include the rings allocated to any children
- * functions if any.
+ * The number of completion rings requested for the
+ * function. This does not include the rings allocated
+ * to any children functions if any.
*/
- uint16_t num_tx_rings;
+ uint16_t num_cmpl_rings;
/*
- * The number of transmit rings requested for the function. This
- * does not include the rings allocated to any children
- * functions if any.
+ * The number of transmit rings requested for the function.
+ * This does not include the rings allocated to any
+ * children functions if any.
*/
- uint16_t num_rx_rings;
+ uint16_t num_tx_rings;
/*
- * The number of receive rings requested for the function. This
- * does not include the rings allocated to any children
- * functions if any.
+ * The number of receive rings requested for the function.
+ * This does not include the rings allocated
+ * to any children functions if any.
*/
- uint16_t num_l2_ctxs;
+ uint16_t num_rx_rings;
/* The requested number of L2 contexts for the function. */
- uint16_t num_vnics;
+ uint16_t num_l2_ctxs;
/* The requested number of vnics for the function. */
- uint16_t num_stat_ctxs;
+ uint16_t num_vnics;
/* The requested number of statistic contexts for the function. */
- uint16_t num_hw_ring_grps;
+ uint16_t num_stat_ctxs;
/*
- * The number of HW ring groups that should be reserved for this
- * function.
+ * The number of HW ring groups that should
+ * be reserved for this function.
*/
- uint8_t dflt_mac_addr[6];
+ uint16_t num_hw_ring_grps;
/* The default MAC address for the function being configured. */
- uint16_t dflt_vlan;
+ uint8_t dflt_mac_addr[6];
/*
- * The default VLAN for the function being configured. This
- * field's format is same as 802.1Q Tag's Tag Control
- * Information (TCI) format that includes both Priority Code
- * Point (PCP) and VLAN Identifier (VID).
+ * The default VLAN for the function being configured.
+ * This field's format is the same as the 802.1Q Tag's
+ * Tag Control Information (TCI) format that includes both
+ * Priority Code Point (PCP) and VLAN Identifier (VID).
*/
- uint32_t dflt_ip_addr[4];
+ uint16_t dflt_vlan;
/*
* The default IP address for the function being configured.
* This address is only used in enabling source property check.
*/
- uint32_t min_bw;
+ uint32_t dflt_ip_addr[4];
/*
- * Minimum BW allocated for this function. The HWRM will
- * translate this value into byte counter and time interval used
- * for the scheduler inside the device.
+ * Minimum BW allocated for this function.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for the scheduler inside the device.
*/
+ uint32_t min_bw;
/* The bandwidth value. */
- #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_MASK UINT32_C(0xfffffff)
- #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_SFT 0
- /* The granularity of the value (bits or bytes). */
- #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE UINT32_C(0x10000000)
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_SFT 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
/* Value is in bits. */
- #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BITS (UINT32_C(0x0) << 28)
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
/* Value is in bytes. */
- #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BYTES (UINT32_C(0x1) << 28)
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
#define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_LAST \
- FUNC_CFG_INPUT_MIN_BW_SCALE_BYTES
+ HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BYTES
/* bw_value_unit is 3 b */
- #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_MASK \
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_MASK \
UINT32_C(0xe0000000)
- #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_SFT 29
- /* Value is in Mb or MB (base 10). */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_SFT 29
+ /* Value is in Mb or MB (base 10). */
#define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_MEGA \
(UINT32_C(0x0) << 29)
- /* Value is in Kb or KB (base 10). */
+ /* Value is in Kb or KB (base 10). */
#define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_KILO \
(UINT32_C(0x2) << 29)
/* Value is in bits or bytes. */
#define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_BASE \
(UINT32_C(0x4) << 29)
- /* Value is in Gb or GB (base 10). */
+ /* Value is in Gb or GB (base 10). */
#define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_GIGA \
(UINT32_C(0x6) << 29)
/* Value is in 1/100th of a percentage of total bandwidth. */
@@ -3522,39 +6349,42 @@ struct hwrm_func_cfg_input {
#define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_INVALID \
(UINT32_C(0x7) << 29)
#define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_LAST \
- FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_INVALID
- uint32_t max_bw;
+ HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_INVALID
/*
- * Maximum BW allocated for this function. The HWRM will
- * translate this value into byte counter and time interval used
- * for the scheduler inside the device.
+ * Maximum BW allocated for this function.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for the scheduler inside the device.
*/
+ uint32_t max_bw;
/* The bandwidth value. */
#define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_MASK \
UINT32_C(0xfffffff)
- #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_SFT 0
- /* The granularity of the value (bits or bytes). */
- #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE UINT32_C(0x10000000)
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_SFT 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
/* Value is in bits. */
- #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BITS (UINT32_C(0x0) << 28)
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
/* Value is in bytes. */
- #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BYTES (UINT32_C(0x1) << 28)
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
#define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_LAST \
- FUNC_CFG_INPUT_MAX_BW_SCALE_BYTES
+ HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BYTES
/* bw_value_unit is 3 b */
- #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_MASK \
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_MASK \
UINT32_C(0xe0000000)
- #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_SFT 29
- /* Value is in Mb or MB (base 10). */
- #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA \
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_SFT 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA \
(UINT32_C(0x0) << 29)
- /* Value is in Kb or KB (base 10). */
- #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_KILO \
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_KILO \
(UINT32_C(0x2) << 29)
/* Value is in bits or bytes. */
#define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_BASE \
(UINT32_C(0x4) << 29)
- /* Value is in Gb or GB (base 10). */
+ /* Value is in Gb or GB (base 10). */
#define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_GIGA \
(UINT32_C(0x6) << 29)
/* Value is in 1/100th of a percentage of total bandwidth. */
@@ -3564,1984 +6394,4481 @@ struct hwrm_func_cfg_input {
#define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID \
(UINT32_C(0x7) << 29)
#define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_LAST \
- FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID
- uint16_t async_event_cr;
+ HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID
/*
* ID of the target completion ring for receiving asynchronous
- * event completions. If this field is not valid, then the HWRM
- * shall use the default completion ring of the function that is
- * being configured as the target completion ring for providing
- * any asynchronous event completions for that function. If this
- * field is valid, then the HWRM shall use the completion ring
- * identified by this ID as the target completion ring for
- * providing any asynchronous event completions for the function
- * that is being configured.
- */
- uint8_t vlan_antispoof_mode;
+ * event completions. If this field is not valid, then the
+ * HWRM shall use the default completion ring of the function
+ * that is being configured as the target completion ring for
+ * providing any asynchronous event completions for that
+ * function.
+ * If this field is valid, then the HWRM shall use the
+ * completion ring identified by this ID as the target
+ * completion ring for providing any asynchronous event
+ * completions for the function that is being configured.
+ */
+ uint16_t async_event_cr;
/* VLAN Anti-spoofing mode. */
+ uint8_t vlan_antispoof_mode;
/* No VLAN anti-spoofing checks are enabled */
- #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK UINT32_C(0x0)
+ #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK \
+ UINT32_C(0x0)
/* Validate VLAN against the configured VLAN(s) */
#define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN \
UINT32_C(0x1)
/* Insert VLAN if it does not exist, otherwise discard */
#define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE \
UINT32_C(0x2)
- /*
- * Insert VLAN if it does not exist, override
- * VLAN if it exists
- */
- #define \
- HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN \
+ /* Insert VLAN if it does not exist, override VLAN if it exists */
+ #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN \
UINT32_C(0x3)
- uint8_t allowed_vlan_pris;
+ #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_LAST \
+ HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN
/*
- * This bit field defines VLAN PRIs that are allowed on this
- * function. If nth bit is set, then VLAN PRI n is allowed on
+ * This bit field defines VLAN PRIs that are allowed on
* this function.
+ * If the nth bit is set, then VLAN PRI n is allowed on this
+ * function.
*/
- uint8_t evb_mode;
+ uint8_t allowed_vlan_pris;
/*
* The HWRM shall allow a PF driver to change EVB mode for the
- * partition it belongs to. The HWRM shall not allow a VF driver
- * to change the EVB mode. The HWRM shall take into account the
- * switching of EVB mode from one to another and reconfigure
- * hardware resources as appropriately. The switching from VEB
- * to VEPA mode requires the disabling of the loopback traffic.
- * Additionally, source knock outs are handled differently in
- * VEB and VEPA modes.
- */
- /* No Edge Virtual Bridging (EVB) */
- #define HWRM_FUNC_CFG_INPUT_EVB_MODE_NO_EVB UINT32_C(0x0)
- /* Virtual Ethernet Bridge (VEB) */
- #define HWRM_FUNC_CFG_INPUT_EVB_MODE_VEB UINT32_C(0x1)
- /* Virtual Ethernet Port Aggregator (VEPA) */
- #define HWRM_FUNC_CFG_INPUT_EVB_MODE_VEPA UINT32_C(0x2)
- uint8_t unused_2;
- uint16_t num_mcast_filters;
+ * partition it belongs to.
+ * The HWRM shall not allow a VF driver to change the EVB mode.
+ * The HWRM shall take into account the switching of EVB mode
+ * from one to another and reconfigure hardware resources
+ * appropriately.
+ * The switching from VEB to VEPA mode requires
+ * the disabling of the loopback traffic. Additionally,
+ * source knock outs are handled differently in VEB and VEPA
+ * modes.
+ */
+ uint8_t evb_mode;
+ /* No Edge Virtual Bridging (EVB) */
+ #define HWRM_FUNC_CFG_INPUT_EVB_MODE_NO_EVB UINT32_C(0x0)
+ /* Virtual Ethernet Bridge (VEB) */
+ #define HWRM_FUNC_CFG_INPUT_EVB_MODE_VEB UINT32_C(0x1)
+ /* Virtual Ethernet Port Aggregator (VEPA) */
+ #define HWRM_FUNC_CFG_INPUT_EVB_MODE_VEPA UINT32_C(0x2)
+ #define HWRM_FUNC_CFG_INPUT_EVB_MODE_LAST \
+ HWRM_FUNC_CFG_INPUT_EVB_MODE_VEPA
+ uint8_t options;
+ /*
+ * This value indicates the PCIe device cache line size.
+ * The cache line size allows DMA writes to terminate and
+ * start at the cache boundary.
+ */
+ #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_MASK \
+ UINT32_C(0x3)
+ #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_SFT 0
+ /* Cache Line Size 64 bytes */
+ #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_SIZE_64 \
+ UINT32_C(0x0)
+ /* Cache Line Size 128 bytes */
+ #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_SIZE_128 \
+ UINT32_C(0x1)
+ #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_LAST \
+ HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_SIZE_128
+ /* Reserved for future. */
+ #define HWRM_FUNC_CFG_INPUT_OPTIONS_RSVD_MASK \
+ UINT32_C(0xfc)
+ #define HWRM_FUNC_CFG_INPUT_OPTIONS_RSVD_SFT 2
/*
- * The number of multicast filters that should be reserved for
- * this function on the RX side.
+ * The number of multicast filters that should
+ * be reserved for this function on the RX side.
*/
+ uint16_t num_mcast_filters;
} __attribute__((packed));
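
The enables word above gates which request fields the HWRM will actually
apply, and min_bw/max_bw pack a 28-bit value in the low bits with a scale
selector at bit 28 and a 3-bit unit code at bits 29-31. The minimal sketch
below shows one way a caller might compose such a request, assuming this
header and DPDK's rte_byteorder.h are available; the request header fields
(req_type, resp_addr and so on) and the actual send step are left to the
driver's usual HWRM path and are not shown.

#include <stdint.h>
#include <rte_byteorder.h>

/*
 * Hypothetical helper: request a default VLAN and a 1 Gb/s transmit cap
 * for a function.  Only fields whose bit is set in 'enables' are applied
 * by the HWRM.  The request is assumed to be zero-initialized and to have
 * its header prepared elsewhere.
 */
static void
example_fill_func_cfg(struct hwrm_func_cfg_input *req, uint16_t vlan_tci)
{
	req->enables |= rte_cpu_to_le_32(
		HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN |
		HWRM_FUNC_CFG_INPUT_ENABLES_MAX_BW);
	req->dflt_vlan = rte_cpu_to_le_16(vlan_tci);
	/* 1000 in the low 28 bits, bit granularity, mega (Mb) units. */
	req->max_bw = rte_cpu_to_le_32(
		(UINT32_C(1000) & HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_MASK) |
		HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BITS |
		HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA);
}
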
-/* Output (16 bytes) */
+/* hwrm_func_cfg_output (size:128b/16B) */
struct hwrm_func_cfg_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* hwrm_func_qstats */
-/*
- * Description: This command returns statistics of a function. The input FID
- * value is used to indicate what function is being queried. This allows a
- * physical function driver to query virtual functions that are children of the
- * physical function. The HWRM shall return any unsupported counter with a value
- * of 0xFFFFFFFF for 32-bit counters and 0xFFFFFFFFFFFFFFFF for 64-bit counters.
- */
-/* Input (24 bytes) */
+/********************
+ * hwrm_func_qstats *
+ ********************/
+
+
+/* hwrm_func_qstats_input (size:192b/24B) */
struct hwrm_func_qstats_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t fid;
+ uint64_t resp_addr;
/*
- * Function ID of the function that is being queried. 0xFF...
- * (All Fs) if the query is for the requesting function.
+ * Function ID of the function that is being queried.
+ * 0xFF... (All Fs) if the query is for the requesting
+ * function.
*/
- uint16_t unused_0[3];
+ uint16_t fid;
+ uint8_t unused_0[6];
} __attribute__((packed));
-/* Output (176 bytes) */
+/* hwrm_func_qstats_output (size:1408b/176B) */
struct hwrm_func_qstats_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint64_t tx_ucast_pkts;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/* Number of transmitted unicast packets on the function. */
- uint64_t tx_mcast_pkts;
+ uint64_t tx_ucast_pkts;
/* Number of transmitted multicast packets on the function. */
- uint64_t tx_bcast_pkts;
+ uint64_t tx_mcast_pkts;
/* Number of transmitted broadcast packets on the function. */
- uint64_t tx_err_pkts;
+ uint64_t tx_bcast_pkts;
/*
* Number of transmitted packets that were discarded due to
- * internal NIC resource problems. For transmit, this can only
- * happen if TMP is configured to allow dropping in HOL blocking
- * conditions, which is not a normal configuration.
+ * internal NIC resource problems. For transmit, this
+ * can only happen if TMP is configured to allow dropping
+ * in HOL blocking conditions, which is not a normal
+ * configuration.
*/
- uint64_t tx_drop_pkts;
+ uint64_t tx_discard_pkts;
/*
* Number of dropped packets on transmit path on the function.
- * These are packets that have been marked for drop by the TE
- * CFA block or are packets that exceeded the transmit MTU limit
- * for the function.
+ * These are packets that have been marked for drop by
+ * the TE CFA block or are packets that exceeded the
+ * transmit MTU limit for the function.
*/
- uint64_t tx_ucast_bytes;
+ uint64_t tx_drop_pkts;
/* Number of transmitted bytes for unicast traffic on the function. */
- uint64_t tx_mcast_bytes;
- /*
- * Number of transmitted bytes for multicast traffic on the
- * function.
- */
- uint64_t tx_bcast_bytes;
- /*
- * Number of transmitted bytes for broadcast traffic on the
- * function.
- */
- uint64_t rx_ucast_pkts;
+ uint64_t tx_ucast_bytes;
+ /* Number of transmitted bytes for multicast traffic on the function. */
+ uint64_t tx_mcast_bytes;
+ /* Number of transmitted bytes for broadcast traffic on the function. */
+ uint64_t tx_bcast_bytes;
/* Number of received unicast packets on the function. */
- uint64_t rx_mcast_pkts;
+ uint64_t rx_ucast_pkts;
/* Number of received multicast packets on the function. */
- uint64_t rx_bcast_pkts;
+ uint64_t rx_mcast_pkts;
/* Number of received broadcast packets on the function. */
- uint64_t rx_err_pkts;
+ uint64_t rx_bcast_pkts;
/*
- * Number of received packets that were discarded on the
- * function due to resource limitations. This can happen for 3
- * reasons. # The BD used for the packet has a bad format. #
- * There were no BDs available in the ring for the packet. #
- * There were no BDs available on-chip for the packet.
+ * Number of received packets that were discarded on the function
+ * due to resource limitations. This can happen for 3 reasons.
+ * # The BD used for the packet has a bad format.
+ * # There were no BDs available in the ring for the packet.
+ * # There were no BDs available on-chip for the packet.
*/
- uint64_t rx_drop_pkts;
+ uint64_t rx_discard_pkts;
/*
* Number of dropped packets on received path on the function.
- * These are packets that have been marked for drop by the RE
- * CFA.
+ * These are packets that have been marked for drop by the
+ * RE CFA.
*/
- uint64_t rx_ucast_bytes;
+ uint64_t rx_drop_pkts;
/* Number of received bytes for unicast traffic on the function. */
- uint64_t rx_mcast_bytes;
+ uint64_t rx_ucast_bytes;
/* Number of received bytes for multicast traffic on the function. */
- uint64_t rx_bcast_bytes;
+ uint64_t rx_mcast_bytes;
/* Number of received bytes for broadcast traffic on the function. */
- uint64_t rx_agg_pkts;
+ uint64_t rx_bcast_bytes;
/* Number of aggregated unicast packets on the function. */
- uint64_t rx_agg_bytes;
+ uint64_t rx_agg_pkts;
/* Number of aggregated unicast bytes on the function. */
- uint64_t rx_agg_events;
+ uint64_t rx_agg_bytes;
/* Number of aggregation events on the function. */
- uint64_t rx_agg_aborts;
+ uint64_t rx_agg_events;
/* Number of aborted aggregations on the function. */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint64_t rx_agg_aborts;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
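
As a rough illustration of the query flow, the sketch below fills the fid
field (all-ones selects the requesting function itself) and converts one
returned counter from its little-endian wire format. example_hwrm_send()
is a hypothetical stand-in for the driver's real request/response
transport and is not part of this header.

#include <stdint.h>
#include <string.h>
#include <rte_byteorder.h>

/* Hypothetical transport: issues the request and fills in the response. */
extern int example_hwrm_send(void *req, size_t req_len,
			     void *resp, size_t resp_len);

static int
example_read_tx_ucast(uint16_t fid, uint64_t *pkts)
{
	struct hwrm_func_qstats_input req;
	struct hwrm_func_qstats_output resp;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));
	req.fid = rte_cpu_to_le_16(fid);	/* 0xFFFF = this function */
	if (example_hwrm_send(&req, sizeof(req), &resp, sizeof(resp)) != 0)
		return -1;
	*pkts = rte_le_to_cpu_64(resp.tx_ucast_pkts);
	return 0;
}
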
-/* hwrm_func_clr_stats */
-/*
- * Description: This command clears statistics of a function. The input FID
- * value is used to indicate what function's statistics is being cleared. This
- * allows a physical function driver to clear statistics of virtual functions
- * that are children of the physical function.
- */
-/* Input (24 bytes) */
+/***********************
+ * hwrm_func_clr_stats *
+ ***********************/
+
+
+/* hwrm_func_clr_stats_input (size:192b/24B) */
struct hwrm_func_clr_stats_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t fid;
+ uint64_t resp_addr;
/*
- * Function ID of the function. 0xFF... (All Fs) if the query is
- * for the requesting function.
+ * Function ID of the function.
+ * 0xFF... (All Fs) if the query is for the requesting
+ * function.
*/
- uint16_t unused_0[3];
+ uint16_t fid;
+ uint8_t unused_0[6];
} __attribute__((packed));
-/* Output (16 bytes) */
+/* hwrm_func_clr_stats_output (size:128b/16B) */
struct hwrm_func_clr_stats_output {
- uint16_t error_code;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************
+ * hwrm_func_vf_resc_free *
+ **************************/
+
+
+/* hwrm_func_vf_resc_free_input (size:192b/24B) */
+struct hwrm_func_vf_resc_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * This value is used to identify a Virtual Function (VF).
+ * The scope of VF ID is local within a PF.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t vf_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_func_vf_resc_free_output (size:128b/16B) */
+struct hwrm_func_vf_resc_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* hwrm_func_vf_vnic_ids_query */
-/* Description: This command is used to query vf vnic ids. */
-/* Input (32 bytes) */
+/*******************************
+ * hwrm_func_vf_vnic_ids_query *
+ *******************************/
+
+
+/* hwrm_func_vf_vnic_ids_query_input (size:256b/32B) */
struct hwrm_func_vf_vnic_ids_query_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t vf_id;
+ uint64_t resp_addr;
/*
- * This value is used to identify a Virtual Function (VF). The
- * scope of VF ID is local within a PF.
+ * This value is used to identify a Virtual Function (VF).
+ * The scope of VF ID is local within a PF.
*/
- uint8_t unused_0;
- uint8_t unused_1;
- uint32_t max_vnic_id_cnt;
+ uint16_t vf_id;
+ uint8_t unused_0[2];
/* Max number of vnic ids in vnic id table */
- uint64_t vnic_id_tbl_addr;
+ uint32_t max_vnic_id_cnt;
/* This is the address for VF VNIC ID table */
+ uint64_t vnic_id_tbl_addr;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* hwrm_func_vf_vnic_ids_query_output (size:128b/16B) */
struct hwrm_func_vf_vnic_ids_query_output {
- uint16_t error_code;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * Actual number of vnic ids
+ *
+ * Each VNIC ID is written as a 32-bit number.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint32_t vnic_id_cnt;
+ uint8_t unused_0[3];
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint32_t vnic_id_cnt;
+ uint8_t valid;
+} __attribute__((packed));
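
The query above writes the VNIC IDs into a caller-supplied DMA buffer
rather than into the response itself: the input carries the table address
and its capacity in IDs, and the output's vnic_id_cnt reports how many
32-bit entries were actually written. A hedged sketch of that flow
follows; example_dma_alloc() and example_hwrm_send() are hypothetical
stand-ins for the driver's DMA allocator and HWRM transport, and
error-path cleanup is omitted.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <rte_byteorder.h>

extern void *example_dma_alloc(size_t len, uint64_t *iova);
extern int example_hwrm_send(void *req, size_t req_len,
			     void *resp, size_t resp_len);

static int
example_list_vf_vnics(uint16_t vf_id)
{
	struct hwrm_func_vf_vnic_ids_query_input req;
	struct hwrm_func_vf_vnic_ids_query_output resp;
	uint32_t max_ids = 32, n, i;
	uint64_t tbl_iova;
	uint32_t *tbl;

	/* Each table entry is one 32-bit VNIC ID written by the HWRM. */
	tbl = example_dma_alloc(max_ids * sizeof(uint32_t), &tbl_iova);
	if (tbl == NULL)
		return -1;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));
	req.vf_id = rte_cpu_to_le_16(vf_id);
	req.max_vnic_id_cnt = rte_cpu_to_le_32(max_ids);
	req.vnic_id_tbl_addr = rte_cpu_to_le_64(tbl_iova);
	if (example_hwrm_send(&req, sizeof(req), &resp, sizeof(resp)) != 0)
		return -1;

	n = rte_le_to_cpu_32(resp.vnic_id_cnt);
	for (i = 0; i < n && i < max_ids; i++)
		printf("VF %u vnic id %u\n", vf_id, rte_le_to_cpu_32(tbl[i]));
	return 0;
}
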
+
+/**********************
+ * hwrm_func_drv_rgtr *
+ **********************/
+
+
+/* hwrm_func_drv_rgtr_input (size:896b/112B) */
+struct hwrm_func_drv_rgtr_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Actual number of vnic ids Each VNIC ID is written as a 32-bit
- * number.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t valid;
+ uint16_t cmpl_ring;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
-} __attribute__((packed));
-
-/* hwrm_func_drv_rgtr */
-/*
- * Description: This command is used by the function driver to register its
- * information with the HWRM. A function driver shall implement this command. A
- * function driver shall use this command during the driver initialization right
- * after the HWRM version discovery and default ring resources allocation.
- */
-/* Input (80 bytes) */
-struct hwrm_func_drv_rgtr_input {
- uint16_t req_type;
+ uint16_t seq_id;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint16_t cmpl_ring;
+ uint16_t target_id;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint64_t resp_addr;
+ uint32_t flags;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * When this bit is '1', the function driver is requesting
+ * all requests from its children VF drivers to be
+ * forwarded to itself.
+ * This flag can only be set by the PF driver.
+ * If a VF driver sets this flag, it should be ignored
+ * by the HWRM.
*/
- uint64_t resp_addr;
+ #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_ALL_MODE UINT32_C(0x1)
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * When this bit is '1', the function is requesting none of
+ * the requests from its children VF drivers to be
+ * forwarded to itself.
+ * This flag can only be set by the PF driver.
+ * If a VF driver sets this flag, it should be ignored
+ * by the HWRM.
*/
- uint32_t flags;
+ #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE UINT32_C(0x2)
/*
- * When this bit is '1', the function driver is requesting all
- * requests from its children VF drivers to be forwarded to
- * itself. This flag can only be set by the PF driver. If a VF
- * driver sets this flag, it should be ignored by the HWRM.
+ * When this bit is '1', then ver_maj_8b, ver_min_8b, ver_upd_8b
+ * fields shall be ignored and ver_maj, ver_min, ver_upd
+ * and ver_patch shall be used for the driver version information.
+ * When this bit is '0', then ver_maj_8b, ver_min_8b, ver_upd_8b
+ * fields shall be used for the driver version information and
+ * ver_maj, ver_min, ver_upd and ver_patch shall be ignored.
*/
- #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_ALL_MODE UINT32_C(0x1)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_16BIT_VER_MODE UINT32_C(0x4)
+ uint32_t enables;
/*
- * When this bit is '1', the function is requesting none of the
- * requests from its children VF drivers to be forwarded to
- * itself. This flag can only be set by the PF driver. If a VF
- * driver sets this flag, it should be ignored by the HWRM.
+ * This bit must be '1' for the os_type field to be
+ * configured.
*/
- #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE UINT32_C(0x2)
- uint32_t enables;
- /* This bit must be '1' for the os_type field to be configured. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE UINT32_C(0x1)
- /* This bit must be '1' for the ver field to be configured. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER UINT32_C(0x2)
- /* This bit must be '1' for the timestamp field to be configured. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_TIMESTAMP UINT32_C(0x4)
- /* This bit must be '1' for the vf_req_fwd field to be configured. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD UINT32_C(0x8)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE \
+ UINT32_C(0x1)
/*
- * This bit must be '1' for the async_event_fwd field to be
+ * This bit must be '1' for the ver field to be
* configured.
*/
- #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD UINT32_C(0x10)
- uint16_t os_type;
+ #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the timestamp field to be
+ * configured.
+ */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_TIMESTAMP \
+ UINT32_C(0x4)
/*
- * This value indicates the type of OS. The values are based on
- * CIM_OperatingSystem.mof file as published by the DMTF.
+ * This bit must be '1' for the vf_req_fwd field to be
+ * configured.
*/
+ #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the async_event_fwd field to be
+ * configured.
+ */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD \
+ UINT32_C(0x10)
+ /*
+ * This value indicates the type of OS. The values are based on the
+ * CIM_OperatingSystem.mof file as published by the DMTF.
+ */
+ uint16_t os_type;
/* Unknown */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UNKNOWN UINT32_C(0x0)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UNKNOWN UINT32_C(0x0)
/* Other OS not listed below. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_OTHER UINT32_C(0x1)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_OTHER UINT32_C(0x1)
/* MSDOS OS. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_MSDOS UINT32_C(0xe)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_MSDOS UINT32_C(0xe)
/* Windows OS. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WINDOWS UINT32_C(0x12)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WINDOWS UINT32_C(0x12)
/* Solaris OS. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_SOLARIS UINT32_C(0x1d)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_SOLARIS UINT32_C(0x1d)
/* Linux OS. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_LINUX UINT32_C(0x24)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_LINUX UINT32_C(0x24)
/* FreeBSD OS. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD UINT32_C(0x2a)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD UINT32_C(0x2a)
/* VMware ESXi OS. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_ESXI UINT32_C(0x68)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_ESXI UINT32_C(0x68)
/* Microsoft Windows 8 64-bit OS. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN864 UINT32_C(0x73)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN864 UINT32_C(0x73)
/* Microsoft Windows Server 2012 R2 OS. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN2012R2 UINT32_C(0x74)
- uint8_t ver_maj;
- /* This is the major version of the driver. */
- uint8_t ver_min;
- /* This is the minor version of the driver. */
- uint8_t ver_upd;
- /* This is the update version of the driver. */
- uint8_t unused_0;
- uint16_t unused_1;
- uint32_t timestamp;
- /*
- * This is a 32-bit timestamp provided by the driver for keep
- * alive. The timestamp is in multiples of 1ms.
- */
- uint32_t unused_2;
- uint32_t vf_req_fwd[8];
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN2012R2 UINT32_C(0x74)
+ /* UEFI driver. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UEFI UINT32_C(0x8000)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_LAST \
+ HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UEFI
+ /* This is the 8bit major version of the driver. */
+ uint8_t ver_maj_8b;
+ /* This is the 8bit minor version of the driver. */
+ uint8_t ver_min_8b;
+ /* This is the 8bit update version of the driver. */
+ uint8_t ver_upd_8b;
+ uint8_t unused_0[3];
+ /*
+ * This is a 32-bit timestamp provided by the driver for
+ * keep alive.
+ * The timestamp is in multiples of 1ms.
+ */
+ uint32_t timestamp;
+ uint8_t unused_1[4];
/*
* This is a 256-bit bit mask provided by the PF driver for
* letting the HWRM know what commands issued by the VF driver
- * to the HWRM should be forwarded to the PF driver. Nth bit
- * refers to the Nth req_type. Setting Nth bit to 1 indicates
- * that requests from the VF driver with req_type equal to N
- * shall be forwarded to the parent PF driver. This field is not
- * valid for the VF driver.
+ * to the HWRM should be forwarded to the PF driver.
+ * Nth bit refers to the Nth req_type.
+ *
+ * Setting Nth bit to 1 indicates that requests from the
+ * VF driver with req_type equal to N shall be forwarded to
+ * the parent PF driver.
+ *
+ * This field is not valid for the VF driver.
*/
- uint32_t async_event_fwd[8];
+ uint32_t vf_req_fwd[8];
/*
* This is a 256-bit bit mask provided by the function driver
- * (PF or VF driver) to indicate the list of asynchronous event
- * completions to be forwarded. Nth bit refers to the Nth
- * event_id. Setting Nth bit to 1 by the function driver shall
- * result in the HWRM forwarding asynchronous event completion
- * with event_id equal to N. If all bits are set to 0 (value of
- * 0), then the HWRM shall not forward any asynchronous event
- * completion to this function driver.
- */
+ * (PF or VF driver) to indicate the list of asynchronous event
+ * completions to be forwarded.
+ *
+ * Nth bit refers to the Nth event_id.
+ *
+ * Setting Nth bit to 1 by the function driver shall result in
+ * the HWRM forwarding asynchronous event completion with
+ * event_id equal to N.
+ *
+ * If all bits are set to 0 (value of 0), then the HWRM shall
+ * not forward any asynchronous event completion to this
+ * function driver.
+ */
+ uint32_t async_event_fwd[8];
+ /* This is the 16bit major version of the driver. */
+ uint16_t ver_maj;
+ /* This is the 16bit minor version of the driver. */
+ uint16_t ver_min;
+ /* This is the 16bit update version of the driver. */
+ uint16_t ver_upd;
+ /* This is the 16bit patch version of the driver. */
+ uint16_t ver_patch;
} __attribute__((packed));
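
The vf_req_fwd and async_event_fwd masks are 256-bit bitmaps carried as
eight 32-bit words. Assuming bit N of the mask lives in word N / 32 at
position N % 32 (consistent with the "Nth bit" wording above, although
the driver's own helpers may differ), a request-side helper could look
like this sketch:

#include <stdint.h>
#include <rte_byteorder.h>

/*
 * Hypothetical helper: ask the HWRM to forward async event completions
 * with the given event_id to this driver, and flag the field as valid
 * via the enables bit.
 */
static void
example_fwd_async_event(struct hwrm_func_drv_rgtr_input *req,
			uint16_t event_id)
{
	uint32_t word;

	if (event_id >= 256)	/* the mask only covers event IDs 0..255 */
		return;
	word = rte_le_to_cpu_32(req->async_event_fwd[event_id / 32]);
	word |= UINT32_C(1) << (event_id % 32);
	req->async_event_fwd[event_id / 32] = rte_cpu_to_le_32(word);
	req->enables |= rte_cpu_to_le_32(
		HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
}
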
-/* Output (16 bytes) */
+/* hwrm_func_drv_rgtr_output (size:128b/16B) */
struct hwrm_func_drv_rgtr_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* hwrm_func_drv_unrgtr */
-/*
- * Description: This command is used by the function driver to un register with
- * the HWRM. A function driver shall implement this command. A function driver
- * shall use this command during the driver unloading.
- */
-/* Input (24 bytes) */
+/************************
+ * hwrm_func_drv_unrgtr *
+ ************************/
+
+
+/* hwrm_func_drv_unrgtr_input (size:192b/24B) */
struct hwrm_func_drv_unrgtr_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t flags;
+ uint64_t resp_addr;
+ uint32_t flags;
/*
- * When this bit is '1', the function driver is notifying the
- * HWRM to prepare for the shutdown.
+ * When this bit is '1', the function driver is notifying
+ * the HWRM to prepare for the shutdown.
*/
- #define HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN \
+ #define HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN \
UINT32_C(0x1)
- uint32_t unused_0;
+ uint8_t unused_0[4];
} __attribute__((packed));
-/* Output (16 bytes) */
+/* hwrm_func_drv_unrgtr_output (size:128b/16B) */
struct hwrm_func_drv_unrgtr_output {
- uint16_t error_code;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_func_buf_rgtr *
+ **********************/
+
+
+/* hwrm_func_buf_rgtr_input (size:1024b/128B) */
+struct hwrm_func_buf_rgtr_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t cmpl_ring;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
-} __attribute__((packed));
-
-/* hwrm_func_buf_rgtr */
-/*
- * Description: This command is used by the PF driver to register buffers used
- * in the PF-VF communication with the HWRM. The PF driver uses this command to
- * register buffers for each PF-VF channel. A parent PF may issue this command
- * per child VF. If VF ID is not valid, then this command is used to register
- * buffers for all children VFs of the PF.
- */
-/* Input (128 bytes) */
-struct hwrm_func_buf_rgtr_input {
- uint16_t req_type;
+ uint16_t seq_id;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint16_t cmpl_ring;
+ uint16_t target_id;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint64_t resp_addr;
+ uint32_t enables;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * This bit must be '1' for the vf_id field to be
+ * configured.
*/
- uint64_t resp_addr;
+ #define HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_VF_ID UINT32_C(0x1)
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * This bit must be '1' for the err_buf_addr field to be
+ * configured.
*/
- uint32_t enables;
- /* This bit must be '1' for the vf_id field to be configured. */
- #define HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_VF_ID UINT32_C(0x1)
- /* This bit must be '1' for the err_buf_addr field to be configured. */
- #define HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_ERR_BUF_ADDR UINT32_C(0x2)
- uint16_t vf_id;
+ #define HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_ERR_BUF_ADDR UINT32_C(0x2)
/*
- * This value is used to identify a Virtual Function (VF). The
- * scope of VF ID is local within a PF.
+ * This value is used to identify a Virtual Function (VF).
+ * The scope of VF ID is local within a PF.
*/
- uint16_t req_buf_num_pages;
+ uint16_t vf_id;
/*
* This field represents the number of pages used for request
* buffer(s).
*/
- uint16_t req_buf_page_size;
- /* This field represents the page size used for request buffer(s). */
+ uint16_t req_buf_num_pages;
+ /*
+ * This field represents the page size used for request
+ * buffer(s).
+ */
+ uint16_t req_buf_page_size;
/* 16 bytes */
- #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_16B UINT32_C(0x4)
+ #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_16B UINT32_C(0x4)
/* 4 Kbytes */
- #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_4K UINT32_C(0xc)
+ #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_4K UINT32_C(0xc)
/* 8 Kbytes */
- #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_8K UINT32_C(0xd)
+ #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_8K UINT32_C(0xd)
/* 64 Kbytes */
- #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_64K UINT32_C(0x10)
+ #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_64K UINT32_C(0x10)
/* 2 Mbytes */
- #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_2M UINT32_C(0x15)
+ #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_2M UINT32_C(0x15)
/* 4 Mbytes */
- #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_4M UINT32_C(0x16)
+ #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_4M UINT32_C(0x16)
/* 1 Gbytes */
- #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_1G UINT32_C(0x1e)
- uint16_t req_buf_len;
+ #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_1G UINT32_C(0x1e)
+ #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_LAST \
+ HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_1G
/* The length of the request buffer per VF in bytes. */
- uint16_t resp_buf_len;
+ uint16_t req_buf_len;
/* The length of the response buffer in bytes. */
- uint8_t unused_0;
- uint8_t unused_1;
- uint64_t req_buf_page_addr[10];
- /* This field represents the page address of req buffer. */
- uint64_t error_buf_addr;
+ uint16_t resp_buf_len;
+ uint8_t unused_0[2];
+ /* This field represents the page address of page #0. */
+ uint64_t req_buf_page_addr0;
+ /* This field represents the page address of page #1. */
+ uint64_t req_buf_page_addr1;
+ /* This field represents the page address of page #2. */
+ uint64_t req_buf_page_addr2;
+ /* This field represents the page address of page #3. */
+ uint64_t req_buf_page_addr3;
+ /* This field represents the page address of page #4. */
+ uint64_t req_buf_page_addr4;
+ /* This field represents the page address of page #5. */
+ uint64_t req_buf_page_addr5;
+ /* This field represents the page address of page #6. */
+ uint64_t req_buf_page_addr6;
+ /* This field represents the page address of page #7. */
+ uint64_t req_buf_page_addr7;
+ /* This field represents the page address of page #8. */
+ uint64_t req_buf_page_addr8;
+ /* This field represents the page address of page #9. */
+ uint64_t req_buf_page_addr9;
+ /*
+ * This field is used to receive the error reporting from
+ * the chipset. Only applicable for PFs.
+ */
+ uint64_t error_buf_addr;
+ /*
+ * This field is used to receive the response forwarded by the
+ * HWRM.
+ */
+ uint64_t resp_buf_addr;
+} __attribute__((packed));
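
As a usage sketch only (illustrative, not part of the header): one way a PF driver might fill the body of hwrm_func_buf_rgtr_input for a single VF with one 4 KB request-buffer page. It assumes the common header fields (req_type, seq_id, resp_addr, ...) are filled by the driver's own HWRM send path, uses DPDK's rte_cpu_to_le_*() helpers because HWRM fields are little-endian, and the 128-byte buffer lengths are arbitrary example values.

#include <stdint.h>
#include <rte_byteorder.h>

/* Hypothetical helper; "req" points at the command body defined above. */
static void
example_fill_buf_rgtr(struct hwrm_func_buf_rgtr_input *req, uint16_t vf_id,
		      uint64_t page_pa, uint64_t err_pa, uint64_t resp_pa)
{
	req->enables = rte_cpu_to_le_32(
		HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_VF_ID |
		HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_ERR_BUF_ADDR);
	req->vf_id = rte_cpu_to_le_16(vf_id);
	req->req_buf_num_pages = rte_cpu_to_le_16(1);
	req->req_buf_page_size = rte_cpu_to_le_16(
		HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_4K);
	req->req_buf_len = rte_cpu_to_le_16(128);	/* example size */
	req->resp_buf_len = rte_cpu_to_le_16(128);	/* example size */
	req->req_buf_page_addr0 = rte_cpu_to_le_64(page_pa);
	req->error_buf_addr = rte_cpu_to_le_64(err_pa);
	req->resp_buf_addr = rte_cpu_to_le_64(resp_pa);
}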
+
+/* hwrm_func_buf_rgtr_output (size:128b/16B) */
+struct hwrm_func_buf_rgtr_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used to receive the error reporting from the
- * chipset. Only applicable for PFs.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint64_t resp_buf_addr;
- /* This field is used to receive the response forwarded by the HWRM. */
+ uint8_t valid;
} __attribute__((packed));
-/* Output (16 bytes) */
-struct hwrm_func_buf_rgtr_output {
- uint16_t error_code;
+/************************
+ * hwrm_func_buf_unrgtr *
+ ************************/
+
+
+/* hwrm_func_buf_unrgtr_input (size:192b/24B) */
+struct hwrm_func_buf_unrgtr_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint64_t resp_addr;
+ uint32_t enables;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This bit must be '1' for the vf_id field to be
+ * configured.
*/
+ #define HWRM_FUNC_BUF_UNRGTR_INPUT_ENABLES_VF_ID UINT32_C(0x1)
+ /*
+ * This value is used to identify a Virtual Function (VF).
+ * The scope of VF ID is local within a PF.
+ */
+ uint16_t vf_id;
+ uint8_t unused_0[2];
} __attribute__((packed));
-/* hwrm_func_buf_unrgtr */
-/*
- * Description: This command is used by the PF driver to unregister buffers used
- * in the PF-VF communication with the HWRM. The PF driver uses this command to
- * unregister buffers for PF-VF communication. A parent PF may issue this
- * command to unregister buffers for communication between the PF and a specific
- * VF. If the VF ID is not valid, then this command is used to unregister
- * buffers used for communications with all children VFs of the PF.
- */
-/* Input (24 bytes) */
-struct hwrm_func_buf_unrgtr_input {
- uint16_t req_type;
+/* hwrm_func_buf_unrgtr_output (size:128b/16B) */
+struct hwrm_func_buf_unrgtr_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t cmpl_ring;
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_func_drv_qver *
+ **********************/
+
+
+/* hwrm_func_drv_qver_input (size:192b/24B) */
+struct hwrm_func_drv_qver_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t cmpl_ring;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint64_t resp_addr;
+ uint16_t seq_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t enables;
- /* This bit must be '1' for the vf_id field to be configured. */
- #define HWRM_FUNC_BUF_UNRGTR_INPUT_ENABLES_VF_ID UINT32_C(0x1)
- uint16_t vf_id;
+ uint64_t resp_addr;
+ /* Reserved for future use. */
+ uint32_t reserved;
/*
- * This value is used to identify a Virtual Function (VF). The
- * scope of VF ID is local within a PF.
+ * Function ID of the function that is being queried.
+ * 0xFF... (All Fs) if the query is for the requesting
+ * function.
*/
- uint16_t unused_0;
+ uint16_t fid;
+ uint8_t unused_0[2];
} __attribute__((packed));
-/* Output (16 bytes) */
-struct hwrm_func_buf_unrgtr_output {
- uint16_t error_code;
+/* hwrm_func_drv_qver_output (size:192b/24B) */
+struct hwrm_func_drv_qver_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * This value indicates the type of OS. The values are based on the
+ * CIM_OperatingSystem.mof file as published by the DMTF.
+ */
+ uint16_t os_type;
+ /* Unknown */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_UNKNOWN UINT32_C(0x0)
+ /* Other OS not listed below. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_OTHER UINT32_C(0x1)
+ /* MSDOS OS. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_MSDOS UINT32_C(0xe)
+ /* Windows OS. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_WINDOWS UINT32_C(0x12)
+ /* Solaris OS. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_SOLARIS UINT32_C(0x1d)
+ /* Linux OS. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_LINUX UINT32_C(0x24)
+ /* FreeBSD OS. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_FREEBSD UINT32_C(0x2a)
+ /* VMware ESXi OS. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_ESXI UINT32_C(0x68)
+ /* Microsoft Windows 8 64-bit OS. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_WIN864 UINT32_C(0x73)
+ /* Microsoft Windows Server 2012 R2 OS. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_WIN2012R2 UINT32_C(0x74)
+ /* UEFI driver. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_UEFI UINT32_C(0x8000)
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_LAST \
+ HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_UEFI
+ /* This is the 8bit major version of the driver. */
+ uint8_t ver_maj_8b;
+ /* This is the 8bit minor version of the driver. */
+ uint8_t ver_min_8b;
+ /* This is the 8bit update version of the driver. */
+ uint8_t ver_upd_8b;
+ uint8_t unused_0[2];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+ /* This is the 16bit major version of the driver. */
+ uint16_t ver_maj;
+ /* This is the 16bit minor version of the driver. */
+ uint16_t ver_min;
+ /* This is the 16bit update version of the driver. */
+ uint16_t ver_upd;
+ /* This is the 16bit patch version of the driver. */
+ uint16_t ver_patch;
+} __attribute__((packed));
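
For illustration only: a sketch of decoding the queried driver version from hwrm_func_drv_qver_output. The preference for the 16-bit fields when they are non-zero is an assumption made for this example, not a rule stated by the header.

#include <stdint.h>
#include <rte_byteorder.h>

static void
example_decode_drv_qver(const struct hwrm_func_drv_qver_output *resp,
			uint16_t *maj, uint16_t *min, uint16_t *upd)
{
	if (resp->ver_maj || resp->ver_min || resp->ver_upd) {
		/* Prefer the 16-bit fields when the firmware filled them in
		 * (assumption for this sketch). */
		*maj = rte_le_to_cpu_16(resp->ver_maj);
		*min = rte_le_to_cpu_16(resp->ver_min);
		*upd = rte_le_to_cpu_16(resp->ver_upd);
	} else {
		/* Otherwise fall back to the legacy 8-bit fields. */
		*maj = resp->ver_maj_8b;
		*min = resp->ver_min_8b;
		*upd = resp->ver_upd_8b;
	}
}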
+
+/****************************
+ * hwrm_func_resource_qcaps *
+ ****************************/
+
+
+/* hwrm_func_resource_qcaps_input (size:192b/24B) */
+struct hwrm_func_resource_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint16_t cmpl_ring;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Function ID of the function that is being queried.
+ * 0xFF... (All Fs) if the query is for the requesting
+ * function.
*/
+ uint16_t fid;
+ uint8_t unused_0[6];
} __attribute__((packed));
-/* hwrm_port_phy_cfg */
-/*
- * Description: This command configures the PHY device for the port. It allows
- * setting of the most generic settings for the PHY. The HWRM shall complete
- * this command as soon as PHY settings are configured. They may not be applied
- * when the command response is provided. A VF driver shall not be allowed to
- * configure PHY using this command. In a network partition mode, a PF driver
- * shall not be allowed to configure PHY using this command.
- */
-/* Input (56 bytes) */
-struct hwrm_port_phy_cfg_input {
- uint16_t req_type;
+/* hwrm_func_resource_qcaps_output (size:448b/56B) */
+struct hwrm_func_resource_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Maximum guaranteed number of VFs supported by PF. Not applicable for VFs. */
+ uint16_t max_vfs;
+ /* Maximum guaranteed number of MSI-X vectors supported by function */
+ uint16_t max_msix;
+ /*
+ * Hint of the strategy the PF driver should use when reserving
+ * resources for its VFs.
+ */
+ uint16_t vf_reservation_strategy;
+ /* The PF driver should evenly divide its remaining resources among all VFs. */
+ #define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL \
+ UINT32_C(0x0)
+ /* The PF driver should only reserve minimal resources for each VF. */
+ #define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL \
+ UINT32_C(0x1)
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The PF driver should not reserve any resources for each VF until the
+ * VF interface is brought up.
*/
- uint16_t cmpl_ring;
+ #define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL_STATIC \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_LAST \
+ HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL_STATIC
+ /* Minimum guaranteed number of RSS/COS contexts */
+ uint16_t min_rsscos_ctx;
+ /* Maximum non-guaranteed number of RSS/COS contexts */
+ uint16_t max_rsscos_ctx;
+ /* Minimum guaranteed number of completion rings */
+ uint16_t min_cmpl_rings;
+ /* Maximum non-guaranteed number of completion rings */
+ uint16_t max_cmpl_rings;
+ /* Minimum guaranteed number of transmit rings */
+ uint16_t min_tx_rings;
+ /* Maximum non-guaranteed number of transmit rings */
+ uint16_t max_tx_rings;
+ /* Minimum guaranteed number of receive rings */
+ uint16_t min_rx_rings;
+ /* Maximum non-guaranteed number of receive rings */
+ uint16_t max_rx_rings;
+ /* Minimum guaranteed number of L2 contexts */
+ uint16_t min_l2_ctxs;
+ /* Maximum non-guaranteed number of L2 contexts */
+ uint16_t max_l2_ctxs;
+ /* Minimum guaranteed number of VNICs */
+ uint16_t min_vnics;
+ /* Maximum non-guaranteed number of VNICs */
+ uint16_t max_vnics;
+ /* Minimum guaranteed number of statistic contexts */
+ uint16_t min_stat_ctx;
+ /* Maximum non-guaranteed number of statistic contexts */
+ uint16_t max_stat_ctx;
+ /* Minimum guaranteed number of ring groups */
+ uint16_t min_hw_ring_grps;
+ /* Maximum non-guaranteed number of ring groups */
+ uint16_t max_hw_ring_grps;
+ /*
+ * Maximum number of inputs into the transmit scheduler for this function.
+ * The number of TX rings assigned to the function cannot exceed this value.
+ */
+ uint16_t max_tx_scheduler_inputs;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
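
For illustration only: a sketch of clamping a requested TX ring count to the [min, max] window reported by hwrm_func_resource_qcaps; the same pattern applies to the other min/max resource pairs in the response.

#include <stdint.h>
#include <rte_byteorder.h>

static uint16_t
example_clamp_tx_rings(const struct hwrm_func_resource_qcaps_output *caps,
		       uint16_t wanted)
{
	uint16_t lo = rte_le_to_cpu_16(caps->min_tx_rings);
	uint16_t hi = rte_le_to_cpu_16(caps->max_tx_rings);

	if (wanted < lo)
		return lo;	/* never below the guaranteed minimum */
	if (wanted > hi)
		return hi;	/* never above the non-guaranteed maximum */
	return wanted;
}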
+
+/*****************************
+ * hwrm_func_vf_resource_cfg *
+ *****************************/
+
+
+/* hwrm_func_vf_resource_cfg_input (size:448b/56B) */
+struct hwrm_func_vf_resource_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t cmpl_ring;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint64_t resp_addr;
+ uint16_t seq_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint32_t flags;
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* VF ID that is being configured by PF */
+ uint16_t vf_id;
+ /* Maximum guaranteed number of MSI-X vectors for the function */
+ uint16_t max_msix;
+ /* Minimum guaranteed number of RSS/COS contexts */
+ uint16_t min_rsscos_ctx;
+ /* Maximum non-guaranteed number of RSS/COS contexts */
+ uint16_t max_rsscos_ctx;
+ /* Minimum guaranteed number of completion rings */
+ uint16_t min_cmpl_rings;
+ /* Maximum non-guaranteed number of completion rings */
+ uint16_t max_cmpl_rings;
+ /* Minimum guaranteed number of transmit rings */
+ uint16_t min_tx_rings;
+ /* Maximum non-guaranteed number of transmit rings */
+ uint16_t max_tx_rings;
+ /* Minimum guaranteed number of receive rings */
+ uint16_t min_rx_rings;
+ /* Maximum non-guaranteed number of receive rings */
+ uint16_t max_rx_rings;
+ /* Minimum guaranteed number of L2 contexts */
+ uint16_t min_l2_ctxs;
+ /* Maximum non-guaranteed number of L2 contexts */
+ uint16_t max_l2_ctxs;
+ /* Minimum guaranteed number of VNICs */
+ uint16_t min_vnics;
+ /* Maximum non-guaranteed number of VNICs */
+ uint16_t max_vnics;
+ /* Minimum guaranteed number of statistic contexts */
+ uint16_t min_stat_ctx;
+ /* Maximum non-guaranteed number of statistic contexts */
+ uint16_t max_stat_ctx;
+ /* Minimum guaranteed number of ring groups */
+ uint16_t min_hw_ring_grps;
+ /* Maximum non-guaranteed number of ring groups */
+ uint16_t max_hw_ring_grps;
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_func_vf_resource_cfg_output (size:256b/32B) */
+struct hwrm_func_vf_resource_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Reserved number of RSS/COS contexts */
+ uint16_t reserved_rsscos_ctx;
+ /* Reserved number of completion rings */
+ uint16_t reserved_cmpl_rings;
+ /* Reserved number of transmit rings */
+ uint16_t reserved_tx_rings;
+ /* Reserved number of receive rings */
+ uint16_t reserved_rx_rings;
+ /* Reserved number of L2 contexts */
+ uint16_t reserved_l2_ctxs;
+ /* Reserved number of VNICs */
+ uint16_t reserved_vnics;
+ /* Reserved number of statistic contexts */
+ uint16_t reserved_stat_ctx;
+ /* Reserved number of ring groups */
+ uint16_t reserved_hw_ring_grps;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
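
For illustration only: a sketch of a PF using hwrm_func_vf_resource_cfg to pin one VF to a fixed ring allocation by setting min equal to max (the assumption being that an equal pair behaves as a guaranteed, fixed-size reservation). Header fields and the remaining resource pairs are omitted for brevity.

#include <stdint.h>
#include <rte_byteorder.h>

static void
example_fix_vf_rings(struct hwrm_func_vf_resource_cfg_input *req,
		     uint16_t vf_id, uint16_t n_tx, uint16_t n_rx)
{
	req->vf_id = rte_cpu_to_le_16(vf_id);
	/* min == max: request a fixed-size reservation (assumption). */
	req->min_tx_rings = rte_cpu_to_le_16(n_tx);
	req->max_tx_rings = rte_cpu_to_le_16(n_tx);
	req->min_rx_rings = rte_cpu_to_le_16(n_rx);
	req->max_rx_rings = rte_cpu_to_le_16(n_rx);
}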
+
+/*********************************
+ * hwrm_func_backing_store_qcaps *
+ *********************************/
+
+
+/* hwrm_func_backing_store_qcaps_input (size:128b/16B) */
+struct hwrm_func_backing_store_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} __attribute__((packed));
+
+/* hwrm_func_backing_store_qcaps_output (size:512b/64B) */
+struct hwrm_func_backing_store_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Maximum number of QP context entries supported for this function. */
+ uint32_t qp_max_entries;
+ /*
+ * Minimum number of QP context entries that need to be reserved
+ * for QP1 for the PF and its VFs. PF drivers must allocate at least
+ * this many QP context entries, even if RoCE will not be used.
+ */
+ uint16_t qp_min_qp1_entries;
+ /* Maximum number of QP context entries that can be used for L2. */
+ uint16_t qp_max_l2_entries;
+ /* Number of bytes that must be allocated for each context entry. */
+ uint16_t qp_entry_size;
+ /* Maximum number of SRQ context entries that can be used for L2. */
+ uint16_t srq_max_l2_entries;
+ /* Maximum number of SRQ context entries supported for this function. */
+ uint32_t srq_max_entries;
+ /* Number of bytes that must be allocated for each context entry. */
+ uint16_t srq_entry_size;
+ /* Maximum number of CQ context entries that can be used for L2. */
+ uint16_t cq_max_l2_entries;
+ /* Maximum number of CQ context entries supported for this function. */
+ uint32_t cq_max_entries;
+ /* Number of bytes that must be allocated for each context entry. */
+ uint16_t cq_entry_size;
+ /* Maximum number of VNIC context entries supported for this function. */
+ uint16_t vnic_max_vnic_entries;
+ /* Maximum number of Ring table context entries supported for this function. */
+ uint16_t vnic_max_ring_table_entries;
+ /* Number of bytes that must be allocated for each context entry. */
+ uint16_t vnic_entry_size;
+ /* Maximum number of statistic context entries supported for this function. */
+ uint32_t stat_max_entries;
+ /* Number of bytes that must be allocated for each context entry. */
+ uint16_t stat_entry_size;
+ /* Maximum number of TQM context entries supported per ring. */
+ uint16_t tqm_max_entries_per_ring;
+ /* Number of bytes that must be allocated for each context entry. */
+ uint16_t tqm_entry_size;
+ /* Number of bytes that must be allocated for each context entry. */
+ uint16_t mrav_entry_size;
+ /* Maximum number of MR/AV context entries supported for this function. */
+ uint32_t mrav_max_entries;
+ /* Maximum number of Timer context entries supported for this function. */
+ uint32_t tim_max_entries;
+ /* Number of bytes that must be allocated for each context entry. */
+ uint16_t tim_entry_size;
+ uint8_t unused_0;
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
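
For illustration only: the sizing data above is typically turned into host backing-store memory as entry count times per-entry size. A sketch for the QP context region, rounded up to a 4 KB page (the page size choice is arbitrary here):

#include <stdint.h>
#include <rte_byteorder.h>

static uint64_t
example_qp_ctx_mem_size(const struct hwrm_func_backing_store_qcaps_output *caps)
{
	uint64_t bytes = (uint64_t)rte_le_to_cpu_32(caps->qp_max_entries) *
			 rte_le_to_cpu_16(caps->qp_entry_size);

	/* Round up to the 4 KB backing-store page used in this example. */
	return (bytes + 4095) & ~UINT64_C(4095);
}

The hwrm_func_backing_store_cfg command that follows packs each region's PBL indirection level (low nibble) and page size (high nibble) into a single byte; a sketch of that encoding for "2 MB pages, one indirection level" of the QP context region, using the macros defined below:

static uint8_t
example_qpc_pg_byte(void)
{
	return (uint8_t)(HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_2M |
			 HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_1);
}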
+
+/*******************************
+ * hwrm_func_backing_store_cfg *
+ *******************************/
+
+
+/* hwrm_func_backing_store_cfg_input (size:2048b/256B) */
+struct hwrm_func_backing_store_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * When set, the firmware only uses on-chip resources and does not
+ * expect any backing store to be provided by the host driver. This
+ * mode provides minimal L2 functionality (e.g. limited L2 resources,
+ * no RoCE).
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_FLAGS_PREBOOT_MODE \
+ UINT32_C(0x1)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the qp fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the srq fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the cq fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the vnic fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the stat fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the tqm_sp fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the tqm_ring0 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING0 \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the tqm_ring1 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING1 \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the tqm_ring2 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING2 \
+ UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the tqm_ring3 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING3 \
+ UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the tqm_ring4 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING4 \
+ UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the tqm_ring5 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING5 \
+ UINT32_C(0x800)
+ /*
+ * This bit must be '1' for the tqm_ring6 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING6 \
+ UINT32_C(0x1000)
+ /*
+ * This bit must be '1' for the tqm_ring7 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING7 \
+ UINT32_C(0x2000)
+ /*
+ * This bit must be '1' for the mrav fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_MRAV \
+ UINT32_C(0x4000)
+ /*
+ * This bit must be '1' for the tim fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TIM \
+ UINT32_C(0x8000)
+ /* QPC page size and level. */
+ uint8_t qpc_pg_size_qpc_lvl;
+ /* QPC PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_2
+ /* QPC page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_1G
+ /* SRQ page size and level. */
+ uint8_t srq_pg_size_srq_lvl;
+ /* SRQ PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_2
+ /* SRQ page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_1G
+ /* CQ page size and level. */
+ uint8_t cq_pg_size_cq_lvl;
+ /* CQ PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_2
+ /* CQ page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_1G
+ /* VNIC page size and level. */
+ uint8_t vnic_pg_size_vnic_lvl;
+ /* VNIC PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_2
+ /* VNIC page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_1G
+ /* Stat page size and level. */
+ uint8_t stat_pg_size_stat_lvl;
+ /* Stat PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_2
+ /* Stat page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_1G
+ /* TQM slow path page size and level. */
+ uint8_t tqm_sp_pg_size_tqm_sp_lvl;
+ /* TQM slow path PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_2
+ /* TQM slow path page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_1G
+ /* TQM ring 0 page size and level. */
+ uint8_t tqm_ring0_pg_size_tqm_ring0_lvl;
+ /* TQM ring 0 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_2
+ /* TQM ring 0 page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_1G
+ /* TQM ring 1 page size and level. */
+ uint8_t tqm_ring1_pg_size_tqm_ring1_lvl;
+ /* TQM ring 1 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_2
+ /* TQM ring 1 page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_1G
+ /* TQM ring 2 page size and level. */
+ uint8_t tqm_ring2_pg_size_tqm_ring2_lvl;
+ /* TQM ring 2 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_2
+ /* TQM ring 2 page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_1G
+ /* TQM ring 3 page size and level. */
+ uint8_t tqm_ring3_pg_size_tqm_ring3_lvl;
+ /* TQM ring 3 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_2
+ /* TQM ring 3 page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_1G
+ /* TQM ring 4 page size and level. */
+ uint8_t tqm_ring4_pg_size_tqm_ring4_lvl;
+ /* TQM ring 4 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_2
+ /* TQM ring 4 page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_1G
+ /* TQM ring 5 page size and level. */
+ uint8_t tqm_ring5_pg_size_tqm_ring5_lvl;
+ /* TQM ring 5 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_2
+ /* TQM ring 5 page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_1G
+ /* TQM ring 6 page size and level. */
+ uint8_t tqm_ring6_pg_size_tqm_ring6_lvl;
+ /* TQM ring 6 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_2
+ /* TQM ring 6 page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_1G
+ /* TQM ring 7 page size and level. */
+ uint8_t tqm_ring7_pg_size_tqm_ring7_lvl;
+ /* TQM ring 7 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_2
+ /* TQM ring 7 page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_1G
+ /* MR/AV page size and level. */
+ uint8_t mrav_pg_size_mrav_lvl;
+ /* MR/AV PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_2
+ /* MR/AV page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_1G
+ /* Timer page size and level. */
+ uint8_t tim_pg_size_tim_lvl;
+ /* Timer PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_2
+ /* Timer page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_1G
+ /* QP page directory. */
+ uint64_t qpc_page_dir;
+ /* SRQ page directory. */
+ uint64_t srq_page_dir;
+ /* CQ page directory. */
+ uint64_t cq_page_dir;
+ /* VNIC page directory. */
+ uint64_t vnic_page_dir;
+ /* Stat page directory. */
+ uint64_t stat_page_dir;
+ /* TQM slowpath page directory. */
+ uint64_t tqm_sp_page_dir;
+ /* TQM ring 0 page directory. */
+ uint64_t tqm_ring0_page_dir;
+ /* TQM ring 1 page directory. */
+ uint64_t tqm_ring1_page_dir;
+ /* TQM ring 2 page directory. */
+ uint64_t tqm_ring2_page_dir;
+ /* TQM ring 3 page directory. */
+ uint64_t tqm_ring3_page_dir;
+ /* TQM ring 4 page directory. */
+ uint64_t tqm_ring4_page_dir;
+ /* TQM ring 5 page directory. */
+ uint64_t tqm_ring5_page_dir;
+ /* TQM ring 6 page directory. */
+ uint64_t tqm_ring6_page_dir;
+ /* TQM ring 7 page directory. */
+ uint64_t tqm_ring7_page_dir;
+ /* MR/AV page directory. */
+ uint64_t mrav_page_dir;
+ /* Timer page directory. */
+ uint64_t tim_page_dir;
+ /* Number of QPs. */
+ uint32_t qp_num_entries;
+ /* Number of SRQs. */
+ uint32_t srq_num_entries;
+ /* Number of CQs. */
+ uint32_t cq_num_entries;
+ /* Number of Stats. */
+ uint32_t stat_num_entries;
+ /* Number of TQM slowpath entries. */
+ uint32_t tqm_sp_num_entries;
+ /* Number of TQM ring 0 entries. */
+ uint32_t tqm_ring0_num_entries;
+ /* Number of TQM ring 1 entries. */
+ uint32_t tqm_ring1_num_entries;
+ /* Number of TQM ring 2 entries. */
+ uint32_t tqm_ring2_num_entries;
+ /* Number of TQM ring 3 entries. */
+ uint32_t tqm_ring3_num_entries;
+ /* Number of TQM ring 4 entries. */
+ uint32_t tqm_ring4_num_entries;
+ /* Number of TQM ring 5 entries. */
+ uint32_t tqm_ring5_num_entries;
+ /* Number of TQM ring 6 entries. */
+ uint32_t tqm_ring6_num_entries;
+ /* Number of TQM ring 7 entries. */
+ uint32_t tqm_ring7_num_entries;
+ /* Number of MR/AV entries. */
+ uint32_t mrav_num_entries;
+ /* Number of Timer entries. */
+ uint32_t tim_num_entries;
+ /* Number of entries to reserve for QP1 */
+ uint16_t qp_num_qp1_entries;
+ /* Number of entries to reserve for L2 */
+ uint16_t qp_num_l2_entries;
+ /* Number of bytes that have been allocated for each context entry. */
+ uint16_t qp_entry_size;
+ /* Number of entries to reserve for L2 */
+ uint16_t srq_num_l2_entries;
+ /* Number of bytes that have been allocated for each context entry. */
+ uint16_t srq_entry_size;
+ /* Number of entries to reserve for L2 */
+ uint16_t cq_num_l2_entries;
+ /* Number of bytes that have been allocated for each context entry. */
+ uint16_t cq_entry_size;
+ /* Number of entries to reserve for VNIC entries */
+ uint16_t vnic_num_vnic_entries;
+ /* Number of entries to reserve for Ring table entries */
+ uint16_t vnic_num_ring_table_entries;
+ /* Number of bytes that have been allocated for each context entry. */
+ uint16_t vnic_entry_size;
+ /* Number of bytes that have been allocated for each context entry. */
+ uint16_t stat_entry_size;
+ /* Number of bytes that have been allocated for each context entry. */
+ uint16_t tqm_entry_size;
+ /* Number of bytes that have been allocated for each context entry. */
+ uint16_t mrav_entry_size;
+ /* Number of bytes that have been allocated for each context entry. */
+ uint16_t tim_entry_size;
+} __attribute__((packed));
+
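Each of the combined *_pg_size_*_lvl bytes in this request carries the PBL indirection level in bits 3:0 and the backing-store page size in bits 7:4, which is why the _PG_SIZE_ values above are pre-shifted by 4. A minimal sketch of composing such a byte, using only the macros defined above; the helper name example_pg_size_lvl is a placeholder for illustration and is not part of this header:

/* Pack a PBL level (bits 3:0) and an already-shifted page-size value
 * (bits 7:4) into one combined byte, e.g. tim_pg_size_tim_lvl. */
static uint8_t
example_pg_size_lvl(uint8_t lvl, uint8_t pg_size)
{
	return (lvl & HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_MASK) |
	       (pg_size & HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_MASK);
}

/* Example: a two-level PBL with 4KB pages for the timer context.
 * req.tim_pg_size_tim_lvl = example_pg_size_lvl(
 *	HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_2,
 *	HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_4K);
 */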
+/* hwrm_func_backing_store_cfg_output (size:128b/16B) */
+struct hwrm_func_backing_store_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
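Per the comment above, the valid byte is the last thing the firmware writes into the response, so a caller should poll it before trusting any other field of the output record. A minimal sketch under that assumption; wait_resp_valid and the one-microsecond poll interval are illustrative only and are not the bnxt PMD's actual completion path:

/* Needs <errno.h> and <rte_cycles.h> (for rte_delay_us()). */
static int
wait_resp_valid(volatile struct hwrm_func_backing_store_cfg_output *resp,
		unsigned int timeout_us)
{
	while (timeout_us--) {
		/* 'valid' is written last by the firmware. */
		if (resp->valid == 1)
			return 0;
		rte_delay_us(1);
	}
	return -ETIMEDOUT;
}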
+/********************************
+ * hwrm_func_backing_store_qcfg *
+ ********************************/
+
+
+/* hwrm_func_backing_store_qcfg_input (size:128b/16B) */
+struct hwrm_func_backing_store_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * When this bit is set to '1', the PHY for the port shall be
- * reset. # If this bit is set to 1, then the HWRM shall reset
- * the PHY after applying PHY configuration changes specified in
- * this command. # In order to guarantee that PHY configuration
- * changes specified in this command take effect, the HWRM
- * client should set this flag to 1. # If this bit is not set to
- * 1, then the HWRM may reset the PHY depending on the current
- * PHY configuration and settings specified in this command.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY UINT32_C(0x1)
- /* deprecated bit. Do not use!!! */
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_DEPRECATED UINT32_C(0x2)
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} __attribute__((packed));
+
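The five fields above form the request header common to every HWRM command. A sketch of filling it for this query, assuming the command-id macro HWRM_FUNC_BACKING_STORE_QCFG defined earlier in this header, rte_cpu_to_le_*() from <rte_byteorder.h> and memset() from <string.h>; the function name and the caller-supplied seq/resp_dma_addr are placeholders, not part of this header:

static void
example_qcfg_prep(struct hwrm_func_backing_store_qcfg_input *req,
		  uint16_t seq, uint64_t resp_dma_addr)
{
	memset(req, 0, sizeof(*req));
	req->req_type  = rte_cpu_to_le_16(HWRM_FUNC_BACKING_STORE_QCFG);
	req->cmpl_ring = rte_cpu_to_le_16(0xffff);	/* no completion ring */
	req->seq_id    = rte_cpu_to_le_16(seq);	/* driver-chosen sequence id */
	req->target_id = rte_cpu_to_le_16(0xffff);	/* 0xffff targets the HWRM itself */
	req->resp_addr = rte_cpu_to_le_64(resp_dma_addr); /* DMA address of response buffer */
}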
+/* hwrm_func_backing_store_qcfg_output (size:1920b/240B) */
+struct hwrm_func_backing_store_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint32_t flags;
+ /*
+ * When set, the firmware only uses on-chip resources and does not
+ * expect any backing store to be provided by the host driver. This
+ * mode provides minimal L2 functionality (e.g. limited L2 resources,
+ * no RoCE).
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_FLAGS_PREBOOT_MODE \
+ UINT32_C(0x1)
+ uint8_t unused_0[4];
+ /*
+ * This bit must be '1' for the qp fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_QP \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the srq fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_SRQ \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the cq fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_CQ \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the vnic fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_VNIC \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the stat fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_STAT \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the tqm_sp fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_SP \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the tqm_ring0 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING0 \
+ UINT32_C(0x40)
/*
- * When this bit is set to '1', the link shall be forced to the
- * force_link_speed value. When this bit is set to '1', the HWRM
- * client should not enable any of the auto negotiation related
- * fields represented by auto_XXX fields in this command. When
- * this bit is set to '1' and the HWRM client has enabled a
- * auto_XXX field in this command, then the HWRM shall ignore
- * the enabled auto_XXX field. When this bit is set to zero, the
- * link shall be allowed to autoneg.
+ * This bit must be '1' for the tqm_ring1 fields to be
+ * configured.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE UINT32_C(0x4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING1 \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the tqm_ring2 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING2 \
+ UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the tqm_ring3 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING3 \
+ UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the tqm_ring4 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING4 \
+ UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the tqm_ring5 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING5 \
+ UINT32_C(0x800)
+ /*
+ * This bit must be '1' for the tqm_ring6 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING6 \
+ UINT32_C(0x1000)
+ /*
+ * This bit must be '1' for the tqm_ring7 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING7 \
+ UINT32_C(0x2000)
+ /*
+ * This bit must be '1' for the mrav fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_MRAV \
+ UINT32_C(0x4000)
+ /*
+ * This bit must be '1' for the tim fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TIM \
+ UINT32_C(0x8000)
+ /* QPC page size and level. */
+ uint8_t qpc_pg_size_qpc_lvl;
+ /* QPC PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_2
+ /* QPC page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_1G
+ /* SRQ page size and level. */
+ uint8_t srq_pg_size_srq_lvl;
+ /* SRQ PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_2
+ /* SRQ page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_1G
+ /* CQ page size and level. */
+ uint8_t cq_pg_size_cq_lvl;
+ /* CQ PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_2
+ /* CQ page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_1G
+ /* VNIC page size and level. */
+ uint8_t vnic_pg_size_vnic_lvl;
+ /* VNIC PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_2
+ /* VNIC page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_1G
+ /* Stat page size and level. */
+ uint8_t stat_pg_size_stat_lvl;
+ /* Stat PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_2
+ /* Stat page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_1G
+ /* TQM slow path page size and level. */
+ uint8_t tqm_sp_pg_size_tqm_sp_lvl;
+ /* TQM slow path PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_2
+ /* TQM slow path page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_1G
+ /* TQM ring 0 page size and level. */
+ uint8_t tqm_ring0_pg_size_tqm_ring0_lvl;
+ /* TQM ring 0 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_2
+ /* TQM ring 0 page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_1G
+ /* TQM ring 1 page size and level. */
+ uint8_t tqm_ring1_pg_size_tqm_ring1_lvl;
+ /* TQM ring 1 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_2
+ /* TQM ring 1 page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_1G
+ /* TQM ring 2 page size and level. */
+ uint8_t tqm_ring2_pg_size_tqm_ring2_lvl;
+ /* TQM ring 2 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_2
+ /* TQM ring 2 page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_1G
+ /* TQM ring 3 page size and level. */
+ uint8_t tqm_ring3_pg_size_tqm_ring3_lvl;
+ /* TQM ring 3 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_2
+ /* TQM ring 3 page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_1G
+ /* TQM ring 4 page size and level. */
+ uint8_t tqm_ring4_pg_size_tqm_ring4_lvl;
+ /* TQM ring 4 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_2
+ /* TQM ring 4 page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_1G
+ /* TQM ring 5 page size and level. */
+ uint8_t tqm_ring5_pg_size_tqm_ring5_lvl;
+ /* TQM ring 5 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_2
+ /* TQM ring 5 page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_1G
+ /* TQM ring 6 page size and level. */
+ uint8_t tqm_ring6_pg_size_tqm_ring6_lvl;
+ /* TQM ring 6 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_2
+ /* TQM ring 6 page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_1G
+ /* TQM ring 7 page size and level. */
+ uint8_t tqm_ring7_pg_size_tqm_ring7_lvl;
+ /* TQM ring 7 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_2
+ /* TQM ring 7 page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_1G
+ /* MR/AV page size and level. */
+ uint8_t mrav_pg_size_mrav_lvl;
+ /* MR/AV PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_2
+ /* MR/AV page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_1G
+ /* Timer page size and level. */
+ uint8_t tim_pg_size_tim_lvl;
+ /* Timer PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_2
+ /* Timer page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_1G
+ /* QP page directory. */
+ uint64_t qpc_page_dir;
+ /* SRQ page directory. */
+ uint64_t srq_page_dir;
+ /* CQ page directory. */
+ uint64_t cq_page_dir;
+ /* VNIC page directory. */
+ uint64_t vnic_page_dir;
+ /* Stat page directory. */
+ uint64_t stat_page_dir;
+ /* TQM slowpath page directory. */
+ uint64_t tqm_sp_page_dir;
+ /* TQM ring 0 page directory. */
+ uint64_t tqm_ring0_page_dir;
+ /* TQM ring 1 page directory. */
+ uint64_t tqm_ring1_page_dir;
+ /* TQM ring 2 page directory. */
+ uint64_t tqm_ring2_page_dir;
+ /* TQM ring 3 page directory. */
+ uint64_t tqm_ring3_page_dir;
+ /* TQM ring 4 page directory. */
+ uint64_t tqm_ring4_page_dir;
+ /* TQM ring 5 page directory. */
+ uint64_t tqm_ring5_page_dir;
+ /* TQM ring 6 page directory. */
+ uint64_t tqm_ring6_page_dir;
+ /* TQM ring 7 page directory. */
+ uint64_t tqm_ring7_page_dir;
+ /* MR/AV page directory. */
+ uint64_t mrav_page_dir;
+ /* Timer page directory. */
+ uint64_t tim_page_dir;
+ /* Number of entries to reserve for QP1 */
+ uint16_t qp_num_qp1_entries;
+ /* Number of entries to reserve for L2 */
+ uint16_t qp_num_l2_entries;
+ /* Number of QPs. */
+ uint32_t qp_num_entries;
+ /* Number of SRQs. */
+ uint32_t srq_num_entries;
+ /* Number of entries to reserve for L2 */
+ uint16_t srq_num_l2_entries;
+ /* Number of entries to reserve for L2 */
+ uint16_t cq_num_l2_entries;
+ /* Number of CQs. */
+ uint32_t cq_num_entries;
+ /* Number of entries to reserve for VNIC entries */
+ uint16_t vnic_num_vnic_entries;
+ /* Number of entries to reserve for Ring table entries */
+ uint16_t vnic_num_ring_table_entries;
+ /* Number of Stats. */
+ uint32_t stat_num_entries;
+ /* Number of TQM slowpath entries. */
+ uint32_t tqm_sp_num_entries;
+ /* Number of TQM ring 0 entries. */
+ uint32_t tqm_ring0_num_entries;
+ /* Number of TQM ring 1 entries. */
+ uint32_t tqm_ring1_num_entries;
+ /* Number of TQM ring 2 entries. */
+ uint32_t tqm_ring2_num_entries;
+ /* Number of TQM ring 3 entries. */
+ uint32_t tqm_ring3_num_entries;
+ /* Number of TQM ring 4 entries. */
+ uint32_t tqm_ring4_num_entries;
+ /* Number of TQM ring 5 entries. */
+ uint32_t tqm_ring5_num_entries;
+ /* Number of TQM ring 6 entries. */
+ uint32_t tqm_ring6_num_entries;
+ /* Number of TQM ring 7 entries. */
+ uint32_t tqm_ring7_num_entries;
+ /* Number of MR/AV entries. */
+ uint32_t mrav_num_entries;
+ /* Number of Timer entries. */
+ uint32_t tim_num_entries;
+ uint8_t unused_1[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
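Reading one of the combined bytes returned by this command is the inverse of the encoding used in hwrm_func_backing_store_cfg_input: mask and shift with the matching _MASK/_SFT pair defined above. A short sketch for the QPC byte; example_decode_qpc is a placeholder name, not part of this header:

/* Split qpc_pg_size_qpc_lvl into its PBL level and page-size code. */
static void
example_decode_qpc(const struct hwrm_func_backing_store_qcfg_output *resp,
		   uint8_t *lvl, uint8_t *pg_size)
{
	uint8_t v = resp->qpc_pg_size_qpc_lvl;

	*lvl = (v & HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_MASK) >>
	       HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_SFT;
	*pg_size = (v & HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_MASK) >>
		   HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_SFT;
	/* e.g. *pg_size == 0x2 corresponds to ..._QPC_PG_SIZE_PG_64K >> 4,
	 * i.e. 64KB backing-store pages. */
}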
+/*********************
+ * hwrm_port_phy_cfg *
+ *********************/
+
+
+/* hwrm_port_phy_cfg_input (size:448b/56B) */
+struct hwrm_port_phy_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * When this bit is set to '1', the PHY for the port shall
+ * be reset.
+ *
+ * # If this bit is set to 1, then the HWRM shall reset the
+ * PHY after applying PHY configuration changes specified
+ * in this command.
+ * # In order to guarantee that PHY configuration changes
+ * specified in this command take effect, the HWRM
+ * client should set this flag to 1.
+ * # If this bit is not set to 1, then the HWRM may reset
+ * the PHY depending on the current PHY configuration and
+ * settings specified in this command.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY \
+ UINT32_C(0x1)
+ /* deprecated bit. Do not use!!! */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_DEPRECATED \
+ UINT32_C(0x2)
+ /*
+ * When this bit is set to '1', the link shall be forced to
+ * the force_link_speed value.
+ *
+ * When this bit is set to '1', the HWRM client should
+ * not enable any of the auto negotiation related
+ * fields represented by auto_XXX fields in this command.
+ * When this bit is set to '1' and the HWRM client has
+	 * enabled an auto_XXX field in this command, then the
+ * HWRM shall ignore the enabled auto_XXX field.
+ *
+ * When this bit is set to zero, the link
+ * shall be allowed to autoneg.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE \
+ UINT32_C(0x4)
/*
* When this bit is set to '1', the auto-negotiation process
* shall be restarted on the link.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG UINT32_C(0x8)
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG \
+ UINT32_C(0x8)
/*
- * When this bit is set to '1', Energy Efficient Ethernet (EEE)
- * is requested to be enabled on this link. If EEE is not
- * supported on this port, then this flag shall be ignored by
- * the HWRM.
+ * When this bit is set to '1', Energy Efficient Ethernet
+ * (EEE) is requested to be enabled on this link.
+ * If EEE is not supported on this port, then this flag
+ * shall be ignored by the HWRM.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_ENABLE UINT32_C(0x10)
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_ENABLE \
+ UINT32_C(0x10)
/*
- * When this bit is set to '1', Energy Efficient Ethernet (EEE)
- * is requested to be disabled on this link. If EEE is not
- * supported on this port, then this flag shall be ignored by
- * the HWRM.
+ * When this bit is set to '1', Energy Efficient Ethernet
+ * (EEE) is requested to be disabled on this link.
+ * If EEE is not supported on this port, then this flag
+ * shall be ignored by the HWRM.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_DISABLE UINT32_C(0x20)
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_DISABLE \
+ UINT32_C(0x20)
/*
- * When this bit is set to '1' and EEE is enabled on this link,
- * then TX LPI is requested to be enabled on the link. If EEE is
- * not supported on this port, then this flag shall be ignored
- * by the HWRM. If EEE is disabled on this port, then this flag
+ * When this bit is set to '1' and EEE is enabled on this
+ * link, then TX LPI is requested to be enabled on the link.
+ * If EEE is not supported on this port, then this flag
* shall be ignored by the HWRM.
+ * If EEE is disabled on this port, then this flag shall be
+ * ignored by the HWRM.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_ENABLE UINT32_C(0x40)
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_ENABLE \
+ UINT32_C(0x40)
/*
- * When this bit is set to '1' and EEE is enabled on this link,
- * then TX LPI is requested to be disabled on the link. If EEE
- * is not supported on this port, then this flag shall be
- * ignored by the HWRM. If EEE is disabled on this port, then
- * this flag shall be ignored by the HWRM.
+ * When this bit is set to '1' and EEE is enabled on this
+ * link, then TX LPI is requested to be disabled on the link.
+ * If EEE is not supported on this port, then this flag
+ * shall be ignored by the HWRM.
+ * If EEE is disabled on this port, then this flag shall be
+ * ignored by the HWRM.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_DISABLE UINT32_C(0x80)
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_DISABLE \
+ UINT32_C(0x80)
/*
- * When set to 1, then the HWRM shall enable FEC
- * autonegotitation on this port if supported. When set to 0,
- * then this flag shall be ignored. If FEC autonegotiation is
- * not supported, then the HWRM shall ignore this flag.
+	 * When set to 1, then the HWRM shall enable FEC autonegotiation
+ * on this port if supported.
+ * When set to 0, then this flag shall be ignored.
+ * If FEC autonegotiation is not supported, then the HWRM shall ignore this
+ * flag.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_ENABLE UINT32_C(0x100)
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_ENABLE \
+ UINT32_C(0x100)
/*
- * When set to 1, then the HWRM shall disable FEC
- * autonegotiation on this port if supported. When set to 0,
- * then this flag shall be ignored. If FEC autonegotiation is
- * not supported, then the HWRM shall ignore this flag.
+ * When set to 1, then the HWRM shall disable FEC autonegotiation
+ * on this port if supported.
+ * When set to 0, then this flag shall be ignored.
+ * If FEC autonegotiation is not supported, then the HWRM shall ignore this
+ * flag.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_DISABLE \
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_DISABLE \
UINT32_C(0x200)
/*
- * When set to 1, then the HWRM shall enable FEC CLAUSE 74 (Fire
- * Code) on this port if supported. When set to 0, then this
- * flag shall be ignored. If FEC CLAUSE 74 is not supported,
- * then the HWRM shall ignore this flag.
+ * When set to 1, then the HWRM shall enable FEC CLAUSE 74 (Fire Code)
+ * on this port if supported.
+ * When set to 0, then this flag shall be ignored.
+ * If FEC CLAUSE 74 is not supported, then the HWRM shall ignore this
+ * flag.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_ENABLE \
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_ENABLE \
UINT32_C(0x400)
/*
- * When set to 1, then the HWRM shall disable FEC CLAUSE 74
- * (Fire Code) on this port if supported. When set to 0, then
- * this flag shall be ignored. If FEC CLAUSE 74 is not
- * supported, then the HWRM shall ignore this flag.
+ * When set to 1, then the HWRM shall disable FEC CLAUSE 74 (Fire Code)
+ * on this port if supported.
+ * When set to 0, then this flag shall be ignored.
+ * If FEC CLAUSE 74 is not supported, then the HWRM shall ignore this
+ * flag.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_DISABLE \
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_DISABLE \
UINT32_C(0x800)
/*
- * When set to 1, then the HWRM shall enable FEC CLAUSE 91 (Reed
- * Solomon) on this port if supported. When set to 0, then this
- * flag shall be ignored. If FEC CLAUSE 91 is not supported,
- * then the HWRM shall ignore this flag.
+ * When set to 1, then the HWRM shall enable FEC CLAUSE 91 (Reed Solomon)
+ * on this port if supported.
+ * When set to 0, then this flag shall be ignored.
+ * If FEC CLAUSE 91 is not supported, then the HWRM shall ignore this
+ * flag.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_ENABLE \
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_ENABLE \
UINT32_C(0x1000)
/*
- * When set to 1, then the HWRM shall disable FEC CLAUSE 91
- * (Reed Solomon) on this port if supported. When set to 0, then
- * this flag shall be ignored. If FEC CLAUSE 91 is not
- * supported, then the HWRM shall ignore this flag.
+ * When set to 1, then the HWRM shall disable FEC CLAUSE 91 (Reed Solomon)
+ * on this port if supported.
+ * When set to 0, then this flag shall be ignored.
+ * If FEC CLAUSE 91 is not supported, then the HWRM shall ignore this
+ * flag.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_DISABLE \
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_DISABLE \
UINT32_C(0x2000)
/*
- * When this bit is set to '1', the link shall be forced to be
- * taken down. # When this bit is set to '1", all other command
- * input settings related to the link speed shall be ignored.
- * Once the link state is forced down, it can be explicitly
- * cleared from that state by setting this flag to '0'. # If
- * this flag is set to '0', then the link shall be cleared from
- * forced down state if the link is in forced down state. There
- * may be conditions (e.g. out-of-band or sideband configuration
- * changes for the link) outside the scope of the HWRM
- * implementation that may clear forced down link state.
- */
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN UINT32_C(0x4000)
- uint32_t enables;
- /* This bit must be '1' for the auto_mode field to be configured. */
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE UINT32_C(0x1)
- /* This bit must be '1' for the auto_duplex field to be configured. */
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX UINT32_C(0x2)
- /* This bit must be '1' for the auto_pause field to be configured. */
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE UINT32_C(0x4)
+ * When this bit is set to '1', the link shall be forced to
+ * be taken down.
+ *
+	 * # When this bit is set to '1', all other
+ * command input settings related to the link speed shall
+ * be ignored.
+ * Once the link state is forced down, it can be
+ * explicitly cleared from that state by setting this flag
+ * to '0'.
+ * # If this flag is set to '0', then the link shall be
+ * cleared from forced down state if the link is in forced
+ * down state.
+ * There may be conditions (e.g. out-of-band or sideband
+ * configuration changes for the link) outside the scope
+ * of the HWRM implementation that may clear forced down
+ * link state.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN \
+ UINT32_C(0x4000)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the auto_mode field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the auto_duplex field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the auto_pause field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE \
+ UINT32_C(0x4)
/*
* This bit must be '1' for the auto_link_speed field to be
* configured.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED UINT32_C(0x8)
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED \
+ UINT32_C(0x8)
/*
* This bit must be '1' for the auto_link_speed_mask field to be
* configured.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK \
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK \
UINT32_C(0x10)
- /* This bit must be '1' for the wirespeed field to be configured. */
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_WIOUTPUTEED UINT32_C(0x20)
- /* This bit must be '1' for the lpbk field to be configured. */
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_LPBK UINT32_C(0x40)
- /* This bit must be '1' for the preemphasis field to be configured. */
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_PREEMPHASIS UINT32_C(0x80)
- /* This bit must be '1' for the force_pause field to be configured. */
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the wirespeed field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_WIRESPEED \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the lpbk field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_LPBK \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the preemphasis field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_PREEMPHASIS \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the force_pause field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE \
+ UINT32_C(0x100)
/*
* This bit must be '1' for the eee_link_speed_mask field to be
* configured.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_EEE_LINK_SPEED_MASK \
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_EEE_LINK_SPEED_MASK \
UINT32_C(0x200)
- /* This bit must be '1' for the tx_lpi_timer field to be configured. */
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_TX_LPI_TIMER UINT32_C(0x400)
- uint16_t port_id;
+ /*
+ * This bit must be '1' for the tx_lpi_timer field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_TX_LPI_TIMER \
+ UINT32_C(0x400)
/* Port ID of port that is to be configured. */
- uint16_t force_link_speed;
+ uint16_t port_id;
/*
- * This is the speed that will be used if the force bit is '1'.
- * If unsupported speed is selected, an error will be generated.
+ * This is the speed that will be used if the force
+ * bit is '1'. If unsupported speed is selected, an error
+ * will be generated.
*/
+ uint16_t force_link_speed;
/* 100Mb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100MB UINT32_C(0x1)
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100MB UINT32_C(0x1)
/* 1Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB UINT32_C(0xa)
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB UINT32_C(0xa)
/* 2Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2GB UINT32_C(0x14)
- /* 2.5Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2_5GB UINT32_C(0x19)
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2GB UINT32_C(0x14)
+	/* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2_5GB UINT32_C(0x19)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB UINT32_C(0x64)
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB UINT32_C(0x64)
 	/* 20Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_20GB UINT32_C(0xc8)
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_20GB UINT32_C(0xc8)
/* 25Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB UINT32_C(0xfa)
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB UINT32_C(0xfa)
/* 40Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB UINT32_C(0x190)
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB UINT32_C(0x190)
/* 50Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB UINT32_C(0x1f4)
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB UINT32_C(0x1f4)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB UINT32_C(0x3e8)
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB UINT32_C(0x3e8)
/* 10Mb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10MB UINT32_C(0xffff)
- uint8_t auto_mode;
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10MB UINT32_C(0xffff)
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_LAST \
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10MB
/*
- * This value is used to identify what autoneg mode is used when
- * the link speed is not being forced.
+ * This value is used to identify what autoneg mode is
+ * used when the link speed is not being forced.
*/
- /*
- * Disable autoneg or autoneg disabled. No
- * speeds are selected.
- */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE UINT32_C(0x0)
+ uint8_t auto_mode;
+ /* Disable autoneg or autoneg disabled. No speeds are selected. */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE UINT32_C(0x0)
/* Select all possible speeds for autoneg mode. */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS UINT32_C(0x1)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS UINT32_C(0x1)
/*
- * Select only the auto_link_speed speed for
- * autoneg mode. This mode has been DEPRECATED.
- * An HWRM client should not use this mode.
+ * Select only the auto_link_speed speed for autoneg mode. This mode has
+ * been DEPRECATED. An HWRM client should not use this mode.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_SPEED UINT32_C(0x2)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_SPEED UINT32_C(0x2)
/*
- * Select the auto_link_speed or any speed below
- * that speed for autoneg. This mode has been
- * DEPRECATED. An HWRM client should not use
- * this mode.
+ * Select the auto_link_speed or any speed below that speed for autoneg.
+ * This mode has been DEPRECATED. An HWRM client should not use this mode.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_OR_BELOW UINT32_C(0x3)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_OR_BELOW UINT32_C(0x3)
/*
- * Select the speeds based on the corresponding
- * link speed mask value that is provided.
+ * Select the speeds based on the corresponding link speed mask value
+ * that is provided.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK UINT32_C(0x4)
- uint8_t auto_duplex;
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK UINT32_C(0x4)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_LAST \
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK
/*
- * This is the duplex setting that will be used if the
- * autoneg_mode is "one_speed" or "one_or_below".
+ * This is the duplex setting that will be used if the autoneg_mode
+ * is "one_speed" or "one_or_below".
*/
+ uint8_t auto_duplex;
/* Half Duplex will be requested. */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF UINT32_C(0x0)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF UINT32_C(0x0)
/* Full duplex will be requested. */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL UINT32_C(0x1)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL UINT32_C(0x1)
/* Both Half and Full dupex will be requested. */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH UINT32_C(0x2)
- uint8_t auto_pause;
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH UINT32_C(0x2)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_LAST \
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH
/*
- * This value is used to configure the pause that will be used
- * for autonegotiation. Add text on the usage of auto_pause and
- * force_pause.
+ * This value is used to configure the pause that will be
+ * used for autonegotiation.
+ * Add text on the usage of auto_pause and force_pause.
*/
+ uint8_t auto_pause;
/*
- * When this bit is '1', Generation of tx pause messages has
- * been requested. Disabled otherwise.
+ * When this bit is '1', Generation of tx pause messages
+ * has been requested. Disabled otherwise.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX UINT32_C(0x1)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX \
+ UINT32_C(0x1)
/*
- * When this bit is '1', Reception of rx pause messages has been
- * requested. Disabled otherwise.
+ * When this bit is '1', Reception of rx pause messages
+ * has been requested. Disabled otherwise.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX UINT32_C(0x2)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX \
+ UINT32_C(0x2)
/*
- * When set to 1, the advertisement of pause is enabled. # When
- * the auto_mode is not set to none and this flag is set to 1,
- * then the auto_pause bits on this port are being advertised
- * and autoneg pause results are being interpreted. # When the
- * auto_mode is not set to none and this flag is set to 0, the
- * pause is forced as indicated in force_pause, and also
- * advertised as auto_pause bits, but the autoneg results are
- * not interpreted since the pause configuration is being
- * forced. # When the auto_mode is set to none and this flag is
- * set to 1, auto_pause bits should be ignored and should be set
- * to 0.
- */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE UINT32_C(0x4)
- uint8_t unused_0;
- uint16_t auto_link_speed;
+ * When set to 1, the advertisement of pause is enabled.
+ *
+ * # When the auto_mode is not set to none and this flag is
+ * set to 1, then the auto_pause bits on this port are being
+ * advertised and autoneg pause results are being interpreted.
+ * # When the auto_mode is not set to none and this
+ * flag is set to 0, the pause is forced as indicated in
+ * force_pause, and also advertised as auto_pause bits, but
+ * the autoneg results are not interpreted since the pause
+ * configuration is being forced.
+ * # When the auto_mode is set to none and this flag is set to
+ * 1, auto_pause bits should be ignored and should be set to 0.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE \
+ UINT32_C(0x4)
+ uint8_t unused_0;
/*
- * This is the speed that will be used if the autoneg_mode is
- * "one_speed" or "one_or_below". If an unsupported speed is
- * selected, an error will be generated.
+ * This is the speed that will be used if the autoneg_mode
+ * is "one_speed" or "one_or_below". If an unsupported speed
+ * is selected, an error will be generated.
*/
+ uint16_t auto_link_speed;
/* 100Mb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB UINT32_C(0x1)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB UINT32_C(0x1)
/* 1Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB UINT32_C(0xa)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB UINT32_C(0xa)
/* 2Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2GB UINT32_C(0x14)
- /* 2.5Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB UINT32_C(0x19)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2GB UINT32_C(0x14)
+	/* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB UINT32_C(0x19)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10GB UINT32_C(0x64)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10GB UINT32_C(0x64)
 	/* 20Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB UINT32_C(0xc8)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB UINT32_C(0xc8)
/* 25Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB UINT32_C(0xfa)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB UINT32_C(0xfa)
/* 40Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_40GB UINT32_C(0x190)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_40GB UINT32_C(0x190)
/* 50Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_50GB UINT32_C(0x1f4)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_50GB UINT32_C(0x1f4)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100GB UINT32_C(0x3e8)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100GB UINT32_C(0x3e8)
/* 10Mb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10MB UINT32_C(0xffff)
- uint16_t auto_link_speed_mask;
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10MB UINT32_C(0xffff)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_LAST \
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10MB
/*
* This is a mask of link speeds that will be used if
- * autoneg_mode is "mask". If unsupported speed is enabled an
- * error will be generated.
+ * autoneg_mode is "mask". If unsupported speed is enabled
+ * an error will be generated.
*/
- /* 100Mb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MBHD \
+ uint16_t auto_link_speed_mask;
+ /* 100Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MBHD \
UINT32_C(0x1)
- /* 100Mb link speed (Full-duplex) */
+ /* 100Mb link speed (Full-duplex) */
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB \
UINT32_C(0x2)
- /* 1Gb link speed (Half-duplex) */
+ /* 1Gb link speed (Half-duplex) */
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GBHD \
UINT32_C(0x4)
- /* 1Gb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB \
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB \
UINT32_C(0x8)
/* 2Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2GB \
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2GB \
UINT32_C(0x10)
- /* 2.5Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB \
+	/* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB \
UINT32_C(0x20)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB UINT32_C(0x40)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB \
+ UINT32_C(0x40)
/* 20Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB UINT32_C(0x80)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB \
+ UINT32_C(0x80)
/* 25Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB \
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB \
UINT32_C(0x100)
/* 40Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB \
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB \
UINT32_C(0x200)
/* 50Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB \
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB \
UINT32_C(0x400)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB \
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB \
UINT32_C(0x800)
- /* 10Mb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MBHD \
+ /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MBHD \
UINT32_C(0x1000)
- /* 10Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MB \
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MB \
UINT32_C(0x2000)
- uint8_t wirespeed;
/* This value controls the wirespeed feature. */
+ uint8_t wirespeed;
/* Wirespeed feature is disabled. */
- #define HWRM_PORT_PHY_CFG_INPUT_WIOUTPUTEED_OFF UINT32_C(0x0)
+ #define HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_OFF UINT32_C(0x0)
/* Wirespeed feature is enabled. */
- #define HWRM_PORT_PHY_CFG_INPUT_WIOUTPUTEED_ON UINT32_C(0x1)
- uint8_t lpbk;
+ #define HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_ON UINT32_C(0x1)
+ #define HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_LAST \
+ HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_ON
/* This value controls the loopback setting for the PHY. */
- /* No loopback is selected. Normal operation. */
- #define HWRM_PORT_PHY_CFG_INPUT_LPBK_NONE UINT32_C(0x0)
+ uint8_t lpbk;
+ /* No loopback is selected. Normal operation. */
+ #define HWRM_PORT_PHY_CFG_INPUT_LPBK_NONE UINT32_C(0x0)
+ /*
+ * The HW will be configured with local loopback such that
+ * host data is sent back to the host without modification.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_LPBK_LOCAL UINT32_C(0x1)
/*
- * The HW will be configured with local loopback
- * such that host data is sent back to the host
- * without modification.
+ * The HW will be configured with remote loopback such that
+ * port logic will send packets back out the transmitter that
+ * are received.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_LPBK_LOCAL UINT32_C(0x1)
+ #define HWRM_PORT_PHY_CFG_INPUT_LPBK_REMOTE UINT32_C(0x2)
/*
- * The HW will be configured with remote
- * loopback such that port logic will send
- * packets back out the transmitter that are
- * received.
+ * The HW will be configured with external loopback such that
+	 * host data is sent on the transmitter and based on the external
+ * loopback connection the data will be received without modification.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_LPBK_REMOTE UINT32_C(0x2)
- uint8_t force_pause;
+ #define HWRM_PORT_PHY_CFG_INPUT_LPBK_EXTERNAL UINT32_C(0x3)
+ #define HWRM_PORT_PHY_CFG_INPUT_LPBK_LAST \
+ HWRM_PORT_PHY_CFG_INPUT_LPBK_EXTERNAL
/*
- * This value is used to configure the pause that will be used
- * for force mode.
+ * This value is used to configure the pause that will be
+ * used for force mode.
*/
+ uint8_t force_pause;
/*
- * When this bit is '1', Generation of tx pause messages is
- * supported. Disabled otherwise.
+ * When this bit is '1', Generation of tx pause messages
+ * is supported. Disabled otherwise.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX UINT32_C(0x1)
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX UINT32_C(0x1)
/*
- * When this bit is '1', Reception of rx pause messages is
- * supported. Disabled otherwise.
+ * When this bit is '1', Reception of rx pause messages
+ * is supported. Disabled otherwise.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX UINT32_C(0x2)
- uint8_t unused_1;
- uint32_t preemphasis;
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX UINT32_C(0x2)
+ uint8_t unused_1;
/*
- * This value controls the pre-emphasis to be used for the link.
- * Driver should not set this value (use enable.preemphasis = 0)
- * unless driver is sure of setting. Normally HWRM FW will
- * determine proper pre-emphasis.
+ * This value controls the pre-emphasis to be used for the
+ * link. Driver should not set this value (use
+ * enable.preemphasis = 0) unless driver is sure of setting.
+ * Normally HWRM FW will determine proper pre-emphasis.
*/
- uint16_t eee_link_speed_mask;
+ uint32_t preemphasis;
/*
- * Setting for link speed mask that is used to advertise speeds
- * during autonegotiation when EEE is enabled. This field is
- * valid only when EEE is enabled. The speeds specified in this
- * field shall be a subset of speeds specified in
- * auto_link_speed_mask. If EEE is enabled,then at least one
- * speed shall be provided in this mask.
+ * Setting for link speed mask that is used to
+ * advertise speeds during autonegotiation when EEE is enabled.
+ * This field is valid only when EEE is enabled.
+ * The speeds specified in this field shall be a subset of
+ * speeds specified in auto_link_speed_mask.
+	 * If EEE is enabled, then at least one speed shall be provided
+ * in this mask.
*/
+ uint16_t eee_link_speed_mask;
/* Reserved */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD1 UINT32_C(0x1)
- /* 100Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_100MB UINT32_C(0x2)
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD1 \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_100MB \
+ UINT32_C(0x2)
/* Reserved */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD2 UINT32_C(0x4)
- /* 1Gb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_1GB UINT32_C(0x8)
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD2 \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_1GB \
+ UINT32_C(0x8)
/* Reserved */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD3 UINT32_C(0x10)
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD3 \
+ UINT32_C(0x10)
/* Reserved */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD4 UINT32_C(0x20)
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD4 \
+ UINT32_C(0x20)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_10GB UINT32_C(0x40)
- uint8_t unused_2;
- uint8_t unused_3;
- uint32_t tx_lpi_timer;
- uint32_t unused_4;
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_10GB \
+ UINT32_C(0x40)
+ uint8_t unused_2[2];
/*
- * Reuested setting of TX LPI timer in microseconds. This field
- * is valid only when EEE is enabled and TX LPI is enabled.
+	 * Requested setting of TX LPI timer in microseconds.
+ * This field is valid only when EEE is enabled and TX LPI is
+ * enabled.
*/
+ uint32_t tx_lpi_timer;
#define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_MASK UINT32_C(0xffffff)
- #define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_SFT 0
+ #define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_SFT 0
+ uint32_t unused_3;
} __attribute__((packed));
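
Taken together, the enables bits above and their companion fields follow one contract: the firmware only acts on an optional field when its matching enables bit is '1'. A minimal sketch of how a caller might fill this request to force both pause directions and disable wirespeed, assuming the 'enables' member declared earlier in the structure (outside the hunk shown) and leaving out request allocation, little-endian conversion and the send path:

/* Illustrative sketch only: each optional field is gated by its enables
 * bit; endianness conversion and the HWRM send path are omitted here.
 */
static void
port_phy_cfg_fill_pause(struct hwrm_port_phy_cfg_input *req, uint16_t port_id)
{
	req->port_id = port_id;

	/* force_pause is honored only when ENABLES_FORCE_PAUSE is set. */
	req->enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
	req->force_pause = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
			   HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;

	/* Likewise, wirespeed is ignored unless ENABLES_WIRESPEED is set. */
	req->enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_WIRESPEED;
	req->wirespeed = HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_OFF;
}
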
-/* Output (16 bytes) */
+/* hwrm_port_phy_cfg_output (size:128b/16B) */
struct hwrm_port_phy_cfg_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint8_t valid;
+} __attribute__((packed));
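
Since the valid byte is documented as the last thing the firmware writes, a caller polling the response buffer in host memory can treat it as the completion signal. A rough sketch under that assumption, using DPDK's rte_rmb() and rte_delay_us() for ordering and the busy-wait; the real driver's timeout policy is not shown here:

#include <rte_atomic.h>		/* rte_rmb() */
#include <rte_cycles.h>		/* rte_delay_us() */

/* Illustrative sketch: spin on 'valid' until the firmware marks the
 * response complete; the retry budget here is an assumption.
 */
static int
port_phy_cfg_wait_done(volatile struct hwrm_port_phy_cfg_output *resp,
		       unsigned int max_tries)
{
	while (max_tries--) {
		if (resp->valid == 1) {
			rte_rmb();	/* read the rest only after 'valid' */
			return 0;
		}
		rte_delay_us(1);
	}
	return -1;			/* completion was never signalled */
}
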
+
+/* hwrm_port_phy_cfg_cmd_err (size:64b/8B) */
+struct hwrm_port_phy_cfg_cmd_err {
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+	 * command specific error codes that go to
+ * the cmd_err field in Common HWRM Error Response.
*/
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_PORT_PHY_CFG_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ /* Unable to complete operation due to invalid speed */
+ #define HWRM_PORT_PHY_CFG_CMD_ERR_CODE_ILLEGAL_SPEED UINT32_C(0x1)
+ /*
+	 * Retry the command since the PHY is not ready.
+	 * The retry count is returned in opaque_0.
+	 * This is only valid for the first command and
+	 * this value will not change for successive calls,
+	 * but if a 0 is returned at any time then this should
+	 * be treated as an unrecoverable failure.
+	 *
+	 * The retry interval in milliseconds is returned in opaque_1.
+	 * This specifies the time that the user should wait before
+	 * issuing the next port_phy_cfg command.
+ */
+ #define HWRM_PORT_PHY_CFG_CMD_ERR_CODE_RETRY UINT32_C(0x2)
+ #define HWRM_PORT_PHY_CFG_CMD_ERR_CODE_LAST \
+ HWRM_PORT_PHY_CFG_CMD_ERR_CODE_RETRY
+ uint8_t unused_0[7];
} __attribute__((packed));
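
The code enumeration above maps naturally onto errno-style return values. One hedged way a caller could translate it; the retry count and interval described above are carried in opaque_0/opaque_1 of the common HWRM error response, which is outside the hunk shown, so acting on them is left out here:

#include <errno.h>

/* Illustrative sketch: translate the command-specific error code into
 * an errno-style result the rest of the driver can act on.
 */
static int
port_phy_cfg_translate_err(const struct hwrm_port_phy_cfg_cmd_err *err)
{
	switch (err->code) {
	case HWRM_PORT_PHY_CFG_CMD_ERR_CODE_ILLEGAL_SPEED:
		return -EINVAL;		/* requested speed not supported */
	case HWRM_PORT_PHY_CFG_CMD_ERR_CODE_RETRY:
		return -EAGAIN;		/* PHY not ready, retry later */
	case HWRM_PORT_PHY_CFG_CMD_ERR_CODE_UNKNOWN:
	default:
		return -EIO;
	}
}
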
-/* hwrm_port_phy_qcfg */
-/* Description: This command queries the PHY configuration for the port. */
-/* Input (24 bytes) */
+/**********************
+ * hwrm_port_phy_qcfg *
+ **********************/
+
+
+/* hwrm_port_phy_qcfg_input (size:192b/24B) */
struct hwrm_port_phy_qcfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t port_id;
+ uint64_t resp_addr;
/* Port ID of port that is to be queried. */
- uint16_t unused_0[3];
+ uint16_t port_id;
+ uint8_t unused_0[6];
} __attribute__((packed));
-/* Output (96 bytes) */
+/* hwrm_port_phy_qcfg_output (size:768b/96B) */
struct hwrm_port_phy_qcfg_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint8_t link;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/* This value indicates the current link status. */
+ uint8_t link;
/* There is no link or cable detected. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK UINT32_C(0x0)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK UINT32_C(0x0)
/* There is no link, but a cable has been detected. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SIGNAL UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SIGNAL UINT32_C(0x1)
/* There is a link. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK UINT32_C(0x2)
- uint8_t unused_0;
- uint16_t link_speed;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK UINT32_C(0x2)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK
+ uint8_t unused_0;
/* This value indicates the current link speed of the connection. */
+ uint16_t link_speed;
/* 100Mb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB UINT32_C(0x1)
/* 1Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB UINT32_C(0xa)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB UINT32_C(0xa)
/* 2Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB UINT32_C(0x14)
- /* 2.5Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB UINT32_C(0x19)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB UINT32_C(0x14)
+	/* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB UINT32_C(0x19)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB UINT32_C(0x64)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB UINT32_C(0x64)
 	/* 20Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB UINT32_C(0xc8)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB UINT32_C(0xc8)
/* 25Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB UINT32_C(0xfa)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB UINT32_C(0xfa)
/* 40Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB UINT32_C(0x190)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB UINT32_C(0x190)
/* 50Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB UINT32_C(0x1f4)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB UINT32_C(0x1f4)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB UINT32_C(0x3e8)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB UINT32_C(0x3e8)
/* 10Mb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB UINT32_C(0xffff)
- uint8_t duplex_cfg;
- /* This value is indicates the duplex of the current connection. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB UINT32_C(0xffff)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB
+ /*
+	 * This value indicates the duplex of the current
+ * configuration.
+ */
+ uint8_t duplex_cfg;
/* Half Duplex connection. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_HALF UINT32_C(0x0)
/* Full duplex connection. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL UINT32_C(0x1)
- uint8_t pause;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL
/*
- * This value is used to indicate the current pause
- * configuration. When autoneg is enabled, this value represents
- * the autoneg results of pause configuration.
+ * This value is used to indicate the current
+ * pause configuration. When autoneg is enabled, this value
+ * represents the autoneg results of pause configuration.
*/
+ uint8_t pause;
/*
- * When this bit is '1', Generation of tx pause messages is
- * supported. Disabled otherwise.
+ * When this bit is '1', Generation of tx pause messages
+ * is supported. Disabled otherwise.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX UINT32_C(0x1)
/*
- * When this bit is '1', Reception of rx pause messages is
- * supported. Disabled otherwise.
+ * When this bit is '1', Reception of rx pause messages
+ * is supported. Disabled otherwise.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX UINT32_C(0x2)
- uint16_t support_speeds;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX UINT32_C(0x2)
/*
- * The supported speeds for the port. This is a bit mask. For
- * each speed that is supported, the corrresponding bit will be
- * set to '1'.
+ * The supported speeds for the port. This is a bit mask.
+	 * For each speed that is supported, the corresponding
+ * bit will be set to '1'.
*/
- /* 100Mb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD UINT32_C(0x1)
- /* 100Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MB UINT32_C(0x2)
- /* 1Gb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GBHD UINT32_C(0x4)
- /* 1Gb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB UINT32_C(0x8)
+ uint16_t support_speeds;
+ /* 100Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MB \
+ UINT32_C(0x2)
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GBHD \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB \
+ UINT32_C(0x8)
/* 2Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2GB UINT32_C(0x10)
- /* 2.5Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB UINT32_C(0x20)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2GB \
+ UINT32_C(0x10)
+	/* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB \
+ UINT32_C(0x20)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB UINT32_C(0x40)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB \
+ UINT32_C(0x40)
/* 20Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB UINT32_C(0x80)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB \
+ UINT32_C(0x80)
/* 25Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB UINT32_C(0x100)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB \
+ UINT32_C(0x100)
/* 40Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB UINT32_C(0x200)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB \
+ UINT32_C(0x200)
/* 50Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB UINT32_C(0x400)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB \
+ UINT32_C(0x400)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB UINT32_C(0x800)
- /* 10Mb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MBHD UINT32_C(0x1000)
- /* 10Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MB UINT32_C(0x2000)
- uint16_t force_link_speed;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB \
+ UINT32_C(0x800)
+ /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MBHD \
+ UINT32_C(0x1000)
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MB \
+ UINT32_C(0x2000)
/*
- * Current setting of forced link speed. When the link speed is
- * not being forced, this value shall be set to 0.
+ * Current setting of forced link speed.
+ * When the link speed is not being forced, this
+ * value shall be set to 0.
*/
+ uint16_t force_link_speed;
/* 100Mb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100MB UINT32_C(0x1)
/* 1Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_1GB UINT32_C(0xa)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_1GB UINT32_C(0xa)
/* 2Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2GB UINT32_C(0x14)
- /* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2GB UINT32_C(0x14)
+	/* 2.5Gb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2_5GB UINT32_C(0x19)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10GB UINT32_C(0x64)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10GB UINT32_C(0x64)
 	/* 20Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_20GB UINT32_C(0xc8)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_20GB UINT32_C(0xc8)
/* 25Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_25GB UINT32_C(0xfa)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_25GB UINT32_C(0xfa)
/* 40Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_40GB UINT32_C(0x190)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_40GB \
+ UINT32_C(0x190)
/* 50Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_50GB UINT32_C(0x1f4)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_50GB \
+ UINT32_C(0x1f4)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100GB UINT32_C(0x3e8)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100GB \
+ UINT32_C(0x3e8)
/* 10Mb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10MB UINT32_C(0xffff)
- uint8_t auto_mode;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10MB \
+ UINT32_C(0xffff)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10MB
/* Current setting of auto negotiation mode. */
- /*
- * Disable autoneg or autoneg disabled. No
- * speeds are selected.
- */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE UINT32_C(0x0)
+ uint8_t auto_mode;
+ /* Disable autoneg or autoneg disabled. No speeds are selected. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE UINT32_C(0x0)
/* Select all possible speeds for autoneg mode. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ALL_SPEEDS UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ALL_SPEEDS UINT32_C(0x1)
/*
- * Select only the auto_link_speed speed for
- * autoneg mode. This mode has been DEPRECATED.
- * An HWRM client should not use this mode.
+ * Select only the auto_link_speed speed for autoneg mode. This mode has
+ * been DEPRECATED. An HWRM client should not use this mode.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_SPEED UINT32_C(0x2)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_SPEED UINT32_C(0x2)
/*
- * Select the auto_link_speed or any speed below
- * that speed for autoneg. This mode has been
- * DEPRECATED. An HWRM client should not use
- * this mode.
+ * Select the auto_link_speed or any speed below that speed for autoneg.
+ * This mode has been DEPRECATED. An HWRM client should not use this mode.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_OR_BELOW UINT32_C(0x3)
/*
- * Select the speeds based on the corresponding
- * link speed mask value that is provided.
+ * Select the speeds based on the corresponding link speed mask value
+ * that is provided.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_SPEED_MASK UINT32_C(0x4)
- uint8_t auto_pause;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_SPEED_MASK UINT32_C(0x4)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_SPEED_MASK
/*
- * Current setting of pause autonegotiation. Move autoneg_pause
- * flag here.
+ * Current setting of pause autonegotiation.
+ * Move autoneg_pause flag here.
*/
+ uint8_t auto_pause;
/*
- * When this bit is '1', Generation of tx pause messages has
- * been requested. Disabled otherwise.
+ * When this bit is '1', Generation of tx pause messages
+ * has been requested. Disabled otherwise.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_TX UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_TX \
+ UINT32_C(0x1)
/*
- * When this bit is '1', Reception of rx pause messages has been
- * requested. Disabled otherwise.
+ * When this bit is '1', Reception of rx pause messages
+ * has been requested. Disabled otherwise.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_RX UINT32_C(0x2)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_RX \
+ UINT32_C(0x2)
/*
- * When set to 1, the advertisement of pause is enabled. # When
- * the auto_mode is not set to none and this flag is set to 1,
- * then the auto_pause bits on this port are being advertised
- * and autoneg pause results are being interpreted. # When the
- * auto_mode is not set to none and this flag is set to 0, the
- * pause is forced as indicated in force_pause, and also
- * advertised as auto_pause bits, but the autoneg results are
- * not interpreted since the pause configuration is being
- * forced. # When the auto_mode is set to none and this flag is
- * set to 1, auto_pause bits should be ignored and should be set
- * to 0.
- */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_AUTONEG_PAUSE UINT32_C(0x4)
- uint16_t auto_link_speed;
+ * When set to 1, the advertisement of pause is enabled.
+ *
+ * # When the auto_mode is not set to none and this flag is
+ * set to 1, then the auto_pause bits on this port are being
+ * advertised and autoneg pause results are being interpreted.
+ * # When the auto_mode is not set to none and this
+ * flag is set to 0, the pause is forced as indicated in
+ * force_pause, and also advertised as auto_pause bits, but
+ * the autoneg results are not interpreted since the pause
+ * configuration is being forced.
+ * # When the auto_mode is set to none and this flag is set to
+ * 1, auto_pause bits should be ignored and should be set to 0.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_AUTONEG_PAUSE \
+ UINT32_C(0x4)
/*
- * Current setting for auto_link_speed. This field is only valid
- * when auto_mode is set to "one_speed" or "one_or_below".
+ * Current setting for auto_link_speed. This field is only
+ * valid when auto_mode is set to "one_speed" or "one_or_below".
*/
+ uint16_t auto_link_speed;
/* 100Mb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100MB UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100MB UINT32_C(0x1)
/* 1Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_1GB UINT32_C(0xa)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_1GB UINT32_C(0xa)
/* 2Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2GB UINT32_C(0x14)
- /* 2.5Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2_5GB UINT32_C(0x19)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2GB UINT32_C(0x14)
+	/* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2_5GB UINT32_C(0x19)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10GB UINT32_C(0x64)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10GB UINT32_C(0x64)
 	/* 20Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_20GB UINT32_C(0xc8)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_20GB UINT32_C(0xc8)
/* 25Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_25GB UINT32_C(0xfa)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_25GB UINT32_C(0xfa)
/* 40Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_40GB UINT32_C(0x190)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_40GB UINT32_C(0x190)
/* 50Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_50GB UINT32_C(0x1f4)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_50GB UINT32_C(0x1f4)
/* 100Gb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100GB UINT32_C(0x3e8)
/* 10Mb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10MB UINT32_C(0xffff)
- uint16_t auto_link_speed_mask;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10MB \
+ UINT32_C(0xffff)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10MB
/*
* Current setting for auto_link_speed_mask that is used to
- * advertise speeds during autonegotiation. This field is only
- * valid when auto_mode is set to "mask". The speeds specified
- * in this field shall be a subset of supported speeds on this
- * port.
- */
- /* 100Mb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MBHD \
+ * advertise speeds during autonegotiation.
+ * This field is only valid when auto_mode is set to "mask".
+ * The speeds specified in this field shall be a subset of
+ * supported speeds on this port.
+ */
+ uint16_t auto_link_speed_mask;
+ /* 100Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MBHD \
UINT32_C(0x1)
- /* 100Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MB \
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MB \
UINT32_C(0x2)
- /* 1Gb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GBHD \
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GBHD \
UINT32_C(0x4)
- /* 1Gb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GB UINT32_C(0x8)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GB \
+ UINT32_C(0x8)
/* 2Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2GB \
UINT32_C(0x10)
- /* 2.5Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2_5GB \
+	/* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2_5GB \
UINT32_C(0x20)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10GB \
UINT32_C(0x40)
/* 20Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_20GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_20GB \
UINT32_C(0x80)
/* 25Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_25GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_25GB \
UINT32_C(0x100)
/* 40Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_40GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_40GB \
UINT32_C(0x200)
/* 50Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_50GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_50GB \
UINT32_C(0x400)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100GB \
UINT32_C(0x800)
- /* 10Mb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MBHD \
+ /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MBHD \
UINT32_C(0x1000)
- /* 10Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MB \
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MB \
UINT32_C(0x2000)
- uint8_t wirespeed;
/* Current setting for wirespeed. */
+ uint8_t wirespeed;
/* Wirespeed feature is disabled. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_WIOUTPUTEED_OFF UINT32_C(0x0)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_OFF UINT32_C(0x0)
/* Wirespeed feature is enabled. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_WIOUTPUTEED_ON UINT32_C(0x1)
- uint8_t lpbk;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_ON UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_ON
/* Current setting for loopback. */
- /* No loopback is selected. Normal operation. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_NONE UINT32_C(0x0)
+ uint8_t lpbk;
+ /* No loopback is selected. Normal operation. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_NONE UINT32_C(0x0)
/*
- * The HW will be configured with local loopback
- * such that host data is sent back to the host
- * without modification.
+ * The HW will be configured with local loopback such that
+ * host data is sent back to the host without modification.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_LOCAL UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_LOCAL UINT32_C(0x1)
/*
- * The HW will be configured with remote
- * loopback such that port logic will send
- * packets back out the transmitter that are
- * received.
+ * The HW will be configured with remote loopback such that
+ * port logic will send packets back out the transmitter that
+ * are received.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_REMOTE UINT32_C(0x2)
- uint8_t force_pause;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_REMOTE UINT32_C(0x2)
/*
- * Current setting of forced pause. When the pause configuration
- * is not being forced, then this value shall be set to 0.
+ * The HW will be configured with external loopback such that
+	 * host data is sent on the transmitter and based on the external
+ * loopback connection the data will be received without modification.
*/
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_EXTERNAL UINT32_C(0x3)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_EXTERNAL
/*
- * When this bit is '1', Generation of tx pause messages is
- * supported. Disabled otherwise.
+ * Current setting of forced pause.
+ * When the pause configuration is not being forced, then
+ * this value shall be set to 0.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_TX UINT32_C(0x1)
+ uint8_t force_pause;
/*
- * When this bit is '1', Reception of rx pause messages is
- * supported. Disabled otherwise.
+ * When this bit is '1', Generation of tx pause messages
+ * is supported. Disabled otherwise.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_RX UINT32_C(0x2)
- uint8_t module_status;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_TX UINT32_C(0x1)
/*
- * This value indicates the current status of the optics module
- * on this port.
+ * When this bit is '1', Reception of rx pause messages
+ * is supported. Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_RX UINT32_C(0x2)
+ /*
+ * This value indicates the current status of the optics module on
+ * this port.
*/
+ uint8_t module_status;
/* Module is inserted and accepted */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NONE UINT32_C(0x0)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NONE \
+ UINT32_C(0x0)
/* Module is rejected and transmit side Laser is disabled. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX \
+ UINT32_C(0x1)
/* Module mismatch warning. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG UINT32_C(0x2)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG \
+ UINT32_C(0x2)
/* Module is rejected and powered down. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN UINT32_C(0x3)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN \
+ UINT32_C(0x3)
/* Module is not inserted. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTINSERTED \
UINT32_C(0x4)
/* Module status is not applicable. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTAPPLICABLE \
UINT32_C(0xff)
- uint32_t preemphasis;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTAPPLICABLE
/* Current setting for preemphasis. */
- uint8_t phy_maj;
+ uint32_t preemphasis;
/* This field represents the major version of the PHY. */
- uint8_t phy_min;
+ uint8_t phy_maj;
/* This field represents the minor version of the PHY. */
- uint8_t phy_bld;
+ uint8_t phy_min;
/* This field represents the build version of the PHY. */
- uint8_t phy_type;
+ uint8_t phy_bld;
/* This value represents a PHY type. */
+ uint8_t phy_type;
/* Unknown */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN UINT32_C(0x0)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN \
+ UINT32_C(0x0)
/* BASE-CR */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR UINT32_C(0x1)
- /* BASE-KR4 (Deprecated) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4 UINT32_C(0x2)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR \
+ UINT32_C(0x1)
+ /* BASE-KR4 (Deprecated) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4 \
+ UINT32_C(0x2)
/* BASE-LR */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR UINT32_C(0x3)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR \
+ UINT32_C(0x3)
/* BASE-SR */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR UINT32_C(0x4)
- /* BASE-KR2 (Deprecated) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2 UINT32_C(0x5)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR \
+ UINT32_C(0x4)
+ /* BASE-KR2 (Deprecated) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2 \
+ UINT32_C(0x5)
/* BASE-KX */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX UINT32_C(0x6)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX \
+ UINT32_C(0x6)
/* BASE-KR */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR UINT32_C(0x7)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR \
+ UINT32_C(0x7)
/* BASE-T */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET UINT32_C(0x8)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET \
+ UINT32_C(0x8)
/* EEE capable BASE-T */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE UINT32_C(0x9)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE \
+ UINT32_C(0x9)
/* SGMII connected external PHY */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY UINT32_C(0xa)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY \
+ UINT32_C(0xa)
/* 25G_BASECR_CA_L */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L UINT32_C(0xb)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L \
+ UINT32_C(0xb)
/* 25G_BASECR_CA_S */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S UINT32_C(0xc)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S \
+ UINT32_C(0xc)
/* 25G_BASECR_CA_N */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N UINT32_C(0xd)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N \
+ UINT32_C(0xd)
/* 25G_BASESR */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR UINT32_C(0xe)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR \
+ UINT32_C(0xe)
/* 100G_BASECR4 */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4 UINT32_C(0xf)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4 \
+ UINT32_C(0xf)
/* 100G_BASESR4 */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4 UINT32_C(0x10)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4 \
+ UINT32_C(0x10)
/* 100G_BASELR4 */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4 UINT32_C(0x11)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4 \
+ UINT32_C(0x11)
/* 100G_BASEER4 */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4 UINT32_C(0x12)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4 \
+ UINT32_C(0x12)
/* 100G_BASESR10 */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10 UINT32_C(0x13)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10 \
+ UINT32_C(0x13)
/* 40G_BASECR4 */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4 UINT32_C(0x14)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4 \
+ UINT32_C(0x14)
/* 40G_BASESR4 */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASESR4 UINT32_C(0x15)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASESR4 \
+ UINT32_C(0x15)
/* 40G_BASELR4 */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4 UINT32_C(0x16)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4 \
+ UINT32_C(0x16)
/* 40G_BASEER4 */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4 UINT32_C(0x17)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4 \
+ UINT32_C(0x17)
/* 40G_ACTIVE_CABLE */
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE \
UINT32_C(0x18)
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET UINT32_C(0x19)
+ /* 1G_baseT */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET \
+ UINT32_C(0x19)
/* 1G_baseSX */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX UINT32_C(0x1a)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX \
+ UINT32_C(0x1a)
/* 1G_baseCX */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX UINT32_C(0x1b)
- uint8_t media_type;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX \
+ UINT32_C(0x1b)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX
/* This value represents a media type. */
+ uint8_t media_type;
/* Unknown */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_UNKNOWN UINT32_C(0x0)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_UNKNOWN UINT32_C(0x0)
/* Twisted Pair */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP UINT32_C(0x1)
/* Direct Attached Copper */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_DAC UINT32_C(0x2)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_DAC UINT32_C(0x2)
/* Fiber */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE UINT32_C(0x3)
- uint8_t xcvr_pkg_type;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE UINT32_C(0x3)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE
/* This value represents a transceiver type. */
+ uint8_t xcvr_pkg_type;
/* PHY and MAC are in the same package */
#define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_INTERNAL \
UINT32_C(0x1)
/* PHY and MAC are in different packages */
#define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_EXTERNAL \
UINT32_C(0x2)
- uint8_t eee_config_phy_addr;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_EXTERNAL
+ uint8_t eee_config_phy_addr;
+ /* This field represents PHY address. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_MASK \
+ UINT32_C(0x1f)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_SFT 0
/*
* This field represents flags related to EEE configuration.
* These EEE configuration flags are valid only when the
- * auto_mode is not set to none (in other words autonegotiation
+ * auto_mode is not set to none (in other words autonegotiation
* is enabled).
*/
- /* This field represents PHY address. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_MASK UINT32_C(0x1f)
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_SFT 0
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_MASK \
+ UINT32_C(0xe0)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_SFT 5
/*
- * When set to 1, Energy Efficient Ethernet (EEE) mode is
- * enabled. Speeds for autoneg with EEE mode enabled are based
- * on eee_link_speed_mask.
+ * When set to 1, Energy Efficient Ethernet (EEE) mode is enabled.
+ * Speeds for autoneg with EEE mode enabled
+ * are based on eee_link_speed_mask.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ENABLED UINT32_C(0x20)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ENABLED \
+ UINT32_C(0x20)
/*
- * This flag is valid only when eee_enabled is set to 1. # If
- * eee_enabled is set to 0, then EEE mode is disabled and this
- * flag shall be ignored. # If eee_enabled is set to 1 and this
- * flag is set to 1, then Energy Efficient Ethernet (EEE) mode
- * is enabled and in use. # If eee_enabled is set to 1 and this
- * flag is set to 0, then Energy Efficient Ethernet (EEE) mode
- * is enabled but is currently not in use.
- */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ACTIVE UINT32_C(0x40)
+ * This flag is valid only when eee_enabled is set to 1.
+ *
+ * # If eee_enabled is set to 0, then EEE mode is disabled
+ * and this flag shall be ignored.
+ * # If eee_enabled is set to 1 and this flag is set to 1,
+ * then Energy Efficient Ethernet (EEE) mode is enabled
+ * and in use.
+ * # If eee_enabled is set to 1 and this flag is set to 0,
+ * then Energy Efficient Ethernet (EEE) mode is enabled
+ * but is currently not in use.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ACTIVE \
+ UINT32_C(0x40)
/*
- * This flag is valid only when eee_enabled is set to 1. # If
- * eee_enabled is set to 0, then EEE mode is disabled and this
- * flag shall be ignored. # If eee_enabled is set to 1 and this
- * flag is set to 1, then Energy Efficient Ethernet (EEE) mode
- * is enabled and TX LPI is enabled. # If eee_enabled is set to
- * 1 and this flag is set to 0, then Energy Efficient Ethernet
- * (EEE) mode is enabled but TX LPI is disabled.
- */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_TX_LPI UINT32_C(0x80)
+ * This flag is valid only when eee_enabled is set to 1.
+ *
+ * # If eee_enabled is set to 0, then EEE mode is disabled
+ * and this flag shall be ignored.
+ * # If eee_enabled is set to 1 and this flag is set to 1,
+ * then Energy Efficient Ethernet (EEE) mode is enabled
+ * and TX LPI is enabled.
+ * # If eee_enabled is set to 1 and this flag is set to 0,
+ * then Energy Efficient Ethernet (EEE) mode is enabled
+ * but TX LPI is disabled.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_TX_LPI \
+ UINT32_C(0x80)
/*
- * This field represents flags related to EEE configuration.
- * These EEE configuration flags are valid only when the
- * auto_mode is not set to none (in other words autonegotiation
- * is enabled).
+ * When set to 1, the parallel detection is used to determine
+ * the speed of the link partner.
+ *
+	 * Parallel detection is used when an autonegotiation capable
+	 * device is connected to a link partner that is not capable
+ * of autonegotiation.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_MASK UINT32_C(0xe0)
- #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_SFT 5
- uint8_t parallel_detect;
- /* Reserved field, set to 0 */
+ uint8_t parallel_detect;
/*
* When set to 1, the parallel detection is used to determine
- * the speed of the link partner. Parallel detection is used
- * when a autonegotiation capable device is connected to a link
- * parter that is not capable of autonegotiation.
+ * the speed of the link partner.
+ *
+	 * Parallel detection is used when an autonegotiation capable
+	 * device is connected to a link partner that is not capable
+ * of autonegotiation.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PARALLEL_DETECT UINT32_C(0x1)
- /* Reserved field, set to 0 */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_RESERVED_MASK UINT32_C(0xfe)
- #define HWRM_PORT_PHY_QCFG_OUTPUT_RESERVED_SFT 1
- uint16_t link_partner_adv_speeds;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PARALLEL_DETECT UINT32_C(0x1)
/*
- * The advertised speeds for the port by the link partner. Each
- * advertised speed will be set to '1'.
+ * The advertised speeds for the port by the link partner.
+ * Each advertised speed will be set to '1'.
*/
- /* 100Mb link speed (Half-duplex) */
+ uint16_t link_partner_adv_speeds;
+ /* 100Mb link speed (Half-duplex) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MBHD \
UINT32_C(0x1)
- /* 100Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MB \
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MB \
UINT32_C(0x2)
- /* 1Gb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GBHD \
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GBHD \
UINT32_C(0x4)
- /* 1Gb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GB \
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GB \
UINT32_C(0x8)
/* 2Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2GB \
UINT32_C(0x10)
- /* 2.5Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2_5GB \
+ /* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2_5GB \
UINT32_C(0x20)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10GB \
UINT32_C(0x40)
/* 20Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_20GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_20GB \
UINT32_C(0x80)
/* 25Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_25GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_25GB \
UINT32_C(0x100)
/* 40Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_40GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_40GB \
UINT32_C(0x200)
/* 50Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_50GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_50GB \
UINT32_C(0x400)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100GB \
UINT32_C(0x800)
- /* 10Mb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MBHD \
+ /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MBHD \
UINT32_C(0x1000)
- /* 10Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MB \
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MB \
UINT32_C(0x2000)
- uint8_t link_partner_adv_auto_mode;
/*
- * The advertised autoneg for the port by the link partner. This
- * field is deprecated and should be set to 0.
- */
- /*
- * Disable autoneg or autoneg disabled. No
- * speeds are selected.
+ * The advertised autoneg for the port by the link partner.
+ * This field is deprecated and should be set to 0.
*/
+ uint8_t link_partner_adv_auto_mode;
+ /* Disable autoneg or autoneg disabled. No speeds are selected. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_NONE \
UINT32_C(0x0)
/* Select all possible speeds for autoneg mode. */
- #define \
- HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS \
UINT32_C(0x1)
/*
- * Select only the auto_link_speed speed for
- * autoneg mode. This mode has been DEPRECATED.
- * An HWRM client should not use this mode.
+ * Select only the auto_link_speed speed for autoneg mode. This mode has
+ * been DEPRECATED. An HWRM client should not use this mode.
*/
- #define \
- HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED \
UINT32_C(0x2)
/*
- * Select the auto_link_speed or any speed below
- * that speed for autoneg. This mode has been
- * DEPRECATED. An HWRM client should not use
- * this mode.
+ * Select the auto_link_speed or any speed below that speed for autoneg.
+ * This mode has been DEPRECATED. An HWRM client should not use this mode.
*/
- #define \
- HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW \
UINT32_C(0x3)
/*
- * Select the speeds based on the corresponding
- * link speed mask value that is provided.
+ * Select the speeds based on the corresponding link speed mask value
+ * that is provided.
*/
- #define \
- HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK \
UINT32_C(0x4)
- uint8_t link_partner_adv_pause;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK
/* The advertised pause settings on the port by the link partner. */
+ uint8_t link_partner_adv_pause;
/*
- * When this bit is '1', Generation of tx pause messages is
- * supported. Disabled otherwise.
+ * When this bit is '1', Generation of tx pause messages
+ * is supported. Disabled otherwise.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_TX \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_TX \
UINT32_C(0x1)
/*
- * When this bit is '1', Reception of rx pause messages is
- * supported. Disabled otherwise.
+ * When this bit is '1', Reception of rx pause messages
+ * is supported. Disabled otherwise.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_RX \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_RX \
UINT32_C(0x2)
- uint16_t adv_eee_link_speed_mask;
/*
- * Current setting for link speed mask that is used to advertise
- * speeds during autonegotiation when EEE is enabled. This field
- * is valid only when eee_enabled flags is set to 1. The speeds
- * specified in this field shall be a subset of speeds specified
- * in auto_link_speed_mask.
+ * Current setting for link speed mask that is used to
+ * advertise speeds during autonegotiation when EEE is enabled.
+ * This field is valid only when the eee_enabled flag is set to 1.
+ * The speeds specified in this field shall be a subset of
+ * speeds specified in auto_link_speed_mask.
*/
+ uint16_t adv_eee_link_speed_mask;
/* Reserved */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD1 \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD1 \
UINT32_C(0x1)
- /* 100Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_100MB \
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_100MB \
UINT32_C(0x2)
/* Reserved */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD2 \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD2 \
UINT32_C(0x4)
- /* 1Gb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_1GB \
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_1GB \
UINT32_C(0x8)
/* Reserved */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD3 \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD3 \
UINT32_C(0x10)
/* Reserved */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD4 \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD4 \
UINT32_C(0x20)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_10GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_10GB \
UINT32_C(0x40)
- uint16_t link_partner_adv_eee_link_speed_mask;
/*
- * Current setting for link speed mask that is advertised by the
- * link partner when EEE is enabled. This field is valid only
- * when eee_enabled flags is set to 1.
+ * Current setting for link speed mask that is advertised by
+ * the link partner when EEE is enabled.
+ * This field is valid only when the eee_enabled flag is set to 1.
*/
+ uint16_t link_partner_adv_eee_link_speed_mask;
/* Reserved */
- #define \
- HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 \
UINT32_C(0x1)
- /* 100Mb link speed (Full-duplex) */
- #define \
- HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB \
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB \
UINT32_C(0x2)
/* Reserved */
- #define \
- HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 \
UINT32_C(0x4)
- /* 1Gb link speed (Full-duplex) */
- #define \
- HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB \
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB \
UINT32_C(0x8)
/* Reserved */
- #define \
- HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 \
UINT32_C(0x10)
/* Reserved */
- #define \
- HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 \
UINT32_C(0x20)
/* 10Gb link speed */
- #define \
- HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB \
UINT32_C(0x40)
- uint32_t xcvr_identifier_type_tx_lpi_timer;
- /* This value represents transceiver identifier type. */
+ uint32_t xcvr_identifier_type_tx_lpi_timer;
/*
- * Current setting of TX LPI timer in microseconds. This field
- * is valid only when_eee_enabled flag is set to 1 and
- * tx_lpi_enabled is set to 1.
+ * Current setting of TX LPI timer in microseconds.
+ * This field is valid only when the eee_enabled flag is set to 1
+ * and tx_lpi_enabled is set to 1.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_MASK UINT32_C(0xffffff)
- #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_SFT 0
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_MASK \
+ UINT32_C(0xffffff)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_SFT 0
/* This value represents transceiver identifier type. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_MASK \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_MASK \
UINT32_C(0xff000000)
- #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFT 24
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFT 24
/* Unknown */
#define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_UNKNOWN \
(UINT32_C(0x0) << 24)
/* SFP/SFP+/SFP28 */
#define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFP \
(UINT32_C(0x3) << 24)
- /* QSFP */
+ /* QSFP */
#define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP \
(UINT32_C(0xc) << 24)
/* QSFP+ */
@@ -5550,2637 +10877,7759 @@ struct hwrm_port_phy_qcfg_output {
/* QSFP28 */
#define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP28 \
(UINT32_C(0x11) << 24)
- uint16_t fec_cfg;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP28
/*
- * This value represents the current configuration of Forward
- * Error Correction (FEC) on the port.
+ * This value represents the current configuration of
+ * Forward Error Correction (FEC) on the port.
*/
+ uint16_t fec_cfg;
/*
- * When set to 1, then FEC is not supported on this port. If
- * this flag is set to 1, then all other FEC configuration flags
- * shall be ignored. When set to 0, then FEC is supported as
- * indicated by other configuration flags. If no cable is
- * attached and the HWRM does not yet know the FEC capability,
- * then the HWRM shall set this flag to 1 when reporting FEC
- * capability.
+ * When set to 1, then FEC is not supported on this port. If this flag
+ * is set to 1, then all other FEC configuration flags shall be ignored.
+ * When set to 0, then FEC is supported as indicated by other
+ * configuration flags.
+ * If no cable is attached and the HWRM does not yet know the FEC
+ * capability, then the HWRM shall set this flag to 1 when reporting
+ * FEC capability.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_NONE_SUPPORTED \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_NONE_SUPPORTED \
UINT32_C(0x1)
/*
- * When set to 1, then FEC autonegotiation is supported on this
- * port. When set to 0, then FEC autonegotiation is not
- * supported on this port.
+ * When set to 1, then FEC autonegotiation is supported on this port.
+ * When set to 0, then FEC autonegotiation is not supported on this port.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_AUTONEG_SUPPORTED \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_AUTONEG_SUPPORTED \
UINT32_C(0x2)
/*
- * When set to 1, then FEC autonegotiation is enabled on this
- * port. When set to 0, then FEC autonegotiation is disabled if
- * supported. This flag should be ignored if FEC autonegotiation
- * is not supported on this port.
+ * When set to 1, then FEC autonegotiation is enabled on this port.
+ * When set to 0, then FEC autonegotiation is disabled if supported.
+ * This flag should be ignored if FEC autonegotiation is not supported on this port.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_AUTONEG_ENABLED \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_AUTONEG_ENABLED \
UINT32_C(0x4)
/*
- * When set to 1, then FEC CLAUSE 74 (Fire Code) is supported on
- * this port. When set to 0, then FEC CLAUSE 74 (Fire Code) is
- * not supported on this port.
+ * When set to 1, then FEC CLAUSE 74 (Fire Code) is supported on this port.
+ * When set to 0, then FEC CLAUSE 74 (Fire Code) is not supported on this port.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_SUPPORTED \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_SUPPORTED \
UINT32_C(0x8)
/*
- * When set to 1, then FEC CLAUSE 74 (Fire Code) is enabled on
- * this port. When set to 0, then FEC CLAUSE 74 (Fire Code) is
- * disabled if supported. This flag should be ignored if FEC
- * CLAUSE 74 is not supported on this port.
+ * When set to 1, then FEC CLAUSE 74 (Fire Code) is enabled on this port.
+ * When set to 0, then FEC CLAUSE 74 (Fire Code) is disabled if supported.
+ * This flag should be ignored if FEC CLAUSE 74 is not supported on this port.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_ENABLED \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_ENABLED \
UINT32_C(0x10)
/*
- * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is supported
- * on this port. When set to 0, then FEC CLAUSE 91 (Reed
- * Solomon) is not supported on this port.
+ * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is supported on this port.
+ * When set to 0, then FEC CLAUSE 91 (Reed Solomon) is not supported on this port.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE91_SUPPORTED \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE91_SUPPORTED \
UINT32_C(0x20)
/*
- * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is enabled
- * on this port. When set to 0, then FEC CLAUSE 91 (Reed
- * Solomon) is disabled if supported. This flag should be
- * ignored if FEC CLAUSE 91 is not supported on this port.
+ * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is enabled on this port.
+ * When set to 0, then FEC CLAUSE 91 (Reed Solomon) is disabled if supported.
+ * This flag should be ignored if FEC CLAUSE 91 is not supported on this port.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE91_ENABLED \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE91_ENABLED \
UINT32_C(0x40)
- uint8_t duplex_state;
/*
- * This value is indicates the duplex of the current connection
- * state.
+ * This value indicates the duplex of the current
+ * connection state.
*/
+ uint8_t duplex_state;
/* Half Duplex connection. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_HALF UINT32_C(0x0)
/* Full duplex connection. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_FULL UINT32_C(0x1)
- uint8_t unused_1;
- char phy_vendor_name[16];
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_FULL
+ /* Option flags fields. */
+ uint8_t option_flags;
+ /* When this bit is '1', Media auto detect is enabled. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_OPTION_FLAGS_MEDIA_AUTO_DETECT \
+ UINT32_C(0x1)
/*
- * Up to 16 bytes of null padded ASCII string representing PHY
- * vendor. If the string is set to null, then the vendor name is
- * not available.
+ * Up to 16 bytes of null padded ASCII string representing
+ * PHY vendor.
+ * If the string is set to null, then the vendor name is not
+ * available.
*/
- char phy_vendor_partnumber[16];
+ char phy_vendor_name[16];
/*
- * Up to 16 bytes of null padded ASCII string that identifies
- * vendor specific part number of the PHY. If the string is set
- * to null, then the vendor specific part number is not
- * available.
+ * Up to 16 bytes of null padded ASCII string that
+ * identifies vendor specific part number of the PHY.
+ * If the string is set to null, then the vendor specific
+ * part number is not available.
+ */
+ char phy_vendor_partnumber[16];
+ uint8_t unused_2[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
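Editor's note: the xcvr_identifier_type_tx_lpi_timer word above packs two sub-fields, bits 0-23 for the current TX LPI timer in microseconds and bits 24-31 for the transceiver identifier type, so consumers decode it with the MASK/SFT pairs defined in this struct. A minimal sketch, assuming this header is included; the helper name is illustrative and not part of the driver:

	#include <stdint.h>

	/* Split the combined field of hwrm_port_phy_qcfg_output. */
	static inline void
	bnxt_decode_xcvr_lpi(uint32_t combined, uint32_t *tx_lpi_timer_us,
			     uint32_t *xcvr_type)
	{
		/* Bits 0-23: TX LPI timer, valid only with EEE + TX LPI on. */
		*tx_lpi_timer_us = (combined &
			HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_MASK) >>
			HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_SFT;
		/* Bits 24-31: transceiver type (SFP, QSFP28, ...). */
		*xcvr_type = (combined &
			HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_MASK) >>
			HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFT;
	}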
+
+/*********************
+ * hwrm_port_mac_cfg *
+ *********************/
+
+
+/* hwrm_port_mac_cfg_input (size:320b/40B) */
+struct hwrm_port_mac_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * In this field, there are a number of CoS mappings related flags
+ * that are used to configure CoS mappings and their corresponding
+ * priorities in the hardware.
+ * For the priorities of CoS mappings, the HWRM uses the following
+ * priority order (high to low) by default:
+ * # vlan pri
+ * # ip_dscp
+ * # tunnel_vlan_pri
+ * # default cos
+ *
+ * A subset of CoS mappings can be enabled.
+ * If a priority is not specified for an enabled CoS mapping, the
+ * priority will be assigned in the above order for the enabled CoS
+ * mappings. For example, if vlan_pri and ip_dscp CoS mappings are
+ * enabled and their priorities are not specified, the following
+ * priority order (high to low) will be used by the HWRM:
+ * # vlan_pri
+ * # ip_dscp
+ * # default cos
+ *
+ * vlan_pri CoS mapping together with default CoS with lower priority
+ * are enabled by default by the HWRM.
+ */
+ uint32_t flags;
+ /*
+ * When this bit is '1', this command will configure
+ * the MAC to match the current link state of the PHY.
+ * If the link is not established on the PHY, then this
+ * bit has no effect.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_MATCH_LINK \
+ UINT32_C(0x1)
+ /*
+ * When this bit is set to '1', the inner VLAN PRI to CoS mapping
+ * is requested to be enabled.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_VLAN_PRI2COS_ENABLE \
+ UINT32_C(0x2)
+ /*
+ * When this bit is set to '1', tunnel VLAN PRI field to
+ * CoS mapping is requested to be enabled.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_TUNNEL_PRI2COS_ENABLE \
+ UINT32_C(0x4)
+ /*
+ * When this bit is set to '1', the IP DSCP to CoS mapping is
+ * requested to be enabled.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_IP_DSCP2COS_ENABLE \
+ UINT32_C(0x8)
+ /*
+ * When this bit is '1', the HWRM is requested to
+ * enable timestamp capture capability on the receive side
+ * of this port.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE \
+ UINT32_C(0x10)
+ /*
+ * When this bit is '1', the HWRM is requested to
+ * disable timestamp capture capability on the receive side
+ * of this port.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE \
+ UINT32_C(0x20)
+ /*
+ * When this bit is '1', the HWRM is requested to
+ * enable timestamp capture capability on the transmit side
+ * of this port.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE \
+ UINT32_C(0x40)
+ /*
+ * When this bit is '1', the HWRM is requested to
+ * disable timestamp capture capability on the transmit side
+ * of this port.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE \
+ UINT32_C(0x80)
+ /*
+ * When this bit is '1', the Out-Of-Box WoL is requested to
+ * be enabled on this port.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_OOB_WOL_ENABLE \
+ UINT32_C(0x100)
+ /*
+ * When this bit is '1', the Out-Of-Box WoL is requested to
+ * be disabled on this port.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_OOB_WOL_DISABLE \
+ UINT32_C(0x200)
+ /*
+ * When this bit is set to '1', the inner VLAN PRI to CoS mapping
+ * is requested to be disabled.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_VLAN_PRI2COS_DISABLE \
+ UINT32_C(0x400)
+ /*
+ * When this bit is set to '1', tunnel VLAN PRI field to
+ * CoS mapping is requested to be disabled.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_TUNNEL_PRI2COS_DISABLE \
+ UINT32_C(0x800)
+ /*
+ * When this bit is set to '1', the IP DSCP to CoS mapping is
+ * requested to be disabled.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_IP_DSCP2COS_DISABLE \
+ UINT32_C(0x1000)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the ipg field to be
+ * configured.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_IPG \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the lpbk field to be
+ * configured.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_LPBK \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the vlan_pri2cos_map_pri field to be
+ * configured.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_VLAN_PRI2COS_MAP_PRI \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the tunnel_pri2cos_map_pri field to be
+ * configured.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_TUNNEL_PRI2COS_MAP_PRI \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the dscp2cos_map_pri field to be
+ * configured.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_DSCP2COS_MAP_PRI \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the rx_ts_capture_ptp_msg_type field to be
+ * configured.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the tx_ts_capture_ptp_msg_type field to be
+ * configured.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the cos_field_cfg field to be
+ * configured.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_COS_FIELD_CFG \
+ UINT32_C(0x100)
+ /* Port ID of port that is to be configured. */
+ uint16_t port_id;
+ /*
+ * This value is used to configure the minimum IPG that will
+ * be sent between packets by this port.
+ */
+ uint8_t ipg;
+ /* This value controls the loopback setting for the MAC. */
+ uint8_t lpbk;
+ /* No loopback is selected. Normal operation. */
+ #define HWRM_PORT_MAC_CFG_INPUT_LPBK_NONE UINT32_C(0x0)
+ /*
+ * The HW will be configured with local loopback such that
+ * host data is sent back to the host without modification.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_LPBK_LOCAL UINT32_C(0x1)
+ /*
+ * The HW will be configured with remote loopback such that
+ * port logic will send packets back out the transmitter that
+ * are received.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_LPBK_REMOTE UINT32_C(0x2)
+ #define HWRM_PORT_MAC_CFG_INPUT_LPBK_LAST \
+ HWRM_PORT_MAC_CFG_INPUT_LPBK_REMOTE
+ /*
+ * This value controls the priority setting of VLAN PRI to CoS
+ * mapping based on VLAN Tags of inner packet headers of
+ * tunneled packets or packet headers of non-tunneled packets.
+ *
+ * # Each XXX_pri variable shall have a unique priority value
+ * when it is being specified.
+ * # When comparing priorities of mappings, higher value
+ * indicates higher priority.
+ * For example, a value of 0-3 is returned where 0 is
+ * the lowest priority and 3 is the highest priority.
+ */
+ uint8_t vlan_pri2cos_map_pri;
+ /* Reserved field. */
+ uint8_t reserved1;
+ /*
+ * This value controls the priority setting of VLAN PRI to CoS
+ * mapping based on VLAN Tags of tunneled header.
+ * This mapping only applies when tunneled headers
+ * are present.
+ *
+ * # Each XXX_pri variable shall have a unique priority value
+ * when it is being specified.
+ * # When comparing priorities of mappings, higher value
+ * indicates higher priority.
+ * For example, a value of 0-3 is returned where 0 is
+ * the lowest priority and 3 is the highest priority.
+ */
+ uint8_t tunnel_pri2cos_map_pri;
+ /*
+ * This value controls the priority setting of IP DSCP to CoS
+ * mapping based on inner IP header of tunneled packets or
+ * IP header of non-tunneled packets.
+ *
+ * # Each XXX_pri variable shall have a unique priority value
+ * when it is being specified.
+ * # When comparing priorities of mappings, higher value
+ * indicates higher priority.
+ * For example, a value of 0-3 is returned where 0 is
+ * the lowest priority and 3 is the highest priority.
+ */
+ uint8_t dscp2pri_map_pri;
+ /*
+ * This is a 16-bit bit mask that is used to request a
+ * specific configuration of time stamp capture of PTP messages
+ * on the receive side of this port.
+ * This field shall be ignored if the ptp_rx_ts_capture_enable
+ * flag is not set in this command.
+ * Otherwise, if bit 'i' is set, then the HWRM is being
+ * requested to configure the receive side of the port to
+ * capture the time stamp of every received PTP message
+ * with messageType field value set to i.
+ */
+ uint16_t rx_ts_capture_ptp_msg_type;
+ /*
+ * This is a 16-bit bit mask that is used to request a
+ * specific configuration of time stamp capture of PTP messages
+ * on the transmit side of this port.
+ * This field shall be ignored if the ptp_tx_ts_capture_enable
+ * flag is not set in this command.
+ * Otherwise, if bit 'i' is set, then the HWRM is being
+ * requested to configure the transmit side of the port to
+ * capture the time stamp of every transmitted PTP message
+ * with messageType field value set to i.
+ */
+ uint16_t tx_ts_capture_ptp_msg_type;
+ /* Configuration of CoS fields. */
+ uint8_t cos_field_cfg;
+ /* Reserved */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_RSVD1 \
+ UINT32_C(0x1)
+ /*
+ * This field is used to specify selection of VLAN PRI value
+ * based on whether one or two VLAN Tags are present in
+ * the inner packet headers of tunneled packets or
+ * non-tunneled packets.
+ * This field is valid only if inner VLAN PRI to CoS mapping
+ * is enabled.
+ * If VLAN PRI to CoS mapping is not enabled, then this
+ * field shall be ignored.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_MASK \
+ UINT32_C(0x6)
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_SFT \
+ 1
+ /*
+ * Select inner VLAN PRI when 1 or 2 VLAN Tags are
+ * present in the inner packet headers
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST \
+ (UINT32_C(0x0) << 1)
+ /*
+ * Select outer VLAN Tag PRI when 2 VLAN Tags are
+ * present in the inner packet headers.
+ * No VLAN PRI shall be selected for this configuration
+ * if only one VLAN Tag is present in the inner
+ * packet headers.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER \
+ (UINT32_C(0x1) << 1)
+ /*
+ * Select outermost VLAN PRI when 1 or 2 VLAN Tags
+ * are present in the inner packet headers
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST \
+ (UINT32_C(0x2) << 1)
+ /* Unspecified */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED \
+ (UINT32_C(0x3) << 1)
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_LAST \
+ HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+ /*
+ * This field is used to specify selection of tunnel VLAN
+ * PRI value based on whether one or two VLAN Tags are
+ * present in tunnel headers.
+ * This field is valid only if tunnel VLAN PRI to CoS mapping
+ * is enabled.
+ * If tunnel VLAN PRI to CoS mapping is not enabled, then this
+ * field shall be ignored.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK \
+ UINT32_C(0x18)
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT \
+ 3
+ /*
+ * Select inner VLAN PRI when 1 or 2 VLAN Tags are
+ * present in the tunnel packet headers
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST \
+ (UINT32_C(0x0) << 3)
+ /*
+ * Select outer VLAN Tag PRI when 2 VLAN Tags are
+ * present in the tunnel packet headers.
+ * No tunnel VLAN PRI shall be selected for this
+ * configuration if only one VLAN Tag is present in
+ * the tunnel packet headers.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER \
+ (UINT32_C(0x1) << 3)
+ /*
+ * Select outermost VLAN PRI when 1 or 2 VLAN Tags
+ * are present in the tunnel packet headers
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST \
+ (UINT32_C(0x2) << 3)
+ /* Unspecified */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED \
+ (UINT32_C(0x3) << 3)
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST \
+ HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+ /*
+ * This field shall be used to provide default CoS value
+ * that has been configured on this port.
+ * This field is valid only if default CoS mapping
+ * is enabled.
+ * If default CoS mapping is not enabled, then this
+ * field shall be ignored.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_DEFAULT_COS_MASK \
+ UINT32_C(0xe0)
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_DEFAULT_COS_SFT \
+ 5
+ uint8_t unused_0[3];
+} __attribute__((packed));
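Editor's note on the flags/enables convention in hwrm_port_mac_cfg_input: an optional field is only honoured when its enables bit is set, and the PTP message-type masks additionally require the matching capture-enable flag. A hedged sketch follows; port_id is a caller-supplied value, rte_byteorder.h is assumed, and posting the request through the driver's HWRM mailbox is omitted:

	#include <string.h>
	#include <rte_byteorder.h>

	static void
	port_mac_cfg_enable_rx_ptp(struct hwrm_port_mac_cfg_input *req,
				   uint16_t port_id)
	{
		memset(req, 0, sizeof(*req));
		req->port_id = rte_cpu_to_le_16(port_id);
		/* Request RX timestamp capture on this port... */
		req->flags = rte_cpu_to_le_32(
			HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE);
		/* ...and mark the message-type mask below as configured. */
		req->enables = rte_cpu_to_le_32(
			HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
		/* Bit i selects PTP messages with messageType == i;
		 * 0x000f covers the four event message types (0-3).
		 */
		req->rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(0x000f);
	}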
+
+/* hwrm_port_mac_cfg_output (size:128b/16B) */
+struct hwrm_port_mac_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * This is the configured maximum length of Ethernet packet
+ * payload that is allowed to be received on the port.
+ * This value does not include the number of bytes used by
+ * Ethernet header and trailer (CRC).
+ */
+ uint16_t mru;
+ /*
+ * This is the configured maximum length of Ethernet packet
+ * payload that is allowed to be transmitted on the port.
+ * This value does not include the number of bytes used by
+ * Ethernet header and trailer (CRC).
+ */
+ uint16_t mtu;
+ /* Current configuration of the IPG value. */
+ uint8_t ipg;
+ /* Current value of the loopback value. */
+ uint8_t lpbk;
+ /* No loopback is selected. Normal operation. */
+ #define HWRM_PORT_MAC_CFG_OUTPUT_LPBK_NONE UINT32_C(0x0)
+ /*
+ * The HW will be configured with local loopback such that
+ * host data is sent back to the host without modification.
+ */
+ #define HWRM_PORT_MAC_CFG_OUTPUT_LPBK_LOCAL UINT32_C(0x1)
+ /*
+ * The HW will be configured with remote loopback such that
+ * port logic will send packets back out the transmitter that
+ * are received.
+ */
+ #define HWRM_PORT_MAC_CFG_OUTPUT_LPBK_REMOTE UINT32_C(0x2)
+ #define HWRM_PORT_MAC_CFG_OUTPUT_LPBK_LAST \
+ HWRM_PORT_MAC_CFG_OUTPUT_LPBK_REMOTE
+ uint8_t unused_0;
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
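Editor's note: the valid byte documented in these output structures is the completion handshake; firmware writes it last, so the host polls it to know the full response record has landed in memory. A simplified sketch, assuming DPDK's rte_delay_us() for the busy-wait; production code also needs a read barrier and the driver's real timeout policy:

	#include <errno.h>
	#include <stdint.h>
	#include <rte_cycles.h>

	static int
	hwrm_wait_for_valid(const volatile uint8_t *valid,
			    unsigned int timeout_us)
	{
		while (timeout_us--) {
			/* '1' means the whole response is in host RAM. */
			if (*valid == 1)
				return 0;
			rte_delay_us(1);
		}
		return -ETIMEDOUT;
	}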
+
+/**********************
+ * hwrm_port_mac_qcfg *
+ **********************/
+
+
+/* hwrm_port_mac_qcfg_input (size:192b/24B) */
+struct hwrm_port_mac_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Port ID of port that is to be configured. */
+ uint16_t port_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_port_mac_qcfg_output (size:192b/24B) */
+struct hwrm_port_mac_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * This is the configured maximum length of Ethernet packet
+ * payload that is allowed to be received on the port.
+ * This value does not include the number of bytes used by the
+ * Ethernet header and trailer (CRC).
+ */
+ uint16_t mru;
+ /*
+ * This is the configured maximum length of Ethernet packet
+ * payload that is allowed to be transmitted on the port.
+ * This value does not include the number of bytes used by the
+ * Ethernet header and trailer (CRC).
+ */
+ uint16_t mtu;
+ /*
+ * The minimum IPG that will
+ * be sent between packets by this port.
+ */
+ uint8_t ipg;
+ /* The loopback setting for the MAC. */
+ uint8_t lpbk;
+ /* No loopback is selected. Normal operation. */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_NONE UINT32_C(0x0)
+ /*
+ * The HW will be configured with local loopback such that
+ * host data is sent back to the host without modification.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_LOCAL UINT32_C(0x1)
+ /*
+ * The HW will be configured with remote loopback such that
+ * port logic will send packets back out the transmitter that
+ * are received.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_REMOTE UINT32_C(0x2)
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_LAST \
+ HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_REMOTE
+ /*
+ * Priority setting for VLAN PRI to CoS mapping.
+ * # Each XXX_pri variable shall have a unique priority value
+ * when it is being used.
+ * # When comparing priorities of mappings, higher value
+ * indicates higher priority.
+ * For example, a value of 0-3 is returned where 0 is
+ * the lowest priority and 3 is the highest priority.
+ * # If the corresponding CoS mapping is not enabled, then this
+ * field should be ignored.
+ * # This value indicates the normalized priority value retained
+ * in the HWRM.
+ */
+ uint8_t vlan_pri2cos_map_pri;
+ /*
+ * In this field, a number of CoS mappings related flags
+ * are used to indicate configured CoS mappings.
+ */
+ uint8_t flags;
+ /*
+ * When this bit is set to '1', the inner VLAN PRI to CoS mapping
+ * is enabled.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_VLAN_PRI2COS_ENABLE \
+ UINT32_C(0x1)
+ /*
+ * When this bit is set to '1', tunnel VLAN PRI field to
+ * CoS mapping is enabled.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_TUNNEL_PRI2COS_ENABLE \
+ UINT32_C(0x2)
+ /*
+ * When this bit is set to '1', the IP DSCP to CoS mapping is
+ * enabled.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_IP_DSCP2COS_ENABLE \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', the Out-Of-Box WoL is enabled on this
+ * port.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_OOB_WOL_ENABLE \
+ UINT32_C(0x8)
+ /* When this bit is '1', PTP is enabled for RX on this port. */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE \
+ UINT32_C(0x10)
+ /* When this bit is '1', PTP is enabled for TX on this port. */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE \
+ UINT32_C(0x20)
+ /*
+ * Priority setting for tunnel VLAN PRI to CoS mapping.
+ * # Each XXX_pri variable shall have a unique priority value
+ * when it is being used.
+ * # When comparing priorities of mappings, higher value
+ * indicates higher priority.
+ * For example, a value of 0-3 is returned where 0 is
+ * the lowest priority and 3 is the highest priority.
+ * # If the corresponding CoS mapping is not enabled, then this
+ * field should be ignored.
+ * # This value indicates the normalized priority value retained
+ * in the HWRM.
+ */
+ uint8_t tunnel_pri2cos_map_pri;
+ /*
+ * Priority setting for DSCP to PRI mapping.
+ * # Each XXX_pri variable shall have a unique priority value
+ * when it is being used.
+ * # When comparing priorities of mappings, higher value
+ * indicates higher priority.
+ * For example, a value of 0-3 is returned where 0 is
+ * the lowest priority and 3 is the highest priority.
+ * # If the corresponding CoS mapping is not enabled, then this
+ * field should be ignored.
+ * # This value indicates the normalized priority value retained
+ * in the HWRM.
+ */
+ uint8_t dscp2pri_map_pri;
+ /*
+ * This is a 16-bit bit mask that represents the
+ * current configuration of time stamp capture of PTP messages
+ * on the receive side of this port.
+ * If bit 'i' is set, then the receive side of the port
+ * is configured to capture the time stamp of every
+ * received PTP message with messageType field value set
+ * to i.
+ * If all bits are set to 0 (i.e. field value set 0),
+ * then the receive side of the port is not configured
+ * to capture timestamp for PTP messages.
+ * If all bits are set to 1, then the receive side of the
+ * port is configured to capture timestamp for all PTP
+ * messages.
+ */
+ uint16_t rx_ts_capture_ptp_msg_type;
+ /*
+ * This is a 16-bit bit mask that represents the
+ * current configuration of time stamp capture of PTP messages
+ * on the transmit side of this port.
+ * If bit 'i' is set, then the transmit side of the port
+ * is configured to capture the time stamp of every
+ * received PTP message with messageType field value set
+ * to i.
+ * If all bits are set to 0 (i.e. field value set 0),
+ * then the transmit side of the port is not configured
+ * to capture timestamp for PTP messages.
+ * If all bits are set to 1, then the transmit side of the
+ * port is configured to capture timestamp for all PTP
+ * messages.
+ */
+ uint16_t tx_ts_capture_ptp_msg_type;
+ /* Configuration of CoS fields. */
+ uint8_t cos_field_cfg;
+ /* Reserved */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_RSVD \
+ UINT32_C(0x1)
+ /*
+ * This field is used for selecting VLAN PRI value
+ * based on whether one or two VLAN Tags are present in
+ * the inner packet headers of tunneled packets or
+ * non-tunneled packets.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_MASK \
+ UINT32_C(0x6)
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_SFT \
+ 1
+ /*
+ * Select inner VLAN PRI when 1 or 2 VLAN Tags are
+ * present in the inner packet headers
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST \
+ (UINT32_C(0x0) << 1)
+ /*
+ * Select outer VLAN Tag PRI when 2 VLAN Tags are
+ * present in the inner packet headers.
+ * No VLAN PRI is selected for this configuration
+ * if only one VLAN Tag is present in the inner
+ * packet headers.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER \
+ (UINT32_C(0x1) << 1)
+ /*
+ * Select outermost VLAN PRI when 1 or 2 VLAN Tags
+ * are present in the inner packet headers
*/
- uint32_t unused_2;
- uint8_t unused_3;
- uint8_t unused_4;
- uint8_t unused_5;
- uint8_t valid;
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST \
+ (UINT32_C(0x2) << 1)
+ /* Unspecified */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED \
+ (UINT32_C(0x3) << 1)
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_LAST \
+ HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used for selecting tunnel VLAN PRI value
+ * based on whether one or two VLAN Tags are present in
+ * the tunnel headers of tunneled packets. This selection
+ * does not apply to non-tunneled packets.
*/
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK \
+ UINT32_C(0x18)
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT \
+ 3
+ /*
+ * Select inner VLAN PRI when 1 or 2 VLAN Tags are
+ * present in the tunnel packet headers
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST \
+ (UINT32_C(0x0) << 3)
+ /*
+ * Select outer VLAN Tag PRI when 2 VLAN Tags are
+ * present in the tunnel packet headers.
+ * No VLAN PRI is selected for this configuration
+ * if only one VLAN Tag is present in the tunnel
+ * packet headers.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER \
+ (UINT32_C(0x1) << 3)
+ /*
+ * Select outermost VLAN PRI when 1 or 2 VLAN Tags
+ * are present in the tunnel packet headers
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST \
+ (UINT32_C(0x2) << 3)
+ /* Unspecified */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED \
+ (UINT32_C(0x3) << 3)
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST \
+ HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+ /*
+ * This field is used to provide default CoS value that
+ * has been configured on this port.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_DEFAULT_COS_MASK \
+ UINT32_C(0xe0)
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_DEFAULT_COS_SFT \
+ 5
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
} __attribute__((packed));
-/* hwrm_port_qstats */
-/* Description: This function returns per port Ethernet statistics. */
-/* Input (40 bytes) */
+/**************************
+ * hwrm_port_mac_ptp_qcfg *
+ **************************/
+
+
+/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */
+struct hwrm_port_mac_ptp_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Port ID of port that is being queried. */
+ uint16_t port_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_port_mac_ptp_qcfg_output (size:640b/80B) */
+struct hwrm_port_mac_ptp_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * In this field, a number of PTP related flags
+ * are used to indicate configured PTP capabilities.
+ */
+ uint8_t flags;
+ /*
+ * When this bit is set to '1', the PTP related registers are
+ * directly accessible by the host.
+ */
+ #define HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS \
+ UINT32_C(0x1)
+ /*
+ * When this bit is set to '1', the PTP information is accessible
+ * via HWRM commands.
+ */
+ #define HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_HWRM_ACCESS \
+ UINT32_C(0x2)
+ uint8_t unused_0[3];
+ /* Offset of the PTP register for the lower 32 bits of timestamp for RX. */
+ uint32_t rx_ts_reg_off_lower;
+ /* Offset of the PTP register for the upper 32 bits of timestamp for RX. */
+ uint32_t rx_ts_reg_off_upper;
+ /* Offset of the PTP register for the sequence ID for RX. */
+ uint32_t rx_ts_reg_off_seq_id;
+ /* Offset of the first PTP source ID for RX. */
+ uint32_t rx_ts_reg_off_src_id_0;
+ /* Offset of the second PTP source ID for RX. */
+ uint32_t rx_ts_reg_off_src_id_1;
+ /* Offset of the third PTP source ID for RX. */
+ uint32_t rx_ts_reg_off_src_id_2;
+ /* Offset of the domain ID for RX. */
+ uint32_t rx_ts_reg_off_domain_id;
+ /* Offset of the PTP FIFO register for RX. */
+ uint32_t rx_ts_reg_off_fifo;
+ /* Offset of the PTP advance FIFO register for RX. */
+ uint32_t rx_ts_reg_off_fifo_adv;
+ /* PTP timestamp granularity for RX. */
+ uint32_t rx_ts_reg_off_granularity;
+ /* Offset of the PTP register for the lower 32 bits of timestamp for TX. */
+ uint32_t tx_ts_reg_off_lower;
+ /* Offset of the PTP register for the upper 32 bits of timestamp for TX. */
+ uint32_t tx_ts_reg_off_upper;
+ /* Offset of the PTP register for the sequence ID for TX. */
+ uint32_t tx_ts_reg_off_seq_id;
+ /* Offset of the PTP FIFO register for TX. */
+ uint32_t tx_ts_reg_off_fifo;
+ /* PTP timestamp granularity for TX. */
+ uint32_t tx_ts_reg_off_granularity;
+ uint8_t unused_1[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
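Editor's note: when the response above reports FLAGS_DIRECT_ACCESS, the *_reg_off_* values are register offsets the host can read directly instead of issuing further HWRM calls. A sketch of assembling the 64-bit RX timestamp from its two halves, assuming rte_byteorder.h; read_bar0() is a hypothetical 32-bit MMIO read helper, not a real driver function:

	static uint64_t
	ptp_read_rx_timestamp(const struct hwrm_port_mac_ptp_qcfg_output *resp)
	{
		extern uint32_t read_bar0(uint32_t offset);	/* hypothetical */

		uint64_t lo = read_bar0(rte_le_to_cpu_32(resp->rx_ts_reg_off_lower));
		uint64_t hi = read_bar0(rte_le_to_cpu_32(resp->rx_ts_reg_off_upper));

		return (hi << 32) | lo;
	}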
+
+/********************
+ * hwrm_port_qstats *
+ ********************/
+
+
+/* hwrm_port_qstats_input (size:320b/40B) */
struct hwrm_port_qstats_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t port_id;
+ uint64_t resp_addr;
/* Port ID of port that is being queried. */
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2[3];
- uint8_t unused_3;
- uint64_t tx_stat_host_addr;
- /* This is the host address where Tx port statistics will be stored */
- uint64_t rx_stat_host_addr;
- /* This is the host address where Rx port statistics will be stored */
+ uint16_t port_id;
+ uint8_t unused_0[6];
+ /*
+ * This is the host address where
+ * Tx port statistics will be stored
+ */
+ uint64_t tx_stat_host_addr;
+ /*
+ * This is the host address where
+ * Rx port statistics will be stored
+ */
+ uint64_t rx_stat_host_addr;
} __attribute__((packed));
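Editor's note: hwrm_port_qstats is a DMA-style query; the driver hands the firmware the IOVAs of two host buffers, and the firmware writes the TX and RX statistics blocks into them, reporting the written sizes in the response. A rough sketch, assuming rte_malloc is available; the 1024-byte sizes are placeholders rather than values taken from the spec:

	#include <errno.h>
	#include <string.h>
	#include <rte_byteorder.h>
	#include <rte_malloc.h>

	static int
	port_qstats_prepare(struct hwrm_port_qstats_input *req, uint16_t port_id)
	{
		void *tx_stats = rte_zmalloc("tx_port_stats", 1024, 16);
		void *rx_stats = rte_zmalloc("rx_port_stats", 1024, 16);

		if (tx_stats == NULL || rx_stats == NULL) {
			rte_free(tx_stats);
			rte_free(rx_stats);
			return -ENOMEM;
		}

		memset(req, 0, sizeof(*req));
		req->port_id = rte_cpu_to_le_16(port_id);
		/* Firmware DMAs the stats blocks to these host addresses. */
		req->tx_stat_host_addr =
			rte_cpu_to_le_64(rte_malloc_virt2iova(tx_stats));
		req->rx_stat_host_addr =
			rte_cpu_to_le_64(rte_malloc_virt2iova(rx_stats));
		return 0;
	}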
-/* Output (16 bytes) */
+/* hwrm_port_qstats_output (size:128b/16B) */
struct hwrm_port_qstats_output {
- uint16_t error_code;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* The size of TX port statistics block in bytes. */
+ uint16_t tx_stat_size;
+ /* The size of RX port statistics block in bytes. */
+ uint16_t rx_stat_size;
+ uint8_t unused_0[3];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
+/************************
+ * hwrm_port_qstats_ext *
+ ************************/
+
+
+/* hwrm_port_qstats_ext_input (size:320b/40B) */
+struct hwrm_port_qstats_ext_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t tx_stat_size;
+ uint64_t resp_addr;
+ /* Port ID of port that is being queried. */
+ uint16_t port_id;
+ /*
+ * The size of TX port extended
+ * statistics block in bytes.
+ */
+ uint16_t tx_stat_size;
+ /*
+ * The size of RX port extended
+ * statistics block in bytes
+ */
+ uint16_t rx_stat_size;
+ uint8_t unused_0[2];
+ /*
+ * This is the host address where
+ * Tx port statistics will be stored
+ */
+ uint64_t tx_stat_host_addr;
+ /*
+ * This is the host address where
+ * Rx port statistics will be stored
+ */
+ uint64_t rx_stat_host_addr;
+} __attribute__((packed));
+
+/* hwrm_port_qstats_ext_output (size:128b/16B) */
+struct hwrm_port_qstats_ext_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/* The size of TX port statistics block in bytes. */
- uint16_t rx_stat_size;
+ uint16_t tx_stat_size;
/* The size of RX port statistics block in bytes. */
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t valid;
+ uint16_t rx_stat_size;
+ uint8_t unused_0[3];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* hwrm_port_clr_stats */
-/*
- * Description: This function clears per port statistics. The HWRM shall not
- * allow a VF driver to clear port statistics. The HWRM shall not allow a PF
- * driver to clear port statistics in a partitioning mode. The HWRM may allow a
- * PF driver to clear port statistics in the non-partitioning mode.
- */
-/* Input (24 bytes) */
+/*************************
+ * hwrm_port_lpbk_qstats *
+ *************************/
+
+
+/* hwrm_port_lpbk_qstats_input (size:128b/16B) */
+struct hwrm_port_lpbk_qstats_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} __attribute__((packed));
+
+/* hwrm_port_lpbk_qstats_output (size:768b/96B) */
+struct hwrm_port_lpbk_qstats_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Number of transmitted unicast frames */
+ uint64_t lpbk_ucast_frames;
+ /* Number of transmitted multicast frames */
+ uint64_t lpbk_mcast_frames;
+ /* Number of transmitted broadcast frames */
+ uint64_t lpbk_bcast_frames;
+ /* Number of transmitted bytes for unicast traffic */
+ uint64_t lpbk_ucast_bytes;
+ /* Number of transmitted bytes for multicast traffic */
+ uint64_t lpbk_mcast_bytes;
+ /* Number of transmitted bytes for broadcast traffic */
+ uint64_t lpbk_bcast_bytes;
+ /* Total Tx Drops for loopback traffic reported by STATS block */
+ uint64_t tx_stat_discard;
+ /* Total Tx Error Drops for loopback traffic reported by STATS block */
+ uint64_t tx_stat_error;
+ /* Total Rx Drops for loopback traffic reported by STATS block */
+ uint64_t rx_stat_discard;
+ /* Total Rx Error Drops for loopback traffic reported by STATS block */
+ uint64_t rx_stat_error;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
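
A small illustrative decode of the loopback counters above, gated on the `valid` byte as the field comment requires; it assumes the structure definition is visible and that the response buffer has already been filled by firmware.

#include <stdint.h>

static uint64_t example_total_lpbk_tx_frames(
	const struct hwrm_port_lpbk_qstats_output *resp)
{
	/* Per the comment above, the record is complete once valid reads '1'. */
	if (resp->valid != 1)
		return 0;
	return resp->lpbk_ucast_frames +
	       resp->lpbk_mcast_frames +
	       resp->lpbk_bcast_frames;
}
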
+
+/***********************
+ * hwrm_port_clr_stats *
+ ***********************/
+
+
+/* hwrm_port_clr_stats_input (size:192b/24B) */
struct hwrm_port_clr_stats_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t port_id;
+ uint64_t resp_addr;
/* Port ID of port that is being queried. */
- uint16_t unused_0[3];
+ uint16_t port_id;
+ uint8_t unused_0[6];
} __attribute__((packed));
-/* Output (16 bytes) */
+/* hwrm_port_clr_stats_output (size:128b/16B) */
struct hwrm_port_clr_stats_output {
- uint16_t error_code;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
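
A hedged sketch of how a caller might populate the clear-statistics request above. The HWRM_PORT_CLR_STATS command identifier is assumed to be defined earlier in this header, the 0xFFFF "no completion ring" convention is taken from the original field description, and byte-order conversion plus the actual send path are left out.

#include <stdint.h>
#include <string.h>

static void example_prep_clr_stats(struct hwrm_port_clr_stats_input *req,
				   uint16_t port_id, uint64_t resp_dma)
{
	memset(req, 0, sizeof(*req));		/* also zeroes unused_0[6] */
	req->req_type = HWRM_PORT_CLR_STATS;	/* command ID, assumed defined elsewhere in this header */
	req->cmpl_ring = UINT16_MAX;		/* assumption: -1 means no completion-ring event */
	req->target_id = 0xFFFF;		/* 0xFFFF - HWRM */
	req->resp_addr = resp_dma;		/* 16B-aligned, zeroed response buffer */
	req->port_id = port_id;
}
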
+
+/****************************
+ * hwrm_port_lpbk_clr_stats *
+ ****************************/
+
+
+/* hwrm_port_lpbk_clr_stats_input (size:128b/16B) */
+struct hwrm_port_lpbk_clr_stats_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t target_id;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
+ uint64_t resp_addr;
} __attribute__((packed));
-/* hwrm_port_led_cfg */
-/*
- * Description: This function is used to configure LEDs on a given port. Each
- * port has individual set of LEDs associated with it. These LEDs are used for
- * speed/link configuration as well as activity indicator configuration. Up to
- * three LEDs can be configured, one for activity and two for speeds.
- */
-/* Input (64 bytes) */
+/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
+struct hwrm_port_lpbk_clr_stats_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_port_ts_query *
+ **********************/
+
+
+/* hwrm_port_ts_query_input (size:192b/24B) */
+struct hwrm_port_ts_query_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ #define HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_LAST \
+ HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX
+ /* Port ID of port that is being queried. */
+ uint16_t port_id;
+ uint8_t unused_0[2];
+} __attribute__((packed));
+
+/* hwrm_port_ts_query_output (size:192b/24B) */
+struct hwrm_port_ts_query_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Timestamp value of PTP message captured. */
+ uint64_t ptp_msg_ts;
+ /* Sequence ID of the PTP message captured. */
+ uint16_t ptp_msg_seqid;
+ uint8_t unused_0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
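
An illustrative use of the timestamp-query pair above: select the TX or RX capture path with the PATH flag, then read the captured timestamp and sequence ID back once the response is valid. How the request is delivered to firmware is driver-specific and not shown; the helper names here are hypothetical.

#include <stdint.h>

static void example_prep_ts_query(struct hwrm_port_ts_query_input *req,
				  uint16_t port_id, int tx_path)
{
	req->port_id = port_id;
	req->flags = tx_path ? HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX :
			       HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX;
}

static int example_read_ts(const struct hwrm_port_ts_query_output *resp,
			   uint64_t *ts, uint16_t *seqid)
{
	if (resp->valid != 1)		/* response not completely written yet */
		return -1;
	*ts = resp->ptp_msg_ts;		/* timestamp of the captured PTP message */
	*seqid = resp->ptp_msg_seqid;	/* sequence ID of that PTP message */
	return 0;
}
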
+
+/***********************
+ * hwrm_port_phy_qcaps *
+ ***********************/
+
+
+/* hwrm_port_phy_qcaps_input (size:192b/24B) */
+struct hwrm_port_phy_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Port ID of port that is being queried. */
+ uint16_t port_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_port_phy_qcaps_output (size:192b/24B) */
+struct hwrm_port_phy_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* PHY capability flags */
+ uint8_t flags;
+ /*
+ * If set to 1, then this field indicates that the
+ * link is capable of supporting EEE.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_EEE_SUPPORTED \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, then this field indicates that the
+ * PHY is capable of supporting external loopback.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_EXTERNAL_LPBK_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * Reserved field. The HWRM shall set this field to 0.
+ * An HWRM client shall ignore this field.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_RSVD1_MASK \
+ UINT32_C(0xfc)
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_RSVD1_SFT 2
+ /* Number of front panel ports for this device. */
+ uint8_t port_cnt;
+ /* Not supported or unknown */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_UNKNOWN UINT32_C(0x0)
+ /* single port device */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_1 UINT32_C(0x1)
+ /* 2-port device */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_2 UINT32_C(0x2)
+ /* 3-port device */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_3 UINT32_C(0x3)
+ /* 4-port device */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_4 UINT32_C(0x4)
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_LAST \
+ HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_4
+ /*
+ * This is a bit mask to indicate what speeds are supported
+ * as forced speeds on this link.
+ * For each speed that can be forced on this link, the
+ * corresponding mask bit shall be set to '1'.
+ */
+ uint16_t supported_speeds_force_mode;
+ /* 100Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_100MB \
+ UINT32_C(0x2)
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_1GB \
+ UINT32_C(0x8)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_2GB \
+ UINT32_C(0x10)
+ /* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB \
+ UINT32_C(0x20)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_10GB \
+ UINT32_C(0x40)
+ /* 20Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_20GB \
+ UINT32_C(0x80)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_25GB \
+ UINT32_C(0x100)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_40GB \
+ UINT32_C(0x200)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_50GB \
+ UINT32_C(0x400)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_100GB \
+ UINT32_C(0x800)
+ /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD \
+ UINT32_C(0x1000)
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_10MB \
+ UINT32_C(0x2000)
+ /*
+ * This is a bit mask to indicate what speeds are supported
+ * for autonegotiation on this link.
+ * For each speed that can be autonegotiated on this link, the
+ * corresponding mask bit shall be set to '1'.
+ */
+ uint16_t supported_speeds_auto_mode;
+ /* 100Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_100MB \
+ UINT32_C(0x2)
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_1GB \
+ UINT32_C(0x8)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_2GB \
+ UINT32_C(0x10)
+ /* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB \
+ UINT32_C(0x20)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_10GB \
+ UINT32_C(0x40)
+ /* 20Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_20GB \
+ UINT32_C(0x80)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_25GB \
+ UINT32_C(0x100)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_40GB \
+ UINT32_C(0x200)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_50GB \
+ UINT32_C(0x400)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_100GB \
+ UINT32_C(0x800)
+ /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD \
+ UINT32_C(0x1000)
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_10MB \
+ UINT32_C(0x2000)
+ /*
+ * This is a bit mask to indicate what speeds are supported
+ * for EEE on this link.
+ * For each speed that can be autonegotiated when EEE is enabled
+ * on this link, the corresponding mask bit shall be set to '1'.
+ * This field is only valid when eee_supported is set to '1'.
+ */
+ uint16_t supported_speeds_eee_mode;
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_100MB \
+ UINT32_C(0x2)
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_1GB \
+ UINT32_C(0x8)
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 \
+ UINT32_C(0x10)
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 \
+ UINT32_C(0x20)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_10GB \
+ UINT32_C(0x40)
+ uint32_t tx_lpi_timer_low;
+ /*
+ * The lowest value of TX LPI timer that can be set on this link
+ * when EEE is enabled. This value is in microseconds.
+ * This field is valid only when eee_supported is set to '1'.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_LOW_MASK \
+ UINT32_C(0xffffff)
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_LOW_SFT 0
+ /*
+ * Reserved field. The HWRM shall set this field to 0.
+ * An HWRM client shall ignore this field.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_RSVD2_MASK \
+ UINT32_C(0xff000000)
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_RSVD2_SFT 24
+ uint32_t valid_tx_lpi_timer_high;
+ /*
+ * The highest value of TX LPI timer that can be set on this link
+ * when EEE is enabled. This value is in microseconds.
+ * This field is valid only when eee_supported is set to '1'.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_HIGH_MASK \
+ UINT32_C(0xffffff)
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_HIGH_SFT 0
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_VALID_MASK \
+ UINT32_C(0xff000000)
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_VALID_SFT 24
+} __attribute__((packed));
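
A hedged sketch of decoding a few capability fields from the response above: bit tests against the masks defined here, plus extraction of the 24-bit LPI timer bounds. It assumes the response has already been validated and the definitions above are in scope.

#include <inttypes.h>
#include <stdio.h>

static void example_decode_phy_qcaps(
	const struct hwrm_port_phy_qcaps_output *resp)
{
	int eee_ok = !!(resp->flags &
			HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_EEE_SUPPORTED);
	int force_25g = !!(resp->supported_speeds_force_mode &
		HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_25GB);
	uint32_t lpi_low = resp->tx_lpi_timer_low &
		HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_LOW_MASK;
	uint32_t lpi_high = resp->valid_tx_lpi_timer_high &
		HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_HIGH_MASK;

	printf("ports=%u eee=%d force-25G=%d lpi=[%" PRIu32 "..%" PRIu32 "] us\n",
	       (unsigned int)resp->port_cnt, eee_ok, force_25g,
	       lpi_low, lpi_high);
}
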
+
+/***************************
+ * hwrm_port_phy_i2c_write *
+ ***************************/
+
+
+/* hwrm_port_phy_i2c_write_input (size:832b/104B) */
+struct hwrm_port_phy_i2c_write_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the page_offset field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_I2C_WRITE_INPUT_ENABLES_PAGE_OFFSET \
+ UINT32_C(0x1)
+ /* Port ID of port. */
+ uint16_t port_id;
+ /* 8-bit I2C slave address. */
+ uint8_t i2c_slave_addr;
+ uint8_t unused_0;
+ /* The page number that is being accessed over I2C. */
+ uint16_t page_number;
+ /* Offset within the page that is being accessed over I2C. */
+ uint16_t page_offset;
+ /*
+ * Length of data to write, in bytes starting at the offset
+ * specified above. If the offset is not specified, then
+ * the data shall be written from the beginning of the page.
+ */
+ uint8_t data_length;
+ uint8_t unused_1[7];
+ /* Up to 64B of data. */
+ uint32_t data[16];
+} __attribute__((packed));
+
+/* hwrm_port_phy_i2c_write_output (size:128b/16B) */
+struct hwrm_port_phy_i2c_write_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************
+ * hwrm_port_phy_i2c_read *
+ **************************/
+
+
+/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
+struct hwrm_port_phy_i2c_read_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the page_offset field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_I2C_READ_INPUT_ENABLES_PAGE_OFFSET \
+ UINT32_C(0x1)
+ /* Port ID of port. */
+ uint16_t port_id;
+ /* 8-bit I2C slave address. */
+ uint8_t i2c_slave_addr;
+ uint8_t unused_0;
+ /* The page number that is being accessed over I2C. */
+ uint16_t page_number;
+ /* Offset within the page that is being accessed over I2C. */
+ uint16_t page_offset;
+ /*
+ * Length of data to read, in bytes starting at the offset
+ * specified above. If the offset is not specified, then
+ * the data shall be read from the beginning of the page.
+ */
+ uint8_t data_length;
+ uint8_t unused_1[7];
+} __attribute__((packed));
+
+/* hwrm_port_phy_i2c_read_output (size:640b/80B) */
+struct hwrm_port_phy_i2c_read_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Up to 64B of data. */
+ uint32_t data[16];
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
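
An illustrative fill of the I2C read request above, e.g. for pulling part of a pluggable module's EEPROM; the corresponding write request is symmetric. The 0xA0 slave address and page 0 are common SFP conventions used purely as example values here, not something this header mandates, and the send path is omitted.

#include <stdint.h>

static void example_prep_i2c_read(struct hwrm_port_phy_i2c_read_input *req,
				  uint16_t port_id)
{
	req->port_id = port_id;
	req->i2c_slave_addr = 0xA0;	/* example only: typical SFP EEPROM address */
	req->page_number = 0;
	req->enables = HWRM_PORT_PHY_I2C_READ_INPUT_ENABLES_PAGE_OFFSET;
	req->page_offset = 0;		/* start reading at the beginning of the page */
	req->data_length = 64;		/* at most 64 bytes fit in the data[16] array */
}
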
+
+/*********************
+ * hwrm_port_led_cfg *
+ *********************/
+
+
+/* hwrm_port_led_cfg_input (size:512b/64B) */
struct hwrm_port_led_cfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t cmpl_ring;
+ uint16_t seq_id;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t target_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint64_t resp_addr;
+ uint64_t resp_addr;
+ uint32_t enables;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * This bit must be '1' for the led0_id field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_ID \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the led0_state field to be
+ * configured.
*/
- uint32_t enables;
- /* This bit must be '1' for the led0_id field to be configured. */
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_ID UINT32_C(0x1)
- /* This bit must be '1' for the led0_state field to be configured. */
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_STATE UINT32_C(0x2)
- /* This bit must be '1' for the led0_color field to be configured. */
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_COLOR UINT32_C(0x4)
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_STATE \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the led0_color field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_COLOR \
+ UINT32_C(0x4)
/*
* This bit must be '1' for the led0_blink_on field to be
* configured.
*/
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_ON UINT32_C(0x8)
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_ON \
+ UINT32_C(0x8)
/*
* This bit must be '1' for the led0_blink_off field to be
* configured.
*/
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_OFF UINT32_C(0x10)
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_OFF \
+ UINT32_C(0x10)
/*
* This bit must be '1' for the led0_group_id field to be
* configured.
*/
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_GROUP_ID UINT32_C(0x20)
- /* This bit must be '1' for the led1_id field to be configured. */
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_ID UINT32_C(0x40)
- /* This bit must be '1' for the led1_state field to be configured. */
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_STATE UINT32_C(0x80)
- /* This bit must be '1' for the led1_color field to be configured. */
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_COLOR UINT32_C(0x100)
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_GROUP_ID \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the led1_id field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_ID \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the led1_state field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_STATE \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the led1_color field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_COLOR \
+ UINT32_C(0x100)
/*
* This bit must be '1' for the led1_blink_on field to be
* configured.
*/
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_BLINK_ON UINT32_C(0x200)
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_BLINK_ON \
+ UINT32_C(0x200)
/*
* This bit must be '1' for the led1_blink_off field to be
* configured.
*/
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_BLINK_OFF UINT32_C(0x400)
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_BLINK_OFF \
+ UINT32_C(0x400)
/*
* This bit must be '1' for the led1_group_id field to be
* configured.
*/
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_GROUP_ID UINT32_C(0x800)
- /* This bit must be '1' for the led2_id field to be configured. */
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_ID UINT32_C(0x1000)
- /* This bit must be '1' for the led2_state field to be configured. */
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_STATE UINT32_C(0x2000)
- /* This bit must be '1' for the led2_color field to be configured. */
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_COLOR UINT32_C(0x4000)
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_GROUP_ID \
+ UINT32_C(0x800)
+ /*
+ * This bit must be '1' for the led2_id field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_ID \
+ UINT32_C(0x1000)
+ /*
+ * This bit must be '1' for the led2_state field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_STATE \
+ UINT32_C(0x2000)
+ /*
+ * This bit must be '1' for the led2_color field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_COLOR \
+ UINT32_C(0x4000)
/*
* This bit must be '1' for the led2_blink_on field to be
* configured.
*/
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_BLINK_ON UINT32_C(0x8000)
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_BLINK_ON \
+ UINT32_C(0x8000)
/*
* This bit must be '1' for the led2_blink_off field to be
* configured.
*/
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_BLINK_OFF UINT32_C(0x10000)
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_BLINK_OFF \
+ UINT32_C(0x10000)
/*
* This bit must be '1' for the led2_group_id field to be
* configured.
*/
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_GROUP_ID UINT32_C(0x20000)
- /* This bit must be '1' for the led3_id field to be configured. */
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_ID UINT32_C(0x40000)
- /* This bit must be '1' for the led3_state field to be configured. */
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_STATE UINT32_C(0x80000)
- /* This bit must be '1' for the led3_color field to be configured. */
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_COLOR UINT32_C(0x100000)
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_GROUP_ID \
+ UINT32_C(0x20000)
+ /*
+ * This bit must be '1' for the led3_id field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_ID \
+ UINT32_C(0x40000)
+ /*
+ * This bit must be '1' for the led3_state field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_STATE \
+ UINT32_C(0x80000)
+ /*
+ * This bit must be '1' for the led3_color field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_COLOR \
+ UINT32_C(0x100000)
/*
* This bit must be '1' for the led3_blink_on field to be
* configured.
*/
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_BLINK_ON UINT32_C(0x200000)
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_BLINK_ON \
+ UINT32_C(0x200000)
/*
* This bit must be '1' for the led3_blink_off field to be
* configured.
*/
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_BLINK_OFF \
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_BLINK_OFF \
UINT32_C(0x400000)
/*
* This bit must be '1' for the led3_group_id field to be
* configured.
*/
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_GROUP_ID UINT32_C(0x800000)
- uint16_t port_id;
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_GROUP_ID \
+ UINT32_C(0x800000)
/* Port ID of port whose LEDs are configured. */
- uint8_t num_leds;
+ uint16_t port_id;
/*
- * The number of LEDs that are being configured. Up to 4 LEDs
- * can be configured with this command.
+ * The number of LEDs that are being configured.
+ * Up to 4 LEDs can be configured with this command.
*/
- uint8_t rsvd;
+ uint8_t num_leds;
/* Reserved field. */
- uint8_t led0_id;
+ uint8_t rsvd;
/* An identifier for the LED #0. */
- uint8_t led0_state;
+ uint8_t led0_id;
/* The requested state of the LED #0. */
+ uint8_t led0_state;
/* Default state of the LED */
- #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_DEFAULT UINT32_C(0x0)
/* Off */
- #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_OFF UINT32_C(0x1)
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_OFF UINT32_C(0x1)
/* On */
- #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_ON UINT32_C(0x2)
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_ON UINT32_C(0x2)
/* Blink */
- #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINK UINT32_C(0x3)
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINK UINT32_C(0x3)
/* Blink Alternately */
- #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT UINT32_C(0x4)
- uint8_t led0_color;
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT UINT32_C(0x4)
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_LAST \
+ HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT
/* The requested color of LED #0. */
+ uint8_t led0_color;
/* Default */
- #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_DEFAULT UINT32_C(0x0)
/* Amber */
- #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_AMBER UINT32_C(0x1)
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_AMBER UINT32_C(0x1)
/* Green */
- #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREEN UINT32_C(0x2)
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREEN UINT32_C(0x2)
/* Green or Amber */
- #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREENAMBER UINT32_C(0x3)
- uint8_t unused_0;
- uint16_t led0_blink_on;
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREENAMBER UINT32_C(0x3)
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_LAST \
+ HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREENAMBER
+ uint8_t unused_0;
/*
- * If the LED #0 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED on
- * between cycles.
+ * If the LED #0 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED on between cycles.
*/
- uint16_t led0_blink_off;
+ uint16_t led0_blink_on;
/*
- * If the LED #0 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED off
- * between cycles.
+ * If the LED #0 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED off between cycles.
*/
- uint8_t led0_group_id;
+ uint16_t led0_blink_off;
/*
- * An identifier for the group of LEDs that LED #0 belongs to.
- * If set to 0, then the LED #0 shall not be grouped and shall
- * be treated as an individual resource. For all other non-zero
- * values of this field, LED #0 shall be grouped together with
- * the LEDs with the same group ID value.
+ * An identifier for the group of LEDs that LED #0 belongs
+ * to.
+ * If set to 0, then the LED #0 shall not be grouped and
+ * shall be treated as an individual resource.
+ * For all other non-zero values of this field, LED #0 shall
+ * be grouped together with the LEDs with the same group ID
+ * value.
*/
- uint8_t rsvd0;
+ uint8_t led0_group_id;
/* Reserved field. */
- uint8_t led1_id;
+ uint8_t rsvd0;
/* An identifier for the LED #1. */
- uint8_t led1_state;
+ uint8_t led1_id;
/* The requested state of the LED #1. */
+ uint8_t led1_state;
/* Default state of the LED */
- #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_DEFAULT UINT32_C(0x0)
/* Off */
- #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_OFF UINT32_C(0x1)
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_OFF UINT32_C(0x1)
/* On */
- #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_ON UINT32_C(0x2)
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_ON UINT32_C(0x2)
/* Blink */
- #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINK UINT32_C(0x3)
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINK UINT32_C(0x3)
/* Blink Alternately */
- #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINKALT UINT32_C(0x4)
- uint8_t led1_color;
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINKALT UINT32_C(0x4)
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_LAST \
+ HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINKALT
/* The requested color of LED #1. */
+ uint8_t led1_color;
/* Default */
- #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_DEFAULT UINT32_C(0x0)
/* Amber */
- #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_AMBER UINT32_C(0x1)
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_AMBER UINT32_C(0x1)
/* Green */
- #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREEN UINT32_C(0x2)
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREEN UINT32_C(0x2)
/* Green or Amber */
- #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREENAMBER UINT32_C(0x3)
- uint8_t unused_1;
- uint16_t led1_blink_on;
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREENAMBER UINT32_C(0x3)
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_LAST \
+ HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREENAMBER
+ uint8_t unused_1;
/*
- * If the LED #1 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED on
- * between cycles.
+ * If the LED #1 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED on between cycles.
*/
- uint16_t led1_blink_off;
+ uint16_t led1_blink_on;
/*
- * If the LED #1 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED off
- * between cycles.
+ * If the LED #1 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED off between cycles.
*/
- uint8_t led1_group_id;
+ uint16_t led1_blink_off;
/*
- * An identifier for the group of LEDs that LED #1 belongs to.
- * If set to 0, then the LED #1 shall not be grouped and shall
- * be treated as an individual resource. For all other non-zero
- * values of this field, LED #1 shall be grouped together with
- * the LEDs with the same group ID value.
+ * An identifier for the group of LEDs that LED #1 belongs
+ * to.
+ * If set to 0, then the LED #1 shall not be grouped and
+ * shall be treated as an individual resource.
+ * For all other non-zero values of this field, LED #1 shall
+ * be grouped together with the LEDs with the same group ID
+ * value.
*/
- uint8_t rsvd1;
+ uint8_t led1_group_id;
/* Reserved field. */
- uint8_t led2_id;
+ uint8_t rsvd1;
/* An identifier for the LED #2. */
- uint8_t led2_state;
+ uint8_t led2_id;
/* The requested state of the LED #2. */
+ uint8_t led2_state;
/* Default state of the LED */
- #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_DEFAULT UINT32_C(0x0)
/* Off */
- #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_OFF UINT32_C(0x1)
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_OFF UINT32_C(0x1)
/* On */
- #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_ON UINT32_C(0x2)
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_ON UINT32_C(0x2)
/* Blink */
- #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINK UINT32_C(0x3)
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINK UINT32_C(0x3)
/* Blink Alternately */
- #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINKALT UINT32_C(0x4)
- uint8_t led2_color;
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINKALT UINT32_C(0x4)
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_LAST \
+ HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINKALT
/* The requested color of LED #2. */
+ uint8_t led2_color;
/* Default */
- #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_DEFAULT UINT32_C(0x0)
/* Amber */
- #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_AMBER UINT32_C(0x1)
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_AMBER UINT32_C(0x1)
/* Green */
- #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREEN UINT32_C(0x2)
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREEN UINT32_C(0x2)
/* Green or Amber */
- #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREENAMBER UINT32_C(0x3)
- uint8_t unused_2;
- uint16_t led2_blink_on;
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREENAMBER UINT32_C(0x3)
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_LAST \
+ HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREENAMBER
+ uint8_t unused_2;
/*
- * If the LED #2 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED on
- * between cycles.
+ * If the LED #2 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED on between cycles.
*/
- uint16_t led2_blink_off;
+ uint16_t led2_blink_on;
/*
- * If the LED #2 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED off
- * between cycles.
+ * If the LED #2 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED off between cycles.
*/
- uint8_t led2_group_id;
+ uint16_t led2_blink_off;
/*
- * An identifier for the group of LEDs that LED #2 belongs to.
- * If set to 0, then the LED #2 shall not be grouped and shall
- * be treated as an individual resource. For all other non-zero
- * values of this field, LED #2 shall be grouped together with
- * the LEDs with the same group ID value.
+ * An identifier for the group of LEDs that LED #2 belongs
+ * to.
+ * If set to 0, then the LED #2 shall not be grouped and
+ * shall be treated as an individual resource.
+ * For all other non-zero values of this field, LED #2 shall
+ * be grouped together with the LEDs with the same group ID
+ * value.
*/
- uint8_t rsvd2;
+ uint8_t led2_group_id;
/* Reserved field. */
- uint8_t led3_id;
+ uint8_t rsvd2;
/* An identifier for the LED #3. */
- uint8_t led3_state;
+ uint8_t led3_id;
/* The requested state of the LED #3. */
+ uint8_t led3_state;
/* Default state of the LED */
- #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_DEFAULT UINT32_C(0x0)
/* Off */
- #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_OFF UINT32_C(0x1)
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_OFF UINT32_C(0x1)
/* On */
- #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_ON UINT32_C(0x2)
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_ON UINT32_C(0x2)
/* Blink */
- #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINK UINT32_C(0x3)
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINK UINT32_C(0x3)
/* Blink Alternately */
- #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINKALT UINT32_C(0x4)
- uint8_t led3_color;
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINKALT UINT32_C(0x4)
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_LAST \
+ HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINKALT
/* The requested color of LED #3. */
+ uint8_t led3_color;
/* Default */
- #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_DEFAULT UINT32_C(0x0)
/* Amber */
- #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_AMBER UINT32_C(0x1)
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_AMBER UINT32_C(0x1)
/* Green */
- #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREEN UINT32_C(0x2)
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREEN UINT32_C(0x2)
/* Green or Amber */
- #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREENAMBER UINT32_C(0x3)
- uint8_t unused_3;
- uint16_t led3_blink_on;
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREENAMBER UINT32_C(0x3)
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_LAST \
+ HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREENAMBER
+ uint8_t unused_3;
/*
- * If the LED #3 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED on
- * between cycles.
+ * If the LED #3 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED on between cycles.
*/
- uint16_t led3_blink_off;
+ uint16_t led3_blink_on;
/*
- * If the LED #3 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED off
- * between cycles.
+ * If the LED #3 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED off between cycles.
*/
- uint8_t led3_group_id;
+ uint16_t led3_blink_off;
/*
- * An identifier for the group of LEDs that LED #3 belongs to.
- * If set to 0, then the LED #3 shall not be grouped and shall
- * be treated as an individual resource. For all other non-zero
- * values of this field, LED #3 shall be grouped together with
- * the LEDs with the same group ID value.
+ * An identifier for the group of LEDs that LED #3 belongs
+ * to.
+ * If set to 0, then the LED #3 shall not be grouped and
+ * shall be treated as an individual resource.
+ * For all other non-zero values of this field, LED #3 shall
+ * be grouped together with the LEDs with the same group ID
+ * value.
*/
- uint8_t rsvd3;
+ uint8_t led3_group_id;
/* Reserved field. */
+ uint8_t rsvd3;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* hwrm_port_led_cfg_output (size:128b/16B) */
struct hwrm_port_led_cfg_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
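
Finally, a hedged sketch of driving one LED through the configuration request above, e.g. blinking LED #0 as a port-identify indication. Only the fields actually set are flagged in `enables`; everything else is left at its default, and sending the command is again left to the driver's own HWRM plumbing.

#include <stdint.h>

static void example_blink_led0(struct hwrm_port_led_cfg_input *req,
			       uint16_t port_id,
			       uint16_t on_ms, uint16_t off_ms)
{
	req->port_id = port_id;
	req->num_leds = 1;			/* configuring a single LED */
	req->enables = HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_ID |
		       HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_STATE |
		       HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_ON |
		       HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_OFF;
	req->led0_id = 0;
	req->led0_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINK;
	req->led0_blink_on = on_ms;		/* ms the LED stays on per cycle */
	req->led0_blink_off = off_ms;		/* ms the LED stays off per cycle */
}
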
-/* hwrm_port_led_qcfg */
-/*
- * Description: This function is used to query configuration of LEDs on a given
- * port. Each port has individual set of LEDs associated with it. These LEDs are
- * used for speed/link configuration as well as activity indicator
- * configuration. Up to three LEDs can be configured, one for activity and two
- * for speeds.
- */
-/* Input (24 bytes) */
+/**********************
+ * hwrm_port_led_qcfg *
+ **********************/
+
+
+/* hwrm_port_led_qcfg_input (size:192b/24B) */
struct hwrm_port_led_qcfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t port_id;
+ uint64_t resp_addr;
/* Port ID of port whose LED configuration is being queried. */
- uint16_t unused_0[3];
+ uint16_t port_id;
+ uint8_t unused_0[6];
} __attribute__((packed));
-/* Output (56 bytes) */
+/* hwrm_port_led_qcfg_output (size:448b/56B) */
struct hwrm_port_led_qcfg_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint8_t num_leds;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/*
- * The number of LEDs that are configured on this port. Up to 4
- * LEDs can be returned in the response.
+ * The number of LEDs that are configured on this port.
+ * Up to 4 LEDs can be returned in the response.
*/
- uint8_t led0_id;
+ uint8_t num_leds;
/* An identifier for the LED #0. */
- uint8_t led0_type;
+ uint8_t led0_id;
/* The type of LED #0. */
+ uint8_t led0_type;
/* Speed LED */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_SPEED UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_SPEED UINT32_C(0x0)
/* Activity LED */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_ACTIVITY UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_ACTIVITY UINT32_C(0x1)
/* Invalid */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_INVALID UINT32_C(0xff)
- uint8_t led0_state;
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_INVALID
/* The current state of the LED #0. */
+ uint8_t led0_state;
/* Default state of the LED */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT UINT32_C(0x0)
/* Off */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_OFF UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_OFF UINT32_C(0x1)
/* On */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_ON UINT32_C(0x2)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_ON UINT32_C(0x2)
/* Blink */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINK UINT32_C(0x3)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINK UINT32_C(0x3)
/* Blink Alternately */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINKALT UINT32_C(0x4)
- uint8_t led0_color;
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINKALT UINT32_C(0x4)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINKALT
/* The color of LED #0. */
+ uint8_t led0_color;
/* Default */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_DEFAULT UINT32_C(0x0)
/* Amber */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_AMBER UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_AMBER UINT32_C(0x1)
/* Green */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREEN UINT32_C(0x2)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREEN UINT32_C(0x2)
/* Green or Amber */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREENAMBER UINT32_C(0x3)
- uint8_t unused_0;
- uint16_t led0_blink_on;
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREENAMBER UINT32_C(0x3)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREENAMBER
+ uint8_t unused_0;
/*
- * If the LED #0 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED on
- * between cycles.
+ * If the LED #0 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED on between cycles.
*/
- uint16_t led0_blink_off;
+ uint16_t led0_blink_on;
/*
- * If the LED #0 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED off
- * between cycles.
+ * If the LED #0 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED off between cycles.
*/
- uint8_t led0_group_id;
+ uint16_t led0_blink_off;
/*
- * An identifier for the group of LEDs that LED #0 belongs to.
- * If set to 0, then the LED #0 is not grouped. For all other
- * non-zero values of this field, LED #0 is grouped together
- * with the LEDs with the same group ID value.
+ * An identifier for the group of LEDs that LED #0 belongs
+ * to.
+ * If set to 0, then the LED #0 is not grouped.
+ * For all other non-zero values of this field, LED #0 is
+ * grouped together with the LEDs with the same group ID
+ * value.
*/
- uint8_t led1_id;
+ uint8_t led0_group_id;
/* An identifier for the LED #1. */
- uint8_t led1_type;
+ uint8_t led1_id;
/* The type of LED #1. */
+ uint8_t led1_type;
/* Speed LED */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_SPEED UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_SPEED UINT32_C(0x0)
/* Activity LED */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_ACTIVITY UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_ACTIVITY UINT32_C(0x1)
/* Invalid */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_INVALID UINT32_C(0xff)
- uint8_t led1_state;
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_INVALID
/* The current state of the LED #1. */
+ uint8_t led1_state;
/* Default state of the LED */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_DEFAULT UINT32_C(0x0)
/* Off */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_OFF UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_OFF UINT32_C(0x1)
/* On */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_ON UINT32_C(0x2)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_ON UINT32_C(0x2)
/* Blink */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINK UINT32_C(0x3)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINK UINT32_C(0x3)
/* Blink Alternately */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINKALT UINT32_C(0x4)
- uint8_t led1_color;
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINKALT UINT32_C(0x4)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINKALT
/* The color of LED #1. */
+ uint8_t led1_color;
/* Default */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_DEFAULT UINT32_C(0x0)
/* Amber */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_AMBER UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_AMBER UINT32_C(0x1)
/* Green */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREEN UINT32_C(0x2)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREEN UINT32_C(0x2)
/* Green or Amber */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREENAMBER UINT32_C(0x3)
- uint8_t unused_1;
- uint16_t led1_blink_on;
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREENAMBER UINT32_C(0x3)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREENAMBER
+ uint8_t unused_1;
/*
- * If the LED #1 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED on
- * between cycles.
+ * If the LED #1 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED on between cycles.
*/
- uint16_t led1_blink_off;
+ uint16_t led1_blink_on;
/*
- * If the LED #1 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED off
- * between cycles.
+ * If the LED #1 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED off between cycles.
*/
- uint8_t led1_group_id;
+ uint16_t led1_blink_off;
/*
- * An identifier for the group of LEDs that LED #1 belongs to.
- * If set to 0, then the LED #1 is not grouped. For all other
- * non-zero values of this field, LED #1 is grouped together
- * with the LEDs with the same group ID value.
+ * An identifier for the group of LEDs that LED #1 belongs
+ * to.
+ * If set to 0, then the LED #1 is not grouped.
+ * For all other non-zero values of this field, LED #1 is
+ * grouped together with the LEDs with the same group ID
+ * value.
*/
- uint8_t led2_id;
+ uint8_t led1_group_id;
/* An identifier for the LED #2. */
- uint8_t led2_type;
+ uint8_t led2_id;
/* The type of LED #2. */
+ uint8_t led2_type;
/* Speed LED */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_SPEED UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_SPEED UINT32_C(0x0)
/* Activity LED */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_ACTIVITY UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_ACTIVITY UINT32_C(0x1)
/* Invalid */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_INVALID UINT32_C(0xff)
- uint8_t led2_state;
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_INVALID
/* The current state of the LED #2. */
+ uint8_t led2_state;
/* Default state of the LED */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_DEFAULT UINT32_C(0x0)
/* Off */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_OFF UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_OFF UINT32_C(0x1)
/* On */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_ON UINT32_C(0x2)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_ON UINT32_C(0x2)
/* Blink */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINK UINT32_C(0x3)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINK UINT32_C(0x3)
/* Blink Alternately */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINKALT UINT32_C(0x4)
- uint8_t led2_color;
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINKALT UINT32_C(0x4)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINKALT
/* The color of LED #2. */
+ uint8_t led2_color;
/* Default */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_DEFAULT UINT32_C(0x0)
/* Amber */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_AMBER UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_AMBER UINT32_C(0x1)
/* Green */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREEN UINT32_C(0x2)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREEN UINT32_C(0x2)
/* Green or Amber */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREENAMBER UINT32_C(0x3)
- uint8_t unused_2;
- uint16_t led2_blink_on;
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREENAMBER UINT32_C(0x3)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREENAMBER
+ uint8_t unused_2;
/*
- * If the LED #2 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED on
- * between cycles.
+ * If the LED #2 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED on between cycles.
*/
- uint16_t led2_blink_off;
+ uint16_t led2_blink_on;
/*
- * If the LED #2 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED off
- * between cycles.
+ * If the LED #2 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED off between cycles.
*/
- uint8_t led2_group_id;
+ uint16_t led2_blink_off;
/*
- * An identifier for the group of LEDs that LED #2 belongs to.
- * If set to 0, then the LED #2 is not grouped. For all other
- * non-zero values of this field, LED #2 is grouped together
- * with the LEDs with the same group ID value.
+ * An identifier for the group of LEDs that LED #2 belongs
+ * to.
+ * If set to 0, then the LED #2 is not grouped.
+ * For all other non-zero values of this field, LED #2 is
+ * grouped together with the LEDs with the same group ID
+ * value.
*/
- uint8_t led3_id;
+ uint8_t led2_group_id;
/* An identifier for the LED #3. */
- uint8_t led3_type;
+ uint8_t led3_id;
/* The type of LED #3. */
+ uint8_t led3_type;
/* Speed LED */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_SPEED UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_SPEED UINT32_C(0x0)
/* Activity LED */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_ACTIVITY UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_ACTIVITY UINT32_C(0x1)
/* Invalid */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_INVALID UINT32_C(0xff)
- uint8_t led3_state;
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_INVALID
/* The current state of the LED #3. */
+ uint8_t led3_state;
/* Default state of the LED */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_DEFAULT UINT32_C(0x0)
/* Off */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_OFF UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_OFF UINT32_C(0x1)
/* On */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_ON UINT32_C(0x2)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_ON UINT32_C(0x2)
/* Blink */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINK UINT32_C(0x3)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINK UINT32_C(0x3)
/* Blink Alternately */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINKALT UINT32_C(0x4)
- uint8_t led3_color;
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINKALT UINT32_C(0x4)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINKALT
/* The color of LED #3. */
+ uint8_t led3_color;
/* Default */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_DEFAULT UINT32_C(0x0)
/* Amber */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_AMBER UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_AMBER UINT32_C(0x1)
/* Green */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREEN UINT32_C(0x2)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREEN UINT32_C(0x2)
/* Green or Amber */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREENAMBER UINT32_C(0x3)
- uint8_t unused_3;
- uint16_t led3_blink_on;
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREENAMBER UINT32_C(0x3)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREENAMBER
+ uint8_t unused_3;
/*
- * If the LED #3 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED on
- * between cycles.
+ * If the LED #3 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED on between cycles.
*/
- uint16_t led3_blink_off;
+ uint16_t led3_blink_on;
/*
- * If the LED #3 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED off
- * between cycles.
+ * If the LED #3 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED off between cycles.
*/
- uint8_t led3_group_id;
+ uint16_t led3_blink_off;
/*
- * An identifier for the group of LEDs that LED #3 belongs to.
- * If set to 0, then the LED #3 is not grouped. For all other
- * non-zero values of this field, LED #3 is grouped together
- * with the LEDs with the same group ID value.
+ * An identifier for the group of LEDs that LED #3 belongs
+ * to.
+ * If set to 0, then the LED #3 is not grouped.
+ * For all other non-zero values of this field, LED #3 is
+ * grouped together with the LEDs with the same group ID
+ * value.
*/
- uint8_t unused_4;
- uint16_t unused_5;
- uint8_t unused_6;
- uint8_t unused_7;
- uint8_t unused_8;
- uint8_t valid;
+ uint8_t led3_group_id;
+ uint8_t unused_4[6];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
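As a rough sketch (not part of the patch), a consumer of this response might read back LED #0 as follows, assuming resp points to a completed reply; the helper name is illustrative, and the 16-bit blink fields are little-endian on the wire, hence rte_le_to_cpu_16():

#include <stdio.h>
#include <rte_byteorder.h>

static void show_led0(const struct hwrm_port_led_qcfg_output *resp)
{
	/* Per the comment above, the contents are only meaningful once
	 * firmware has written the 'valid' byte.
	 */
	if (resp->valid != 1)
		return;

	printf("LED0 id=%d type=%d state=%d color=%d\n",
	       resp->led0_id, resp->led0_type,
	       resp->led0_state, resp->led0_color);

	if (resp->led0_state == HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINK ||
	    resp->led0_state == HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINKALT)
		printf("  blink: %d ms on / %d ms off\n",
		       rte_le_to_cpu_16(resp->led0_blink_on),
		       rte_le_to_cpu_16(resp->led0_blink_off));
}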
-/* hwrm_port_led_qcaps */
-/*
- * Description: This function is used to query capabilities of LEDs on a given
- * port. Each port has individual set of LEDs associated with it. These LEDs are
- * used for speed/link configuration as well as activity indicator
- * configuration.
- */
-/* Input (24 bytes) */
+/***********************
+ * hwrm_port_led_qcaps *
+ ***********************/
+
+
+/* hwrm_port_led_qcaps_input (size:192b/24B) */
struct hwrm_port_led_qcaps_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t port_id;
+ uint64_t resp_addr;
/* Port ID of port whose LED configuration is being queried. */
- uint16_t unused_0[3];
+ uint16_t port_id;
+ uint8_t unused_0[6];
} __attribute__((packed));
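Only port_id is command-specific here; the rest is the common HWRM header described above. A hypothetical way to populate the request (the HWRM_PORT_LED_QCAPS request-type constant and the DMA address of the response buffer come from elsewhere in the driver and are assumed):

#include <stdint.h>
#include <string.h>
#include <rte_byteorder.h>

static void prep_led_qcaps(struct hwrm_port_led_qcaps_input *req,
			   uint16_t port_id, uint64_t resp_dma_addr)
{
	memset(req, 0, sizeof(*req));	/* also clears seq_id and unused_0[] */
	req->req_type = rte_cpu_to_le_16(HWRM_PORT_LED_QCAPS);	/* assumed constant */
	req->cmpl_ring = rte_cpu_to_le_16(UINT16_MAX);	/* -1: no completion-ring event */
	req->target_id = rte_cpu_to_le_16(0xffff);	/* 0xFFFF addresses the HWRM */
	req->resp_addr = rte_cpu_to_le_64(resp_dma_addr);
	req->port_id = rte_cpu_to_le_16(port_id);
}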
-/* Output (48 bytes) */
+/* hwrm_port_led_qcaps_output (size:384b/48B) */
struct hwrm_port_led_qcaps_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint8_t num_leds;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/*
- * The number of LEDs that are configured on this port. Up to 4
- * LEDs can be returned in the response.
+ * The number of LEDs that are configured on this port.
+ * Up to 4 LEDs can be returned in the response.
*/
- uint8_t unused_0[3];
+ uint8_t num_leds;
/* Reserved for future use. */
- uint8_t led0_id;
+ uint8_t unused[3];
/* An identifier for the LED #0. */
- uint8_t led0_type;
+ uint8_t led0_id;
/* The type of LED #0. */
+ uint8_t led0_type;
/* Speed LED */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_SPEED UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_SPEED UINT32_C(0x0)
/* Activity LED */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_ACTIVITY UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_ACTIVITY UINT32_C(0x1)
/* Invalid */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_INVALID UINT32_C(0xff)
- uint8_t led0_group_id;
- /*
- * An identifier for the group of LEDs that LED #0 belongs to.
- * If set to 0, then the LED #0 cannot be grouped. For all other
- * non-zero values of this field, LED #0 is grouped together
- * with the LEDs with the same group ID value.
- */
- uint8_t unused_1;
- uint16_t led0_state_caps;
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_LAST \
+ HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_INVALID
+ /*
+ * An identifier for the group of LEDs that LED #0 belongs
+ * to.
+ * If set to 0, then the LED #0 cannot be grouped.
+ * For all other non-zero values of this field, LED #0 is
+ * grouped together with the LEDs with the same group ID
+ * value.
+ */
+ uint8_t led0_group_id;
+ uint8_t unused_0;
/* The states supported by LED #0. */
+ uint16_t led0_state_caps;
/*
- * If set to 1, this LED is enabled. If set to 0, this LED is
- * disabled.
+ * If set to 1, this LED is enabled.
+ * If set to 0, this LED is disabled.
*/
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ENABLED UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ENABLED \
+ UINT32_C(0x1)
/*
- * If set to 1, off state is supported on this LED. If set to 0,
- * off state is not supported on this LED.
+ * If set to 1, off state is supported on this LED.
+ * If set to 0, off state is not supported on this LED.
*/
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_OFF_SUPPORTED \
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_OFF_SUPPORTED \
UINT32_C(0x2)
/*
- * If set to 1, on state is supported on this LED. If set to 0,
- * on state is not supported on this LED.
+ * If set to 1, on state is supported on this LED.
+ * If set to 0, on state is not supported on this LED.
*/
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ON_SUPPORTED \
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ON_SUPPORTED \
UINT32_C(0x4)
/*
- * If set to 1, blink state is supported on this LED. If set to
- * 0, blink state is not supported on this LED.
+ * If set to 1, blink state is supported on this LED.
+ * If set to 0, blink state is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_BLINK_SUPPORTED \
UINT32_C(0x8)
/*
- * If set to 1, blink_alt state is supported on this LED. If set
- * to 0, blink_alt state is not supported on this LED.
+ * If set to 1, blink_alt state is supported on this LED.
+ * If set to 0, blink_alt state is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED \
UINT32_C(0x10)
- uint16_t led0_color_caps;
/* The colors supported by LED #0. */
- /* reserved */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_RSVD UINT32_C(0x1)
+ uint16_t led0_color_caps;
+ /* reserved. */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_RSVD \
+ UINT32_C(0x1)
/*
- * If set to 1, Amber color is supported on this LED. If set to
- * 0, Amber color is not supported on this LED.
+ * If set to 1, Amber color is supported on this LED.
+ * If set to 0, Amber color is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_AMBER_SUPPORTED \
UINT32_C(0x2)
/*
- * If set to 1, Green color is supported on this LED. If set to
- * 0, Green color is not supported on this LED.
+ * If set to 1, Green color is supported on this LED.
+ * If set to 0, Green color is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_GREEN_SUPPORTED \
UINT32_C(0x4)
- uint8_t led1_id;
/* An identifier for the LED #1. */
- uint8_t led1_type;
+ uint8_t led1_id;
/* The type of LED #1. */
+ uint8_t led1_type;
/* Speed LED */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_SPEED UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_SPEED UINT32_C(0x0)
/* Activity LED */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_ACTIVITY UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_ACTIVITY UINT32_C(0x1)
/* Invalid */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_INVALID UINT32_C(0xff)
- uint8_t led1_group_id;
- /*
- * An identifier for the group of LEDs that LED #1 belongs to.
- * If set to 0, then the LED #0 cannot be grouped. For all other
- * non-zero values of this field, LED #0 is grouped together
- * with the LEDs with the same group ID value.
- */
- uint8_t unused_2;
- uint16_t led1_state_caps;
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_LAST \
+ HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_INVALID
+ /*
+ * An identifier for the group of LEDs that LED #1 belongs
+ * to.
+ * If set to 0, then the LED #1 cannot be grouped.
+ * For all other non-zero values of this field, LED #1 is
+ * grouped together with the LEDs with the same group ID
+ * value.
+ */
+ uint8_t led1_group_id;
+ uint8_t unused_1;
/* The states supported by LED #1. */
+ uint16_t led1_state_caps;
/*
- * If set to 1, this LED is enabled. If set to 0, this LED is
- * disabled.
+ * If set to 1, this LED is enabled.
+ * If set to 0, this LED is disabled.
*/
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_ENABLED UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_ENABLED \
+ UINT32_C(0x1)
/*
- * If set to 1, off state is supported on this LED. If set to 0,
- * off state is not supported on this LED.
+ * If set to 1, off state is supported on this LED.
+ * If set to 0, off state is not supported on this LED.
*/
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_OFF_SUPPORTED \
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_OFF_SUPPORTED \
UINT32_C(0x2)
/*
- * If set to 1, on state is supported on this LED. If set to 0,
- * on state is not supported on this LED.
+ * If set to 1, on state is supported on this LED.
+ * If set to 0, on state is not supported on this LED.
*/
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_ON_SUPPORTED \
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_ON_SUPPORTED \
UINT32_C(0x4)
/*
- * If set to 1, blink state is supported on this LED. If set to
- * 0, blink state is not supported on this LED.
+ * If set to 1, blink state is supported on this LED.
+ * If set to 0, blink state is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_BLINK_SUPPORTED \
UINT32_C(0x8)
/*
- * If set to 1, blink_alt state is supported on this LED. If set
- * to 0, blink_alt state is not supported on this LED.
+ * If set to 1, blink_alt state is supported on this LED.
+ * If set to 0, blink_alt state is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED \
UINT32_C(0x10)
- uint16_t led1_color_caps;
/* The colors supported by LED #1. */
- /* reserved */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_RSVD UINT32_C(0x1)
+ uint16_t led1_color_caps;
+ /* reserved. */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_RSVD \
+ UINT32_C(0x1)
/*
- * If set to 1, Amber color is supported on this LED. If set to
- * 0, Amber color is not supported on this LED.
+ * If set to 1, Amber color is supported on this LED.
+ * If set to 0, Amber color is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_AMBER_SUPPORTED \
UINT32_C(0x2)
/*
- * If set to 1, Green color is supported on this LED. If set to
- * 0, Green color is not supported on this LED.
+ * If set to 1, Green color is supported on this LED.
+ * If set to 0, Green color is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_GREEN_SUPPORTED \
UINT32_C(0x4)
- uint8_t led2_id;
/* An identifier for the LED #2. */
- uint8_t led2_type;
+ uint8_t led2_id;
/* The type of LED #2. */
+ uint8_t led2_type;
/* Speed LED */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_SPEED UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_SPEED UINT32_C(0x0)
/* Activity LED */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_ACTIVITY UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_ACTIVITY UINT32_C(0x1)
/* Invalid */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_INVALID UINT32_C(0xff)
- uint8_t led2_group_id;
- /*
- * An identifier for the group of LEDs that LED #0 belongs to.
- * If set to 0, then the LED #0 cannot be grouped. For all other
- * non-zero values of this field, LED #0 is grouped together
- * with the LEDs with the same group ID value.
- */
- uint8_t unused_3;
- uint16_t led2_state_caps;
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_LAST \
+ HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_INVALID
+ /*
+ * An identifier for the group of LEDs that LED #2 belongs
+ * to.
+ * If set to 0, then the LED #2 cannot be grouped.
+ * For all other non-zero values of this field, LED #2 is
+ * grouped together with the LEDs with the same group ID
+ * value.
+ */
+ uint8_t led2_group_id;
+ uint8_t unused_2;
/* The states supported by LED #2. */
+ uint16_t led2_state_caps;
/*
- * If set to 1, this LED is enabled. If set to 0, this LED is
- * disabled.
+ * If set to 1, this LED is enabled.
+ * If set to 0, this LED is disabled.
*/
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_ENABLED UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_ENABLED \
+ UINT32_C(0x1)
/*
- * If set to 1, off state is supported on this LED. If set to 0,
- * off state is not supported on this LED.
+ * If set to 1, off state is supported on this LED.
+ * If set to 0, off state is not supported on this LED.
*/
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_OFF_SUPPORTED \
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_OFF_SUPPORTED \
UINT32_C(0x2)
/*
- * If set to 1, on state is supported on this LED. If set to 0,
- * on state is not supported on this LED.
+ * If set to 1, on state is supported on this LED.
+ * If set to 0, on state is not supported on this LED.
*/
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_ON_SUPPORTED \
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_ON_SUPPORTED \
UINT32_C(0x4)
/*
- * If set to 1, blink state is supported on this LED. If set to
- * 0, blink state is not supported on this LED.
+ * If set to 1, blink state is supported on this LED.
+ * If set to 0, blink state is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_BLINK_SUPPORTED \
UINT32_C(0x8)
/*
- * If set to 1, blink_alt state is supported on this LED. If set
- * to 0, blink_alt state is not supported on this LED.
+ * If set to 1, blink_alt state is supported on this LED.
+ * If set to 0, blink_alt state is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED \
UINT32_C(0x10)
- uint16_t led2_color_caps;
/* The colors supported by LED #2. */
- /* reserved */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_RSVD UINT32_C(0x1)
+ uint16_t led2_color_caps;
+ /* reserved. */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_RSVD \
+ UINT32_C(0x1)
/*
- * If set to 1, Amber color is supported on this LED. If set to
- * 0, Amber color is not supported on this LED.
+ * If set to 1, Amber color is supported on this LED.
+ * If set to 0, Amber color is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_AMBER_SUPPORTED \
UINT32_C(0x2)
/*
- * If set to 1, Green color is supported on this LED. If set to
- * 0, Green color is not supported on this LED.
+ * If set to 1, Green color is supported on this LED.
+ * If set to 0, Green color is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_GREEN_SUPPORTED \
UINT32_C(0x4)
- uint8_t led3_id;
/* An identifier for the LED #3. */
- uint8_t led3_type;
+ uint8_t led3_id;
/* The type of LED #3. */
+ uint8_t led3_type;
/* Speed LED */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_SPEED UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_SPEED UINT32_C(0x0)
/* Activity LED */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_ACTIVITY UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_ACTIVITY UINT32_C(0x1)
/* Invalid */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_INVALID UINT32_C(0xff)
- uint8_t led3_group_id;
- /*
- * An identifier for the group of LEDs that LED #3 belongs to.
- * If set to 0, then the LED #0 cannot be grouped. For all other
- * non-zero values of this field, LED #0 is grouped together
- * with the LEDs with the same group ID value.
- */
- uint8_t unused_4;
- uint16_t led3_state_caps;
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_LAST \
+ HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_INVALID
+ /*
+ * An identifier for the group of LEDs that LED #3 belongs
+ * to.
+ * If set to 0, then the LED #3 cannot be grouped.
+ * For all other non-zero values of this field, LED #3 is
+ * grouped together with the LEDs with the same group ID
+ * value.
+ */
+ uint8_t led3_group_id;
+ uint8_t unused_3;
/* The states supported by LED #3. */
+ uint16_t led3_state_caps;
/*
- * If set to 1, this LED is enabled. If set to 0, this LED is
- * disabled.
+ * If set to 1, this LED is enabled.
+ * If set to 0, this LED is disabled.
*/
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_ENABLED UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_ENABLED \
+ UINT32_C(0x1)
/*
- * If set to 1, off state is supported on this LED. If set to 0,
- * off state is not supported on this LED.
+ * If set to 1, off state is supported on this LED.
+ * If set to 0, off state is not supported on this LED.
*/
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_OFF_SUPPORTED \
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_OFF_SUPPORTED \
UINT32_C(0x2)
/*
- * If set to 1, on state is supported on this LED. If set to 0,
- * on state is not supported on this LED.
+ * If set to 1, on state is supported on this LED.
+ * If set to 0, on state is not supported on this LED.
*/
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_ON_SUPPORTED \
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_ON_SUPPORTED \
UINT32_C(0x4)
/*
- * If set to 1, blink state is supported on this LED. If set to
- * 0, blink state is not supported on this LED.
+ * If set to 1, blink state is supported on this LED.
+ * If set to 0, blink state is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_BLINK_SUPPORTED \
UINT32_C(0x8)
/*
- * If set to 1, blink_alt state is supported on this LED. If set
- * to 0, blink_alt state is not supported on this LED.
+ * If set to 1, blink_alt state is supported on this LED.
+ * If set to 0, blink_alt state is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED \
UINT32_C(0x10)
- uint16_t led3_color_caps;
/* The colors supported by LED #3. */
- /* reserved */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_RSVD UINT32_C(0x1)
+ uint16_t led3_color_caps;
+ /* reserved. */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_RSVD \
+ UINT32_C(0x1)
/*
- * If set to 1, Amber color is supported on this LED. If set to
- * 0, Amber color is not supported on this LED.
+ * If set to 1, Amber color is supported on this LED.
+ * If set to 0, Amber color is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_AMBER_SUPPORTED \
UINT32_C(0x2)
/*
- * If set to 1, Green color is supported on this LED. If set to
- * 0, Green color is not supported on this LED.
+ * If set to 1, Green color is supported on this LED.
+ * If set to 0, Green color is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_GREEN_SUPPORTED \
UINT32_C(0x4)
- uint8_t unused_5;
- uint8_t unused_6;
- uint8_t unused_7;
- uint8_t valid;
+ uint8_t unused_4[3];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
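A minimal sketch of decoding the capability bitmasks above for LED #0, again assuming a completed response; the 16-bit masks are little-endian, and only bits defined above are tested:

#include <stdint.h>
#include <rte_byteorder.h>

/* Returns non-zero if LED #0 is enabled and can blink in amber. */
static int led0_can_blink_amber(const struct hwrm_port_led_qcaps_output *resp)
{
	uint16_t state = rte_le_to_cpu_16(resp->led0_state_caps);
	uint16_t color = rte_le_to_cpu_16(resp->led0_color_caps);

	if (resp->num_leds == 0)
		return 0;	/* no LEDs reported for this port */

	return (state & HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ENABLED) &&
	       (state & HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_BLINK_SUPPORTED) &&
	       (color & HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_AMBER_SUPPORTED);
}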
-/* hwrm_queue_qportcfg */
-/*
- * Description: This function is called by a driver to query queue configuration
- * of a port. # The HWRM shall at least advertise one queue with lossy service
- * profile. # The driver shall use this command to query queue ids before
- * configuring or using any queues. # If a service profile is not set for a
- * queue, then the driver shall not use that queue without configuring a service
- * profile for it. # If the driver is not allowed to configure service profiles,
- * then the driver shall only use queues for which service profiles are pre-
- * configured.
- */
-/* Input (24 bytes) */
+/***********************
+ * hwrm_queue_qportcfg *
+ ***********************/
+
+
+/* hwrm_queue_qportcfg_input (size:192b/24B) */
struct hwrm_queue_qportcfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t flags;
+ uint64_t resp_addr;
+ uint32_t flags;
/*
- * Enumeration denoting the RX, TX type of the resource. This
- * enumeration is used for resources that are similar for both
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
* TX and RX paths of the chip.
*/
- #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH UINT32_C(0x1)
+ #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH UINT32_C(0x1)
/* tx path */
- #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
/* rx path */
- #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
#define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_LAST \
- QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX
- uint16_t port_id;
+ HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX
/*
* Port ID of port for which the queue configuration is being
- * queried. This field is only required when sent by IPC.
+ * queried. This field is only required when sent by IPC.
*/
- uint16_t unused_0;
+ uint16_t port_id;
+ /*
+ * The driver will set this capability when it can use
+ * queue_idx_service_profile to map the queues to the application.
+ */
+ uint8_t drv_qmap_cap;
+ /* disabled */
+ #define HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_DISABLED UINT32_C(0x0)
+ /* enabled */
+ #define HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED UINT32_C(0x1)
+ #define HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_LAST \
+ HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED
+ uint8_t unused_0;
} __attribute__((packed));
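The flags field carries a single path bit (TX or RX). A hedged sketch of building a query for the RX path of a port, advertising drv_qmap_cap; the common header fields follow the same pattern as in the earlier sketch and are omitted:

#include <stdint.h>
#include <string.h>
#include <rte_byteorder.h>

static void prep_qportcfg(struct hwrm_queue_qportcfg_input *req,
			  uint16_t port_id)
{
	memset(req, 0, sizeof(*req));
	/* req_type, cmpl_ring, target_id and resp_addr would be filled in
	 * exactly as for the other commands in this file.
	 */
	req->flags = rte_cpu_to_le_32(HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX);
	req->port_id = rte_cpu_to_le_16(port_id);
	req->drv_qmap_cap = HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
}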
-/* Output (32 bytes) */
+/* hwrm_queue_qportcfg_output (size:256b/32B) */
struct hwrm_queue_qportcfg_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint8_t max_configurable_queues;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/*
* The maximum number of queues that can be configured on this
- * port. Valid values range from 1 through 8.
+ * port.
+ * Valid values range from 1 through 8.
*/
- uint8_t max_configurable_lossless_queues;
+ uint8_t max_configurable_queues;
/*
* The maximum number of lossless queues that can be configured
- * on this port. Valid values range from 0 through 8.
+ * on this port.
+ * Valid values range from 0 through 8.
*/
- uint8_t queue_cfg_allowed;
+ uint8_t max_configurable_lossless_queues;
/*
* Bitmask indicating which queues can be configured by the
- * hwrm_queue_cfg command. Each bit represents a specific queue
- * where bit 0 represents queue 0 and bit 7 represents queue 7.
+ * hwrm_queue_cfg command.
+ *
+ * Each bit represents a specific queue where bit 0 represents
+ * queue 0 and bit 7 represents queue 7.
* # A value of 0 indicates that the queue is not configurable
- * by the hwrm_queue_cfg command. # A value of 1 indicates that
- * the queue is configurable. # A hwrm_queue_cfg command shall
- * return error when trying to configure a queue not
- * configurable.
+ * by the hwrm_queue_cfg command.
+ * # A value of 1 indicates that the queue is configurable.
+ * # A hwrm_queue_cfg command shall return error when trying to
+ * configure a queue not configurable.
*/
- uint8_t queue_cfg_info;
+ uint8_t queue_cfg_allowed;
/* Information about queue configuration. */
- /*
- * If this flag is set to '1', then the queues are configured
- * asymmetrically on TX and RX sides. If this flag is set to
- * '0', then the queues are configured symmetrically on TX and
- * RX sides. For symmetric configuration, the queue
- * configuration including queue ids and service profiles on the
- * TX side is the same as the corresponding queue configuration
- * on the RX side.
- */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG UINT32_C(0x1)
- uint8_t queue_pfcenable_cfg_allowed;
+ uint8_t queue_cfg_info;
+ /*
+ * If this flag is set to '1', then the queues are
+ * configured asymmetrically on TX and RX sides.
+ * If this flag is set to '0', then the queues are
+ * configured symmetrically on TX and RX sides. For
+ * symmetric configuration, the queue configuration
+ * including queue ids and service profiles on the
+ * TX side is the same as the corresponding queue
+ * configuration on the RX side.
+ */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG \
+ UINT32_C(0x1)
/*
* Bitmask indicating which queues can be configured by the
- * hwrm_queue_pfcenable_cfg command. Each bit represents a
- * specific priority where bit 0 represents priority 0 and bit 7
- * represents priority 7. # A value of 0 indicates that the
- * priority is not configurable by the hwrm_queue_pfcenable_cfg
- * command. # A value of 1 indicates that the priority is
- * configurable. # A hwrm_queue_pfcenable_cfg command shall
- * return error when trying to configure a priority that is not
- * configurable.
- */
- uint8_t queue_pri2cos_cfg_allowed;
+ * hwrm_queue_pfcenable_cfg command.
+ *
+ * Each bit represents a specific priority where bit 0 represents
+ * priority 0 and bit 7 represents priority 7.
+ * # A value of 0 indicates that the priority is not configurable by
+ * the hwrm_queue_pfcenable_cfg command.
+ * # A value of 1 indicates that the priority is configurable.
+ * # A hwrm_queue_pfcenable_cfg command shall return error when
+ * trying to configure a priority that is not configurable.
+ */
+ uint8_t queue_pfcenable_cfg_allowed;
/*
* Bitmask indicating which queues can be configured by the
- * hwrm_queue_pri2cos_cfg command. Each bit represents a
- * specific queue where bit 0 represents queue 0 and bit 7
- * represents queue 7. # A value of 0 indicates that the queue
- * is not configurable by the hwrm_queue_pri2cos_cfg command. #
- * A value of 1 indicates that the queue is configurable. # A
- * hwrm_queue_pri2cos_cfg command shall return error when trying
- * to configure a queue that is not configurable.
+ * hwrm_queue_pri2cos_cfg command.
+ *
+ * Each bit represents a specific queue where bit 0 represents
+ * queue 0 and bit 7 represents queue 7.
+ * # A value of 0 indicates that the queue is not configurable
+ * by the hwrm_queue_pri2cos_cfg command.
+ * # A value of 1 indicates that the queue is configurable.
+ * # A hwrm_queue_pri2cos_cfg command shall return error when
+ * trying to configure a queue that is not configurable.
*/
- uint8_t queue_cos2bw_cfg_allowed;
+ uint8_t queue_pri2cos_cfg_allowed;
/*
* Bitmask indicating which queues can be configured by the
- * hwrm_queue_pri2cos_cfg command. Each bit represents a
- * specific queue where bit 0 represents queue 0 and bit 7
- * represents queue 7. # A value of 0 indicates that the queue
- * is not configurable by the hwrm_queue_pri2cos_cfg command. #
- * A value of 1 indicates that the queue is configurable. # A
- * hwrm_queue_pri2cos_cfg command shall return error when trying
- * to configure a queue not configurable.
- */
- uint8_t queue_id0;
- /*
- * ID of CoS Queue 0. FF - Invalid id # This ID can be used on
- * any subsequent call to an hwrm command that takes a queue id.
+ * hwrm_queue_pri2cos_cfg command.
+ *
+ * Each bit represents a specific queue where bit 0 represents
+ * queue 0 and bit 7 represents queue 7.
+ * # A value of 0 indicates that the queue is not configurable
+ * by the hwrm_queue_pri2cos_cfg command.
+ * # A value of 1 indicates that the queue is configurable.
+ * # A hwrm_queue_pri2cos_cfg command shall return error when
+ * trying to configure a queue not configurable.
+ */
+ uint8_t queue_cos2bw_cfg_allowed;
+ /*
+ * ID of CoS Queue 0.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
* # IDs must always be queried by this command before any use
- * by the driver or software. # Any driver or software should
- * not make any assumptions about queue IDs. # A value of 0xff
- * indicates that the queue is not available. # Available queues
- * may not be in sequential order.
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about
+ * queue IDs.
+ * # A value of 0xff indicates that the queue is not available.
+ * # Available queues may not be in sequential order.
*/
- uint8_t queue_id0_service_profile;
+ uint8_t queue_id0;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ uint8_t queue_id0_service_profile;
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
- /* Lossless */
+ /* Lossless (legacy) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
- /*
- * Set to 0xFF... (All Fs) if there is no
- * service profile specified
- */
+ /* Lossless RoCE */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE \
+ UINT32_C(0x1)
+ /* Lossy RoCE CNP */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP \
+ UINT32_C(0x2)
+ /* Lossless NIC */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC \
+ UINT32_C(0x3)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN \
UINT32_C(0xff)
- uint8_t queue_id1;
- /*
- * ID of CoS Queue 1. FF - Invalid id # This ID can be used on
- * any subsequent call to an hwrm command that takes a queue id.
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN
+ /*
+ * ID of CoS Queue 1.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
* # IDs must always be queried by this command before any use
- * by the driver or software. # Any driver or software should
- * not make any assumptions about queue IDs. # A value of 0xff
- * indicates that the queue is not available. # Available queues
- * may not be in sequential order.
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about
+ * queue IDs.
+ * # A value of 0xff indicates that the queue is not available.
+ * # Available queues may not be in sequential order.
*/
- uint8_t queue_id1_service_profile;
+ uint8_t queue_id1;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ uint8_t queue_id1_service_profile;
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
- /* Lossless */
+ /* Lossless (legacy) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
- /*
- * Set to 0xFF... (All Fs) if there is no
- * service profile specified
- */
+ /* Lossless RoCE */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE \
+ UINT32_C(0x1)
+ /* Lossy RoCE CNP */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP \
+ UINT32_C(0x2)
+ /* Lossless NIC */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC \
+ UINT32_C(0x3)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN \
UINT32_C(0xff)
- uint8_t queue_id2;
- /*
- * ID of CoS Queue 2. FF - Invalid id # This ID can be used on
- * any subsequent call to an hwrm command that takes a queue id.
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN
+ /*
+ * ID of CoS Queue 2.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
* # IDs must always be queried by this command before any use
- * by the driver or software. # Any driver or software should
- * not make any assumptions about queue IDs. # A value of 0xff
- * indicates that the queue is not available. # Available queues
- * may not be in sequential order.
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about
+ * queue IDs.
+ * # A value of 0xff indicates that the queue is not available.
+ * # Available queues may not be in sequential order.
*/
- uint8_t queue_id2_service_profile;
+ uint8_t queue_id2;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ uint8_t queue_id2_service_profile;
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
- /* Lossless */
+ /* Lossless (legacy) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
- /*
- * Set to 0xFF... (All Fs) if there is no
- * service profile specified
- */
+ /* Lossless RoCE */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE \
+ UINT32_C(0x1)
+ /* Lossy RoCE CNP */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP \
+ UINT32_C(0x2)
+ /* Lossless NIC */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC \
+ UINT32_C(0x3)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN \
UINT32_C(0xff)
- uint8_t queue_id3;
- /*
- * ID of CoS Queue 3. FF - Invalid id # This ID can be used on
- * any subsequent call to an hwrm command that takes a queue id.
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN
+ /*
+ * ID of CoS Queue 3.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
* # IDs must always be queried by this command before any use
- * by the driver or software. # Any driver or software should
- * not make any assumptions about queue IDs. # A value of 0xff
- * indicates that the queue is not available. # Available queues
- * may not be in sequential order.
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about
+ * queue IDs.
+ * # A value of 0xff indicates that the queue is not available.
+ * # Available queues may not be in sequential order.
*/
- uint8_t queue_id3_service_profile;
+ uint8_t queue_id3;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ uint8_t queue_id3_service_profile;
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
- /* Lossless */
+ /* Lossless (legacy) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
- /*
- * Set to 0xFF... (All Fs) if there is no
- * service profile specified
- */
+ /* Lossless RoCE */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE \
+ UINT32_C(0x1)
+ /* Lossy RoCE CNP */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP \
+ UINT32_C(0x2)
+ /* Lossless NIC */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC \
+ UINT32_C(0x3)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN \
UINT32_C(0xff)
- uint8_t queue_id4;
- /*
- * ID of CoS Queue 4. FF - Invalid id # This ID can be used on
- * any subsequent call to an hwrm command that takes a queue id.
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN
+ /*
+ * ID of CoS Queue 4.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
* # IDs must always be queried by this command before any use
- * by the driver or software. # Any driver or software should
- * not make any assumptions about queue IDs. # A value of 0xff
- * indicates that the queue is not available. # Available queues
- * may not be in sequential order.
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about
+ * queue IDs.
+ * # A value of 0xff indicates that the queue is not available.
+ * # Available queues may not be in sequential order.
*/
- uint8_t queue_id4_service_profile;
+ uint8_t queue_id4;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ uint8_t queue_id4_service_profile;
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
- /* Lossless */
+ /* Lossless (legacy) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
- /*
- * Set to 0xFF... (All Fs) if there is no
- * service profile specified
- */
+ /* Lossless RoCE */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE \
+ UINT32_C(0x1)
+ /* Lossy RoCE CNP */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP \
+ UINT32_C(0x2)
+ /* Lossless NIC */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC \
+ UINT32_C(0x3)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN \
UINT32_C(0xff)
- uint8_t queue_id5;
- /*
- * ID of CoS Queue 5. FF - Invalid id # This ID can be used on
- * any subsequent call to an hwrm command that takes a queue id.
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN
+ /*
+ * ID of CoS Queue 5.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
* # IDs must always be queried by this command before any use
- * by the driver or software. # Any driver or software should
- * not make any assumptions about queue IDs. # A value of 0xff
- * indicates that the queue is not available. # Available queues
- * may not be in sequential order.
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about
+ * queue IDs.
+ * # A value of 0xff indicates that the queue is not available.
+ * # Available queues may not be in sequential order.
*/
- uint8_t queue_id5_service_profile;
+ uint8_t queue_id5;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ uint8_t queue_id5_service_profile;
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
- /* Lossless */
+ /* Lossless (legacy) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
- /*
- * Set to 0xFF... (All Fs) if there is no
- * service profile specified
- */
+ /* Lossless RoCE */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE \
+ UINT32_C(0x1)
+ /* Lossy RoCE CNP */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP \
+ UINT32_C(0x2)
+ /* Lossless NIC */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC \
+ UINT32_C(0x3)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN \
UINT32_C(0xff)
- uint8_t queue_id6;
- /*
- * ID of CoS Queue 6. FF - Invalid id # This ID can be used on
- * any subsequent call to an hwrm command that takes a queue id.
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN
+ /*
+ * ID of CoS Queue 6.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
* # IDs must always be queried by this command before any use
- * by the driver or software. # Any driver or software should
- * not make any assumptions about queue IDs. # A value of 0xff
- * indicates that the queue is not available. # Available queues
- * may not be in sequential order.
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about
+ * queue IDs.
+ * # A value of 0xff indicates that the queue is not available.
+ * # Available queues may not be in sequential order.
*/
- uint8_t queue_id6_service_profile;
+ uint8_t queue_id6;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ uint8_t queue_id6_service_profile;
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
- /* Lossless */
+ /* Lossless (legacy) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
- /*
- * Set to 0xFF... (All Fs) if there is no
- * service profile specified
- */
+ /* Lossless RoCE */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE \
+ UINT32_C(0x1)
+ /* Lossy RoCE CNP */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP \
+ UINT32_C(0x2)
+ /* Lossless NIC */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC \
+ UINT32_C(0x3)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN \
UINT32_C(0xff)
- uint8_t queue_id7;
- /*
- * ID of CoS Queue 7. FF - Invalid id # This ID can be used on
- * any subsequent call to an hwrm command that takes a queue id.
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN
+ /*
+ * ID of CoS Queue 7.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
* # IDs must always be queried by this command before any use
- * by the driver or software. # Any driver or software should
- * not make any assumptions about queue IDs. # A value of 0xff
- * indicates that the queue is not available. # Available queues
- * may not be in sequential order.
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about
+ * queue IDs.
+ * # A value of 0xff indicates that the queue is not available.
+ * # Available queues may not be in sequential order.
*/
- uint8_t queue_id7_service_profile;
+ uint8_t queue_id7;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ uint8_t queue_id7_service_profile;
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
- /* Lossless */
+ /* Lossless (legacy) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
- /*
- * Set to 0xFF... (All Fs) if there is no
- * service profile specified
- */
+ /* Lossless RoCE */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE \
+ UINT32_C(0x1)
+ /* Lossy RoCE CNP */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP \
+ UINT32_C(0x2)
+ /* Lossless NIC */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC \
+ UINT32_C(0x3)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN \
UINT32_C(0xff)
- uint8_t valid;
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
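For orientation, a minimal sketch (not part of this patch) of how a consumer might probe one of the queue_idN / service-profile pairs returned above. The response structure name is inferred from the HWRM_QUEUE_QPORTCFG_OUTPUT_* macro prefix and the helper name is hypothetical; only macros shown above are used.

static int queue7_usable(const struct hwrm_queue_qportcfg_output *resp)
{
	/* 0xff means CoS queue 7 is not available on this port. */
	if (resp->queue_id7 == 0xff)
		return 0;
	/* All Fs means no service profile was reported for the queue. */
	if (resp->queue_id7_service_profile ==
	    HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN)
		return 0;
	return 1;
}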
-/*********************
- * hwrm_port_mac_cfg *
- *********************/
+/*******************
+ * hwrm_queue_qcfg *
+ *******************/
-/* hwrm_port_mac_cfg_input (size:320b/40B) */
-struct hwrm_port_mac_cfg_input {
+/* hwrm_queue_qcfg_input (size:192b/24B) */
+struct hwrm_queue_qcfg_input {
+ /* The HWRM command request type. */
uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
uint16_t target_id;
+ /*
 + * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
uint64_t resp_addr;
uint32_t flags;
- #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
- #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL
- #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
- #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
- #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
- #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
- #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
- #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
- #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
- uint32_t enables;
- #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
- #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
- #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL
- #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
- #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
- #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
- #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
- #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
- uint16_t port_id;
- uint8_t ipg;
- uint8_t lpbk;
- #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL
- #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL
- #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
- #define PORT_MAC_CFG_REQ_LPBK_LAST PORT_MAC_CFG_REQ_LPBK_REMOTE
- uint8_t vlan_pri2cos_map_pri;
- uint8_t reserved1;
- uint8_t tunnel_pri2cos_map_pri;
- uint8_t dscp2pri_map_pri;
- uint16_t rx_ts_capture_ptp_msg_type;
- uint16_t tx_ts_capture_ptp_msg_type;
- uint8_t cos_field_cfg;
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST \
- (0x0UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER \
- (0x1UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST \
- (0x2UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED \
- (0x3UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST \
- PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST \
- (0x0UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER \
- (0x1UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST \
- (0x2UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED \
- (0x3UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST \
- PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
- uint8_t unused_0[3];
-};
+ /*
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ #define HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH_LAST \
+ HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH_RX
+ /* Queue ID of the queue. */
+ uint32_t queue_id;
+} __attribute__((packed));
+
+/* hwrm_queue_qcfg_output (size:128b/16B) */
+struct hwrm_queue_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
 + * This value is the estimated packet length used in the
+ * TX arbiter.
+ */
+ uint32_t queue_len;
+ /* This value is applicable to CoS queues only. */
+ uint8_t service_profile;
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_LOSSY UINT32_C(0x0)
+ /* Lossless */
+ #define HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_LOSSLESS UINT32_C(0x1)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
+ #define HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_UNKNOWN UINT32_C(0xff)
+ #define HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_UNKNOWN
+ /* Information about queue configuration. */
+ uint8_t queue_cfg_info;
+ /*
+ * If this flag is set to '1', then the queue is
+ * configured asymmetrically on TX and RX sides.
+ * If this flag is set to '0', then this queue is
+ * configured symmetrically on TX and RX sides.
+ */
+ #define HWRM_QUEUE_QCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG \
+ UINT32_C(0x1)
+ uint8_t unused_0;
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
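As a rough sketch (not part of this patch) of the request/response flow described above: send_hwrm() is a placeholder for the driver's HWRM transport, HWRM_QUEUE_QCFG is the request-type constant assumed to be defined elsewhere in this file, and endianness handling is omitted for brevity.

int send_hwrm(void *req, unsigned int req_len, void *resp, unsigned int resp_len);

static int queue_qcfg(uint32_t queue_id, struct hwrm_queue_qcfg_output *resp)
{
	struct hwrm_queue_qcfg_input req = { 0 };

	req.req_type = HWRM_QUEUE_QCFG;
	/* PATH selects whether the TX or RX instance of the queue is queried. */
	req.flags = HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH_TX;
	req.queue_id = queue_id;
	if (send_hwrm(&req, sizeof(req), resp, sizeof(*resp)) != 0)
		return -1;
	/* Per the `valid` comment above, the response is complete only once
	 * this field reads back as 1.
	 */
	return resp->valid == 1 ? 0 : -1;
}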
+/******************
+ * hwrm_queue_cfg *
+ ******************/
-/* hwrm_port_mac_cfg_output (size:128b/16B) */
-struct hwrm_port_mac_cfg_output {
+
+/* hwrm_queue_cfg_input (size:320b/40B) */
+struct hwrm_queue_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
 + * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * Enumeration denoting the RX, TX, or both directions applicable to the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_MASK UINT32_C(0x3)
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_SFT 0
+ /* tx path */
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ /* Bi-directional (Symmetrically applicable to TX and RX paths) */
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_BIDIR UINT32_C(0x2)
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_LAST \
+ HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_BIDIR
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the dflt_len field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_CFG_INPUT_ENABLES_DFLT_LEN UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the service_profile field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_CFG_INPUT_ENABLES_SERVICE_PROFILE UINT32_C(0x2)
+ /* Queue ID of queue that is to be configured by this function. */
+ uint32_t queue_id;
+ /*
 + * This value is the estimated packet length used in the
+ * TX arbiter.
+ * Set to 0xFF... (All Fs) to not adjust this value.
+ */
+ uint32_t dflt_len;
+ /* This value is applicable to CoS queues only. */
+ uint8_t service_profile;
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_LOSSY UINT32_C(0x0)
+ /* Lossless */
+ #define HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_LOSSLESS UINT32_C(0x1)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
+ #define HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_UNKNOWN UINT32_C(0xff)
+ #define HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_UNKNOWN
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/* hwrm_queue_cfg_output (size:128b/16B) */
+struct hwrm_queue_cfg_output {
+ /* The specific error status for the command. */
uint16_t error_code;
+ /* The HWRM command request type. */
uint16_t req_type;
+ /* The sequence ID from the original command. */
uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint16_t mru;
- uint16_t mtu;
- uint8_t ipg;
- uint8_t lpbk;
- #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL
- #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL
- #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
- #define PORT_MAC_CFG_RESP_LPBK_LAST PORT_MAC_CFG_RESP_LPBK_REMOTE
- uint8_t unused_0;
- uint8_t valid;
-};
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
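A brief illustration (not part of this patch) of how the `enables` bitmap above gates which optional fields the firmware consumes; common request-header fields are left to the caller and only macros defined above are used.

static void queue_cfg_lossless(struct hwrm_queue_cfg_input *req,
			       uint32_t queue_id)
{
	/* Apply the change symmetrically to the TX and RX sides. */
	req->flags = HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_BIDIR;
	req->queue_id = queue_id;
	/* Only service_profile is flagged, so dflt_len is left unconfigured. */
	req->enables = HWRM_QUEUE_CFG_INPUT_ENABLES_SERVICE_PROFILE;
	req->service_profile = HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_LOSSLESS;
}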
+/*****************************
+ * hwrm_queue_pfcenable_qcfg *
+ *****************************/
-/**********************
- * hwrm_port_mac_qcfg *
- **********************/
+/* hwrm_queue_pfcenable_qcfg_input (size:192b/24B) */
+struct hwrm_queue_pfcenable_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
 + * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Port ID of port for which the table is being configured.
+ * The HWRM needs to check whether this function is allowed
+ * to configure pri2cos mapping on this port.
+ */
+ uint16_t port_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
-/* hwrm_port_mac_qcfg_input (size:192b/24B) */
-struct hwrm_port_mac_qcfg_input {
+/* hwrm_queue_pfcenable_qcfg_output (size:128b/16B) */
+struct hwrm_queue_pfcenable_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint32_t flags;
+ /* If set to 1, then PFC is enabled on PRI 0. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI0_PFC_ENABLED \
+ UINT32_C(0x1)
+ /* If set to 1, then PFC is enabled on PRI 1. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI1_PFC_ENABLED \
+ UINT32_C(0x2)
+ /* If set to 1, then PFC is enabled on PRI 2. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI2_PFC_ENABLED \
+ UINT32_C(0x4)
+ /* If set to 1, then PFC is enabled on PRI 3. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI3_PFC_ENABLED \
+ UINT32_C(0x8)
+ /* If set to 1, then PFC is enabled on PRI 4. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI4_PFC_ENABLED \
+ UINT32_C(0x10)
+ /* If set to 1, then PFC is enabled on PRI 5. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI5_PFC_ENABLED \
+ UINT32_C(0x20)
+ /* If set to 1, then PFC is enabled on PRI 6. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI6_PFC_ENABLED \
+ UINT32_C(0x40)
+ /* If set to 1, then PFC is enabled on PRI 7. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI7_PFC_ENABLED \
+ UINT32_C(0x80)
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/****************************
+ * hwrm_queue_pfcenable_cfg *
+ ****************************/
+
+
+/* hwrm_queue_pfcenable_cfg_input (size:192b/24B) */
+struct hwrm_queue_pfcenable_cfg_input {
+ /* The HWRM command request type. */
uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
uint16_t target_id;
+ /*
 + * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
uint64_t resp_addr;
+ uint32_t flags;
+ /* If set to 1, then PFC is requested to be enabled on PRI 0. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI0_PFC_ENABLED \
+ UINT32_C(0x1)
+ /* If set to 1, then PFC is requested to be enabled on PRI 1. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI1_PFC_ENABLED \
+ UINT32_C(0x2)
+ /* If set to 1, then PFC is requested to be enabled on PRI 2. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI2_PFC_ENABLED \
+ UINT32_C(0x4)
+ /* If set to 1, then PFC is requested to be enabled on PRI 3. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI3_PFC_ENABLED \
+ UINT32_C(0x8)
+ /* If set to 1, then PFC is requested to be enabled on PRI 4. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI4_PFC_ENABLED \
+ UINT32_C(0x10)
+ /* If set to 1, then PFC is requested to be enabled on PRI 5. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI5_PFC_ENABLED \
+ UINT32_C(0x20)
+ /* If set to 1, then PFC is requested to be enabled on PRI 6. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI6_PFC_ENABLED \
+ UINT32_C(0x40)
+ /* If set to 1, then PFC is requested to be enabled on PRI 7. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI7_PFC_ENABLED \
+ UINT32_C(0x80)
+ /*
+ * Port ID of port for which the table is being configured.
+ * The HWRM needs to check whether this function is allowed
+ * to configure pri2cos mapping on this port.
+ */
uint16_t port_id;
- uint8_t unused_0[6];
-};
+ uint8_t unused_0[2];
+} __attribute__((packed));
+
+/* hwrm_queue_pfcenable_cfg_output (size:128b/16B) */
+struct hwrm_queue_pfcenable_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
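A small sketch (not part of this patch) of the per-priority flag usage above: requesting PFC on priorities 3 and 4 of a port, with the common request-header fields left to the caller.

static void pfc_enable_pri3_pri4(struct hwrm_queue_pfcenable_cfg_input *req,
				 uint16_t port_id)
{
	req->port_id = port_id;
	/* Each set bit requests PFC on one 802.1p priority. */
	req->flags = HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI3_PFC_ENABLED |
		     HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI4_PFC_ENABLED;
}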
+/***************************
+ * hwrm_queue_pri2cos_qcfg *
+ ***************************/
-/* hwrm_port_mac_qcfg_output (size:192b/24B) */
-struct hwrm_port_mac_qcfg_output {
+
+/* hwrm_queue_pri2cos_qcfg_input (size:192b/24B) */
+struct hwrm_queue_pri2cos_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
 + * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ #define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_LAST \
+ HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_RX
+ /*
+ * When this bit is set to '0', the query is
+ * for VLAN PRI field in tunnel headers.
+ * When this bit is set to '1', the query is
+ * for VLAN PRI field in inner packet headers.
+ */
+ #define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN UINT32_C(0x2)
+ /*
+ * Port ID of port for which the table is being configured.
+ * The HWRM needs to check whether this function is allowed
+ * to configure pri2cos mapping on this port.
+ */
+ uint8_t port_id;
+ uint8_t unused_0[3];
+} __attribute__((packed));
+
+/* hwrm_queue_pri2cos_qcfg_output (size:192b/24B) */
+struct hwrm_queue_pri2cos_qcfg_output {
+ /* The specific error status for the command. */
uint16_t error_code;
+ /* The HWRM command request type. */
uint16_t req_type;
+ /* The sequence ID from the original command. */
uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint16_t mru;
- uint16_t mtu;
- uint8_t ipg;
- uint8_t lpbk;
- #define PORT_MAC_QCFG_RESP_LPBK_NONE 0x0UL
- #define PORT_MAC_QCFG_RESP_LPBK_LOCAL 0x1UL
- #define PORT_MAC_QCFG_RESP_LPBK_REMOTE 0x2UL
- #define PORT_MAC_QCFG_RESP_LPBK_LAST PORT_MAC_QCFG_RESP_LPBK_REMOTE
- uint8_t vlan_pri2cos_map_pri;
- uint8_t flags;
- #define PORT_MAC_QCFG_RESP_FLAGS_VLAN_PRI2COS_ENABLE 0x1UL
- #define PORT_MAC_QCFG_RESP_FLAGS_TUNNEL_PRI2COS_ENABLE 0x2UL
- #define PORT_MAC_QCFG_RESP_FLAGS_IP_DSCP2COS_ENABLE 0x4UL
- #define PORT_MAC_QCFG_RESP_FLAGS_OOB_WOL_ENABLE 0x8UL
- #define PORT_MAC_QCFG_RESP_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
- #define PORT_MAC_QCFG_RESP_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x20UL
- uint8_t tunnel_pri2cos_map_pri;
- uint8_t dscp2pri_map_pri;
- uint16_t rx_ts_capture_ptp_msg_type;
- uint16_t tx_ts_capture_ptp_msg_type;
- uint8_t cos_field_cfg;
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_RSVD 0x1UL
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST \
- (0x0UL << 1)
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER \
- (0x1UL << 1)
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST \
- (0x2UL << 1)
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED \
- (0x3UL << 1)
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_LAST \
- PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST \
- (0x0UL << 3)
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER \
- (0x1UL << 3)
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST \
- (0x2UL << 3)
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED \
- (0x3UL << 3)
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST \
- PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_DEFAULT_COS_SFT 5
- uint8_t valid;
-};
+ /*
+ * CoS Queue assigned to priority 0. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri0_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 1. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri1_cos_queue_id;
+ /*
 + * CoS Queue assigned to priority 2. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri2_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 3. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri3_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 4. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri4_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 5. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri5_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 6. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri6_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 7. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri7_cos_queue_id;
+ /* Information about queue configuration. */
+ uint8_t queue_cfg_info;
+ /*
+ * If this flag is set to '1', then the PRI to CoS
+ * configuration is asymmetric on TX and RX sides.
+ * If this flag is set to '0', then PRI to CoS configuration
+ * is symmetric on TX and RX sides.
+ */
+ #define HWRM_QUEUE_PRI2COS_QCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG \
+ UINT32_C(0x1)
+ uint8_t unused_0[6];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
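For illustration only (not part of this patch): flattening the eight priN_cos_queue_id fields of the response above into an array and reporting whether the mapping is symmetric on TX and RX. The array layout is an assumption of this sketch, not something the interface mandates.

static int pri2cos_decode(const struct hwrm_queue_pri2cos_qcfg_output *resp,
			  uint8_t cos_by_pri[8])
{
	cos_by_pri[0] = resp->pri0_cos_queue_id;
	cos_by_pri[1] = resp->pri1_cos_queue_id;
	cos_by_pri[2] = resp->pri2_cos_queue_id;
	cos_by_pri[3] = resp->pri3_cos_queue_id;
	cos_by_pri[4] = resp->pri4_cos_queue_id;
	cos_by_pri[5] = resp->pri5_cos_queue_id;
	cos_by_pri[6] = resp->pri6_cos_queue_id;
	cos_by_pri[7] = resp->pri7_cos_queue_id;
	/* 0xff in a slot means no CoS queue is assigned to that priority. */
	return !(resp->queue_cfg_info &
		 HWRM_QUEUE_PRI2COS_QCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG);
}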
+
+/**************************
+ * hwrm_queue_pri2cos_cfg *
+ **************************/
+
+
+/* hwrm_queue_pri2cos_cfg_input (size:320b/40B) */
+struct hwrm_queue_pri2cos_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
 + * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * Enumeration denoting the RX, TX, or both directions applicable to the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_MASK UINT32_C(0x3)
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_SFT 0
+ /* tx path */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ /* Bi-directional (Symmetrically applicable to TX and RX paths) */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_BIDIR UINT32_C(0x2)
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_LAST \
+ HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_BIDIR
+ /*
+ * When this bit is set to '0', the mapping is requested
+ * for VLAN PRI field in tunnel headers.
+ * When this bit is set to '1', the mapping is requested
+ * for VLAN PRI field in inner packet headers.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_IVLAN UINT32_C(0x4)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the pri0_cos_queue_id field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI0_COS_QUEUE_ID \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the pri1_cos_queue_id field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI1_COS_QUEUE_ID \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the pri2_cos_queue_id field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI2_COS_QUEUE_ID \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the pri3_cos_queue_id field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI3_COS_QUEUE_ID \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the pri4_cos_queue_id field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI4_COS_QUEUE_ID \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the pri5_cos_queue_id field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI5_COS_QUEUE_ID \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the pri6_cos_queue_id field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI6_COS_QUEUE_ID \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the pri7_cos_queue_id field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI7_COS_QUEUE_ID \
+ UINT32_C(0x80)
+ /*
+ * Port ID of port for which the table is being configured.
+ * The HWRM needs to check whether this function is allowed
+ * to configure pri2cos mapping on this port.
+ */
+ uint8_t port_id;
+ /*
+ * CoS Queue assigned to priority 0. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri0_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 1. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri1_cos_queue_id;
+ /*
 + * CoS Queue assigned to priority 2. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri2_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 3. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri3_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 4. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri4_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 5. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri5_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 6. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri6_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 7. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri7_cos_queue_id;
+ uint8_t unused_0[7];
+} __attribute__((packed));
+/* hwrm_queue_pri2cos_cfg_output (size:128b/16B) */
+struct hwrm_queue_pri2cos_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
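A short sketch (not part of this patch) of the cfg side: mapping priority 0 to one CoS queue for the inner VLAN PRI field on the TX path. Only the pri0 mapping is flagged in `enables`, so the remaining priorities are left unconfigured; common header fields are left to the caller.

static void pri0_to_cos(struct hwrm_queue_pri2cos_cfg_input *req,
			uint8_t port_id, uint8_t cos_queue_id)
{
	/* TX path, inner-packet VLAN PRI field. */
	req->flags = HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_TX |
		     HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_IVLAN;
	req->enables = HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI0_COS_QUEUE_ID;
	req->port_id = port_id;
	req->pri0_cos_queue_id = cos_queue_id;
}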
/**************************
- * hwrm_port_mac_ptp_qcfg *
+ * hwrm_queue_cos2bw_qcfg *
**************************/
-/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */
-struct hwrm_port_mac_ptp_qcfg_input {
+/* hwrm_queue_cos2bw_qcfg_input (size:192b/24B) */
+struct hwrm_queue_cos2bw_qcfg_input {
+ /* The HWRM command request type. */
uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
uint16_t target_id;
+ /*
 + * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
uint64_t resp_addr;
+ /*
+ * Port ID of port for which the table is being configured.
+ * The HWRM needs to check whether this function is allowed
+ * to configure TC BW assignment on this port.
+ */
uint16_t port_id;
- uint8_t unused_0[6];
-};
+ uint8_t unused_0[6];
+} __attribute__((packed));
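Each queue_idN_min_bw/max_bw word in the response that follows packs a 28-bit bandwidth value, a scale bit (bits vs. bytes) and a 3-bit unit. As a sketch only (not part of this patch), decoding the queue 0 minimum-BW word with the masks and shifts defined below; the other queues use the same layout.

static void cos2bw_decode_min_bw0(uint32_t bw, uint32_t *value,
				  int *scale_is_bytes, uint32_t *unit)
{
	/* Bits 27:0 hold the raw bandwidth value. */
	*value = bw &
		 HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_MASK;
	/* Bit 28 selects bytes (1) or bits (0). */
	*scale_is_bytes =
		(bw & HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE) != 0;
	/* Bits 31:29 carry the unit (mega, kilo, base, giga, percent, ...). */
	*unit = (bw &
		 HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK)
		>> HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT;
}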
+/* hwrm_queue_cos2bw_qcfg_output (size:896b/112B) */
+struct hwrm_queue_cos2bw_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* ID of CoS Queue 0. */
+ uint8_t queue_id0;
+ uint8_t unused_0;
+ uint16_t unused_1;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id0_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id0_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id0_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id0_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id0_bw_weight;
+ /* ID of CoS Queue 1. */
+ uint8_t queue_id1;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id1_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id1_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id1_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id1_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id1_bw_weight;
+ /* ID of CoS Queue 2. */
+ uint8_t queue_id2;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id2_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id2_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id2_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id2_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id2_bw_weight;
+ /* ID of CoS Queue 3. */
+ uint8_t queue_id3;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id3_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id3_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id3_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id3_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id3_bw_weight;
+ /* ID of CoS Queue 4. */
+ uint8_t queue_id4;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id4_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id4_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id4_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id4_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id4_bw_weight;
+ /* ID of CoS Queue 5. */
+ uint8_t queue_id5;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id5_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id5_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id5_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id5_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id5_bw_weight;
+ /* ID of CoS Queue 6. */
+ uint8_t queue_id6;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id6_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id6_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id6_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id6_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id6_bw_weight;
+ /* ID of CoS Queue 7. */
+ uint8_t queue_id7;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id7_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id7_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id7_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id7_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id7_bw_weight;
+ uint8_t unused_2[4];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
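
The structure above only defines the on-wire layout. As a quick illustration, a hedged sketch of how a driver could pull the 28-bit bandwidth value and its unit out of one of these words follows. The helper name and the use of rte_le_to_cpu_32() are assumptions made for the example and are not part of this patch; only the QUEUE_ID7_MIN_BW_* macros defined above are taken from it.

#include <stdint.h>
#include <rte_byteorder.h>

/*
 * Hypothetical helper (illustration only): extract the raw 28-bit
 * bandwidth value from queue_id7_min_bw and report whether the unit
 * field says it is expressed in 1/100th of a percent of total bandwidth.
 */
static inline uint32_t
example_get_queue_id7_min_bw(const struct hwrm_queue_cos2bw_qcfg_output *resp,
			     int *is_percent)
{
	/* HWRM response fields are little-endian on the wire. */
	uint32_t raw = rte_le_to_cpu_32(resp->queue_id7_min_bw);

	/* Bits 31:29 carry the unit; 0x1 means 1/100th of a percent. */
	*is_percent =
		(raw & HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK) ==
		HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100;

	/* Bits 27:0 carry the bandwidth value itself. */
	return (raw & HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_MASK) >>
		HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_SFT;
}
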
-/* hwrm_port_mac_ptp_qcfg_output (size:640b/80B) */
-struct hwrm_port_mac_ptp_qcfg_output {
+/*************************
+ * hwrm_queue_cos2bw_cfg *
+ *************************/
+
+
+/* hwrm_queue_cos2bw_cfg_input (size:1024b/128B) */
+struct hwrm_queue_cos2bw_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+	/*
+	 * A physical address pointer pointing to a host buffer into which
+	 * the command's response data will be written. This can be either a
+	 * host physical address (HPA) or a guest physical address (GPA) and
+	 * must point to a physically contiguous block of memory.
+	 */
+ uint64_t resp_addr;
+ uint32_t flags;
+ uint32_t enables;
+ /*
+ * If this bit is set to 1, then all queue_id0 related
+ * parameters in this command are valid.
+ */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID0_VALID \
+ UINT32_C(0x1)
+ /*
+ * If this bit is set to 1, then all queue_id1 related
+ * parameters in this command are valid.
+ */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID1_VALID \
+ UINT32_C(0x2)
+ /*
+ * If this bit is set to 1, then all queue_id2 related
+ * parameters in this command are valid.
+ */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID2_VALID \
+ UINT32_C(0x4)
+ /*
+ * If this bit is set to 1, then all queue_id3 related
+ * parameters in this command are valid.
+ */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID3_VALID \
+ UINT32_C(0x8)
+ /*
+ * If this bit is set to 1, then all queue_id4 related
+ * parameters in this command are valid.
+ */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID4_VALID \
+ UINT32_C(0x10)
+ /*
+ * If this bit is set to 1, then all queue_id5 related
+ * parameters in this command are valid.
+ */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID5_VALID \
+ UINT32_C(0x20)
+ /*
+ * If this bit is set to 1, then all queue_id6 related
+ * parameters in this command are valid.
+ */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID6_VALID \
+ UINT32_C(0x40)
+ /*
+ * If this bit is set to 1, then all queue_id7 related
+ * parameters in this command are valid.
+ */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID7_VALID \
+ UINT32_C(0x80)
+ /*
+ * Port ID of port for which the table is being configured.
+ * The HWRM needs to check whether this function is allowed
+ * to configure TC BW assignment on this port.
+ */
+ uint16_t port_id;
+ /* ID of CoS Queue 0. */
+ uint8_t queue_id0;
+ uint8_t unused_0;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id0_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id0_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id0_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id0_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id0_bw_weight;
+ /* ID of CoS Queue 1. */
+ uint8_t queue_id1;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id1_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id1_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id1_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id1_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id1_bw_weight;
+ /* ID of CoS Queue 2. */
+ uint8_t queue_id2;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id2_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id2_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id2_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id2_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id2_bw_weight;
+ /* ID of CoS Queue 3. */
+ uint8_t queue_id3;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id3_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id3_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id3_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id3_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id3_bw_weight;
+ /* ID of CoS Queue 4. */
+ uint8_t queue_id4;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id4_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id4_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id4_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id4_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id4_bw_weight;
+ /* ID of CoS Queue 5. */
+ uint8_t queue_id5;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id5_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id5_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id5_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP)
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id5_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id5_bw_weight;
+ /* ID of CoS Queue 6. */
+ uint8_t queue_id6;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id6_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id6_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id6_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+	 * Priority level for strict priority. Valid only when
+	 * tsa_assign is 0 (Strict Priority).
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id6_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id6_bw_weight;
+ /* ID of CoS Queue 7. */
+ uint8_t queue_id7;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id7_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE_BYTES
+	 /* bw_value_unit is 3 bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS queue.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id7_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE_BYTES
+	 /* bw_value_unit is 3 bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id7_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+	 * Priority level for strict priority. Valid only when
+	 * tsa_assign is 0 (Strict Priority).
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id7_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id7_bw_weight;
+ uint8_t unused_1[5];
+} __attribute__((packed));
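Editorial sketch (not part of the patch): the *_min_bw/*_max_bw words above pack a 28-bit value, a scale bit and a 3-bit unit into a single uint32_t. Assuming the little-endian conversion normally done by the driver, a guaranteed 2500 megabit minimum for CoS queue 6 could be composed roughly as:

    uint32_t min_bw =
        ((UINT32_C(2500) << HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_SFT) &
         HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_MASK) |
        HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE_BITS |        /* value counts bits */
        HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA; /* in megabits (base 10) */
    req.queue_id6_min_bw = rte_cpu_to_le_32(min_bw); /* req: a struct hwrm_queue_cos2bw_cfg_input */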
+
+/* hwrm_queue_cos2bw_cfg_output (size:128b/16B) */
+struct hwrm_queue_cos2bw_cfg_output {
+ /* The specific error status for the command. */
uint16_t error_code;
+ /* The HWRM command request type. */
uint16_t req_type;
+ /* The sequence ID from the original command. */
uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint8_t flags;
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL
- uint8_t unused_0[3];
- uint32_t rx_ts_reg_off_lower;
- uint32_t rx_ts_reg_off_upper;
- uint32_t rx_ts_reg_off_seq_id;
- uint32_t rx_ts_reg_off_src_id_0;
- uint32_t rx_ts_reg_off_src_id_1;
- uint32_t rx_ts_reg_off_src_id_2;
- uint32_t rx_ts_reg_off_domain_id;
- uint32_t rx_ts_reg_off_fifo;
- uint32_t rx_ts_reg_off_fifo_adv;
- uint32_t rx_ts_reg_off_granularity;
- uint32_t tx_ts_reg_off_lower;
- uint32_t tx_ts_reg_off_upper;
- uint32_t tx_ts_reg_off_seq_id;
- uint32_t tx_ts_reg_off_fifo;
- uint32_t tx_ts_reg_off_granularity;
- uint8_t unused_1[7];
- uint8_t valid;
-};
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
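Editorial sketch (not part of the patch): because firmware writes the valid byte last, callers normally poll it before reading the rest of the response; the response pointer and the busy-wait below are illustrative assumptions, not driver code:

    volatile struct hwrm_queue_cos2bw_cfg_output *resp = resp_buf; /* resp_buf: assumed response area */
    while (resp->valid != 1)
        rte_delay_us(1);  /* wait until firmware has written the whole record */
    rte_io_rmb();         /* order the valid read before reading other fields */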
+/*************************
+ * hwrm_queue_dscp_qcaps *
+ *************************/
-/* hwrm_vnic_alloc */
-/*
- * Description: This VNIC is a resource in the RX side of the chip that is used
- * to represent a virtual host "interface". # At the time of VNIC allocation or
- * configuration, the function can specify whether it wants the requested VNIC
- * to be the default VNIC for the function or not. # If a function requests
- * allocation of a VNIC for the first time and a VNIC is successfully allocated
- * by the HWRM, then the HWRM shall make the allocated VNIC as the default VNIC
- * for that function. # The default VNIC shall be used for the default action
- * for a partition or function. # For each VNIC allocated on a function, a
- * mapping on the RX side to map the allocated VNIC to source virtual interface
- * shall be performed by the HWRM. This should be hidden to the function driver
- * requesting the VNIC allocation. This enables broadcast/multicast replication
- * with source knockout. # If multicast replication with source knockout is
- * enabled, then the internal VNIC to SVIF mapping data structures shall be
- * programmed at the time of VNIC allocation.
- */
-/* Input (24 bytes) */
-struct hwrm_vnic_alloc_input {
- uint16_t req_type;
+
+/* hwrm_queue_dscp_qcaps_input (size:192b/24B) */
+struct hwrm_queue_dscp_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t flags;
+ uint64_t resp_addr;
/*
- * When this bit is '1', this VNIC is requested to be the
- * default VNIC for this function.
+ * Port ID of port for which the table is being configured.
+ * The HWRM needs to check whether this function is allowed
+ * to configure pri2cos mapping on this port.
*/
- #define HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT UINT32_C(0x1)
- uint32_t unused_0;
+ uint8_t port_id;
+ uint8_t unused_0[7];
} __attribute__((packed));
-/* Output (16 bytes) */
-struct hwrm_vnic_alloc_output {
- uint16_t error_code;
+/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */
+struct hwrm_queue_dscp_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* The number of bits provided by the hardware for the DSCP value. */
+ uint8_t num_dscp_bits;
+ uint8_t unused_0;
+ /* Max number of DSCP-MASK-PRI entries supported. */
+ uint16_t max_entries;
+ uint8_t unused_1[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/****************************
+ * hwrm_queue_dscp2pri_qcfg *
+ ****************************/
+
+
+/* hwrm_queue_dscp2pri_qcfg_input (size:256b/32B) */
+struct hwrm_queue_dscp2pri_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint16_t cmpl_ring;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint32_t vnic_id;
- /* Logical vnic ID */
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t valid;
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+	 * A physical address pointer to a host buffer into which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
+ uint64_t resp_addr;
+ /*
+	 * This is the host address where the 24-bit DSCP-MASK-PRI
+ * tuple(s) will be copied to.
+ */
+ uint64_t dest_data_addr;
+ /*
+ * Port ID of port for which the table is being configured.
+ * The HWRM needs to check whether this function is allowed
+ * to configure pri2cos mapping on this port.
+ */
+ uint8_t port_id;
+ uint8_t unused_0;
+ /* Size of the buffer pointed to by dest_data_addr. */
+ uint16_t dest_data_buffer_size;
+ uint8_t unused_1[4];
} __attribute__((packed));
-/* hwrm_vnic_free */
-/*
- * Description: Free a VNIC resource. Idle any resources associated with the
- * VNIC as well as the VNIC. Reset and release all resources associated with the
- * VNIC.
- */
-/* Input (24 bytes) */
-struct hwrm_vnic_free_input {
- uint16_t req_type;
+/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */
+struct hwrm_queue_dscp2pri_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * A count of the number of DSCP-MASK-PRI tuple(s) pointed to
+ * by the dest_data_addr.
*/
- uint16_t cmpl_ring;
+ uint16_t entry_cnt;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+	 * This is the default PRI to which un-initialized DSCP values
+	 * are mapped.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint8_t default_pri;
+ uint8_t unused_0[4];
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint64_t resp_addr;
+ uint8_t valid;
+} __attribute__((packed));
+
+/***************************
+ * hwrm_queue_dscp2pri_cfg *
+ ***************************/
+
+
+/* hwrm_queue_dscp2pri_cfg_input (size:320b/40B) */
+struct hwrm_queue_dscp2pri_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer to a host buffer into which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+	 * This is the host address where the 24-bit DSCP-MASK-PRI tuple
+ * will be copied from.
+ */
+ uint64_t src_data_addr;
+ uint32_t flags;
+ /* use_hw_default_pri is 1 b */
+ #define HWRM_QUEUE_DSCP2PRI_CFG_INPUT_FLAGS_USE_HW_DEFAULT_PRI \
+ UINT32_C(0x1)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the default_pri field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_DSCP2PRI_CFG_INPUT_ENABLES_DEFAULT_PRI \
+ UINT32_C(0x1)
+ /*
+ * Port ID of port for which the table is being configured.
+ * The HWRM needs to check whether this function is allowed
+ * to configure pri2cos mapping on this port.
+ */
+ uint8_t port_id;
+ /*
+	 * This is the default PRI to which un-initialized DSCP values
+	 * will be mapped.
+ */
+ uint8_t default_pri;
+ /*
+ * A count of the number of DSCP-MASK-PRI tuple(s) in the data pointed
+ * to by src_data_addr.
+ */
+ uint16_t entry_cnt;
+ uint8_t unused_0[4];
+} __attribute__((packed));
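Editorial sketch (not part of the patch): a possible way to fill the request, assuming tuple_iova is the IOVA of a DMA-able buffer that already holds the DSCP-MASK-PRI tuple(s):

    struct hwrm_queue_dscp2pri_cfg_input req = { 0 };
    req.src_data_addr = rte_cpu_to_le_64(tuple_iova);
    req.enables = rte_cpu_to_le_32(HWRM_QUEUE_DSCP2PRI_CFG_INPUT_ENABLES_DEFAULT_PRI);
    req.default_pri = 0;                 /* PRI for un-initialized DSCP values */
    req.port_id = port_id;               /* port whose table is configured */
    req.entry_cnt = rte_cpu_to_le_16(1); /* one tuple in the buffer */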
+
+/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */
+struct hwrm_queue_dscp2pri_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*******************
+ * hwrm_vnic_alloc *
+ *******************/
+
+
+/* hwrm_vnic_alloc_input (size:192b/24B) */
+struct hwrm_vnic_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer to a host buffer into which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * When this bit is '1', this VNIC is requested to
+ * be the default VNIC for this function.
*/
- uint32_t vnic_id;
+ #define HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT UINT32_C(0x1)
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_vnic_alloc_output (size:128b/16B) */
+struct hwrm_vnic_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/* Logical vnic ID */
- uint32_t unused_0;
+ uint32_t vnic_id;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
} __attribute__((packed));
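Editorial sketch (not part of the patch): requesting the default VNIC for the function; the command send path and the response pointer are assumed to exist elsewhere in the driver:

    struct hwrm_vnic_alloc_input req = { 0 };
    req.flags = rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
    /* ... issue the command; on success read the logical ID back ... */
    uint32_t vnic_id = rte_le_to_cpu_32(resp->vnic_id); /* resp: assumed hwrm_vnic_alloc_output */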
-/* Output (16 bytes) */
-struct hwrm_vnic_free_output {
- uint16_t error_code;
+/******************
+ * hwrm_vnic_free *
+ ******************/
+
+
+/* hwrm_vnic_free_input (size:192b/24B) */
+struct hwrm_vnic_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint16_t cmpl_ring;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer to a host buffer into which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Logical vnic ID */
+ uint32_t vnic_id;
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_vnic_free_output (size:128b/16B) */
+struct hwrm_vnic_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* hwrm_vnic_cfg */
-/* Description: Configure the RX VNIC structure. */
-/* Input (40 bytes) */
+/*****************
+ * hwrm_vnic_cfg *
+ *****************/
+
+
+/* hwrm_vnic_cfg_input (size:320b/40B) */
struct hwrm_vnic_cfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+	 * A physical address pointer to a host buffer into which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t flags;
+ uint64_t resp_addr;
+ uint32_t flags;
/*
- * When this bit is '1', the VNIC is requested to be the default
- * VNIC for the function.
+ * When this bit is '1', the VNIC is requested to
+ * be the default VNIC for the function.
*/
- #define HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT UINT32_C(0x1)
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT \
+ UINT32_C(0x1)
/*
- * When this bit is '1', the VNIC is being configured to strip
- * VLAN in the RX path. If set to '0', then VLAN stripping is
- * disabled on this VNIC.
+ * When this bit is '1', the VNIC is being configured to
+ * strip VLAN in the RX path.
+ * If set to '0', then VLAN stripping is disabled on
+ * this VNIC.
*/
- #define HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE UINT32_C(0x2)
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE \
+ UINT32_C(0x2)
/*
- * When this bit is '1', the VNIC is being configured to buffer
- * receive packets in the hardware until the host posts new
- * receive buffers. If set to '0', then bd_stall is being
- * configured to be disabled on this VNIC.
+ * When this bit is '1', the VNIC is being configured to
+ * buffer receive packets in the hardware until the host
+ * posts new receive buffers.
+ * If set to '0', then bd_stall is being configured to be
+ * disabled on this VNIC.
*/
- #define HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE UINT32_C(0x4)
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE \
+ UINT32_C(0x4)
/*
- * When this bit is '1', the VNIC is being configured to receive
- * both RoCE and non-RoCE traffic. If set to '0', then this VNIC
- * is not configured to be operating in dual VNIC mode.
+ * When this bit is '1', the VNIC is being configured to
+ * receive both RoCE and non-RoCE traffic.
+ * If set to '0', then this VNIC is not configured to be
+ * operating in dual VNIC mode.
*/
- #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE UINT32_C(0x8)
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE \
+ UINT32_C(0x8)
/*
- * When this flag is set to '1', the VNIC is requested to be
- * configured to receive only RoCE traffic. If this flag is set
- * to '0', then this flag shall be ignored by the HWRM. If
- * roce_dual_vnic_mode flag is set to '1', then the HWRM client
- * shall not set this flag to '1'.
+ * When this flag is set to '1', the VNIC is requested to
+ * be configured to receive only RoCE traffic.
+ * If this flag is set to '0', then this flag shall be
+ * ignored by the HWRM.
+	 * If the roce_dual_vnic_mode flag or the
+	 * roce_mirroring_capable_vnic_mode flag is set to '1',
+ * then the HWRM client shall not set this flag to '1'.
*/
- #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE UINT32_C(0x10)
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE \
+ UINT32_C(0x10)
/*
* When a VNIC uses one destination ring group for certain
- * application (e.g. Receive Flow Steering) where exact match is
- * used to direct packets to a VNIC with one destination ring
- * group only, there is no need to configure RSS indirection
- * table for that VNIC as only one destination ring group is
- * used. This flag is used to enable a mode where RSS is enabled
- * in the VNIC using a RSS context for computing RSS hash but
- * the RSS indirection table is not configured using
- * hwrm_vnic_rss_cfg. If this mode is enabled, then the driver
- * should not program RSS indirection table for the RSS context
- * that is used for computing RSS hash only.
- */
- #define HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE UINT32_C(0x20)
- /*
- * When this bit is '1', the VNIC is being configured to receive
- * both RoCE and non-RoCE traffic, but forward only the RoCE
- * traffic further. Also, RoCE traffic can be mirrored to L2
- * driver.
+ * application (e.g. Receive Flow Steering) where
+ * exact match is used to direct packets to a VNIC with one
+ * destination ring group only, there is no need to configure
+ * RSS indirection table for that VNIC as only one destination
+ * ring group is used.
+ *
+ * This flag is used to enable a mode where
+ * RSS is enabled in the VNIC using a RSS context
+ * for computing RSS hash but the RSS indirection table is
+ * not configured using hwrm_vnic_rss_cfg.
+ *
+ * If this mode is enabled, then the driver should not program
+ * RSS indirection table for the RSS context that is used for
+ * computing RSS hash only.
+ */
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE \
+ UINT32_C(0x20)
+ /*
+ * When this bit is '1', the VNIC is being configured to
+ * receive both RoCE and non-RoCE traffic, but forward only the
+ * RoCE traffic further. Also, RoCE traffic can be mirrored to
+ * L2 driver.
*/
#define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE \
- UINT32_C(0x40)
- uint32_t enables;
+ UINT32_C(0x40)
+ uint32_t enables;
/*
* This bit must be '1' for the dflt_ring_grp field to be
* configured.
*/
- #define HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP UINT32_C(0x1)
- /* This bit must be '1' for the rss_rule field to be configured. */
- #define HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE UINT32_C(0x2)
- /* This bit must be '1' for the cos_rule field to be configured. */
- #define HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE UINT32_C(0x4)
- /* This bit must be '1' for the lb_rule field to be configured. */
- #define HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE UINT32_C(0x8)
- /* This bit must be '1' for the mru field to be configured. */
- #define HWRM_VNIC_CFG_INPUT_ENABLES_MRU UINT32_C(0x10)
- uint16_t vnic_id;
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the rss_rule field to be
+ * configured.
+ */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the cos_rule field to be
+ * configured.
+ */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the lb_rule field to be
+ * configured.
+ */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the mru field to be
+ * configured.
+ */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_MRU \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the default_rx_ring_id field to be
+ * configured.
+ */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the default_cmpl_ring_id field to be
+ * configured.
+ */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID \
+ UINT32_C(0x40)
/* Logical vnic ID */
- uint16_t dflt_ring_grp;
+ uint16_t vnic_id;
/*
- * Default Completion ring for the VNIC. This ring will be
- * chosen if packet does not match any RSS rules and if there is
- * no COS rule.
+ * Default Completion ring for the VNIC. This ring will
+	 * be chosen if a packet does not match any RSS rules and if
+ * there is no COS rule.
*/
- uint16_t rss_rule;
+ uint16_t dflt_ring_grp;
/*
- * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if
+ * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if
* there is no RSS rule.
*/
- uint16_t cos_rule;
+ uint16_t rss_rule;
/*
- * RSS ID for COS rule/table structure. 0xFF... (All Fs) if
+ * RSS ID for COS rule/table structure. 0xFF... (All Fs) if
* there is no COS rule.
*/
- uint16_t lb_rule;
+ uint16_t cos_rule;
/*
- * RSS ID for load balancing rule/table structure. 0xFF... (All
- * Fs) if there is no LB rule.
+ * RSS ID for load balancing rule/table structure.
+ * 0xFF... (All Fs) if there is no LB rule.
*/
- uint16_t mru;
+ uint16_t lb_rule;
/*
- * The maximum receive unit of the vnic. Each vnic is associated
- * with a function. The vnic mru value overwrites the mru
- * setting of the associated function. The HWRM shall make sure
- * that vnic mru does not exceed the mru of the port the
- * function is associated with.
+ * The maximum receive unit of the vnic.
+ * Each vnic is associated with a function.
+ * The vnic mru value overwrites the mru setting of the
+ * associated function.
+ * The HWRM shall make sure that vnic mru does not exceed
+ * the mru of the port the function is associated with.
*/
- uint32_t unused_0;
-} __attribute__((packed));
-
-/* Output (16 bytes) */
-struct hwrm_vnic_cfg_output {
- uint16_t error_code;
+ uint16_t mru;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * Default Rx ring for the VNIC. This ring will
+	 * be chosen if a packet does not match any RSS rules.
+ * The aggregation ring associated with the Rx ring is
+ * implied based on the Rx ring specified when the
+ * aggregation ring was allocated.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint16_t default_rx_ring_id;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * Default completion ring for the VNIC. This ring will
+	 * be chosen if a packet does not match any RSS rules.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t default_cmpl_ring_id;
+} __attribute__((packed));
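Editorial sketch (not part of the patch): only fields whose bit is set in enables are applied, so a caller sets the bits and the fields together; the ring-group ID and MRU below are placeholders:

    struct hwrm_vnic_cfg_input req = { 0 };
    req.vnic_id = rte_cpu_to_le_16(vnic_id);
    req.enables = rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
                                   HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
    req.dflt_ring_grp = rte_cpu_to_le_16(ring_grp_id); /* placeholder ring group ID */
    req.mru = rte_cpu_to_le_16(1514);                  /* placeholder MRU in bytes */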
+
+/* hwrm_vnic_cfg_output (size:128b/16B) */
+struct hwrm_vnic_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* hwrm_vnic_qcfg */
-/*
- * Description: Query the RX VNIC structure. This function can be used by a PF
- * driver to query its own VNIC resource or VNIC resource of its child VF. This
- * function can also be used by a VF driver to query its own VNIC resource.
- */
-/* Input (32 bytes) */
+/******************
+ * hwrm_vnic_qcfg *
+ ******************/
+
+
+/* hwrm_vnic_qcfg_input (size:256b/32B) */
struct hwrm_vnic_qcfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+	 * A physical address pointer to a host buffer into which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the vf_id_valid field to be
+ * configured.
*/
- uint32_t enables;
- /* This bit must be '1' for the vf_id_valid field to be configured. */
- #define HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID UINT32_C(0x1)
- uint32_t vnic_id;
+ #define HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID UINT32_C(0x1)
/* Logical vnic ID */
- uint16_t vf_id;
+ uint32_t vnic_id;
/* ID of Virtual Function whose VNIC resource is being queried. */
- uint16_t unused_0[3];
+ uint16_t vf_id;
+ uint8_t unused_0[6];
} __attribute__((packed));
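Editorial sketch (not part of the patch): a PF querying the VNIC of one of its VFs; vf_id is honoured only when the VF_ID_VALID bit is set in enables:

    struct hwrm_vnic_qcfg_input req = { 0 };
    req.vnic_id = rte_cpu_to_le_32(vnic_id);
    req.enables = rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
    req.vf_id = rte_cpu_to_le_16(vf_id); /* VF whose VNIC resource is being queried */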
-/* Output (32 bytes) */
+/* hwrm_vnic_qcfg_output (size:256b/32B) */
struct hwrm_vnic_qcfg_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint16_t dflt_ring_grp;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/* Default Completion ring for the VNIC. */
- uint16_t rss_rule;
+ uint16_t dflt_ring_grp;
/*
- * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if
+ * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if
* there is no RSS rule.
*/
- uint16_t cos_rule;
+ uint16_t rss_rule;
/*
- * RSS ID for COS rule/table structure. 0xFF... (All Fs) if
+ * RSS ID for COS rule/table structure. 0xFF... (All Fs) if
* there is no COS rule.
*/
- uint16_t lb_rule;
+ uint16_t cos_rule;
/*
- * RSS ID for load balancing rule/table structure. 0xFF... (All
- * Fs) if there is no LB rule.
+ * RSS ID for load balancing rule/table structure.
+ * 0xFF... (All Fs) if there is no LB rule.
*/
- uint16_t mru;
+ uint16_t lb_rule;
/* The maximum receive unit of the vnic. */
- uint8_t unused_0;
- uint8_t unused_1;
- uint32_t flags;
+ uint16_t mru;
+ uint8_t unused_0[2];
+ uint32_t flags;
/*
- * When this bit is '1', the VNIC is the default VNIC for the
- * function.
+ * When this bit is '1', the VNIC is the default VNIC for
+ * the function.
*/
- #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT UINT32_C(0x1)
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT \
+ UINT32_C(0x1)
/*
- * When this bit is '1', the VNIC is configured to strip VLAN in
- * the RX path. If set to '0', then VLAN stripping is disabled
- * on this VNIC.
+ * When this bit is '1', the VNIC is configured to
+ * strip VLAN in the RX path.
+ * If set to '0', then VLAN stripping is disabled on
+ * this VNIC.
*/
- #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE UINT32_C(0x2)
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE \
+ UINT32_C(0x2)
/*
- * When this bit is '1', the VNIC is configured to buffer
- * receive packets in the hardware until the host posts new
- * receive buffers. If set to '0', then bd_stall is disabled on
+ * When this bit is '1', the VNIC is configured to
+ * buffer receive packets in the hardware until the host
+ * posts new receive buffers.
+ * If set to '0', then bd_stall is disabled on
* this VNIC.
*/
- #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE UINT32_C(0x4)
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE \
+ UINT32_C(0x4)
/*
- * When this bit is '1', the VNIC is configured to receive both
- * RoCE and non-RoCE traffic. If set to '0', then this VNIC is
- * not configured to operate in dual VNIC mode.
+ * When this bit is '1', the VNIC is configured to
+ * receive both RoCE and non-RoCE traffic.
+ * If set to '0', then this VNIC is not configured to
+ * operate in dual VNIC mode.
*/
- #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE UINT32_C(0x8)
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE \
+ UINT32_C(0x8)
/*
* When this flag is set to '1', the VNIC is configured to
- * receive only RoCE traffic. When this flag is set to '0', the
- * VNIC is not configured to receive only RoCE traffic. If
- * roce_dual_vnic_mode flag and this flag both are set to '1',
- * then it is an invalid configuration of the VNIC. The HWRM
- * should not allow that type of mis-configuration by HWRM
- * clients.
- */
- #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE UINT32_C(0x10)
+ * receive only RoCE traffic.
+ * When this flag is set to '0', the VNIC is not configured
+ * to receive only RoCE traffic.
+ * If roce_dual_vnic_mode flag and this flag both are set
+ * to '1', then it is an invalid configuration of the
+ * VNIC. The HWRM should not allow that type of
+ * mis-configuration by HWRM clients.
+ */
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE \
+ UINT32_C(0x10)
/*
* When a VNIC uses one destination ring group for certain
- * application (e.g. Receive Flow Steering) where exact match is
- * used to direct packets to a VNIC with one destination ring
- * group only, there is no need to configure RSS indirection
- * table for that VNIC as only one destination ring group is
- * used. When this bit is set to '1', then the VNIC is enabled
- * in a mode where RSS is enabled in the VNIC using a RSS
- * context for computing RSS hash but the RSS indirection table
- * is not configured.
- */
- #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE UINT32_C(0x20)
+ * application (e.g. Receive Flow Steering) where
+ * exact match is used to direct packets to a VNIC with one
+ * destination ring group only, there is no need to configure
+ * RSS indirection table for that VNIC as only one destination
+ * ring group is used.
+ *
+ * When this bit is set to '1', then the VNIC is enabled in a
+ * mode where RSS is enabled in the VNIC using a RSS context
+ * for computing RSS hash but the RSS indirection table is
+ * not configured.
+ */
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE \
+ UINT32_C(0x20)
/*
- * When this bit is '1', the VNIC is configured to receive both
- * RoCE and non-RoCE traffic, but forward only RoCE traffic
- * further. Also RoCE traffic can be mirrored to L2 driver.
+ * When this bit is '1', the VNIC is configured to
+ * receive both RoCE and non-RoCE traffic, but forward only
+ * RoCE traffic further. Also RoCE traffic can be mirrored to
+ * L2 driver.
*/
#define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE \
- UINT32_C(0x40)
- uint32_t unused_2;
- uint8_t unused_3;
- uint8_t unused_4;
- uint8_t unused_5;
- uint8_t valid;
+ UINT32_C(0x40)
+ uint8_t unused_1[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
+/*******************
+ * hwrm_vnic_qcaps *
+ *******************/
+
+
+/* hwrm_vnic_qcaps_input (size:192b/24B) */
+struct hwrm_vnic_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer to a host buffer into which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ uint8_t unused_0[4];
+} __attribute__((packed));
-/* hwrm_vnic_tpa_cfg */
-/* Description: This function is used to enable/configure TPA on the VNIC. */
-/* Input (40 bytes) */
+/* hwrm_vnic_qcaps_output (size:192b/24B) */
+struct hwrm_vnic_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* The maximum receive unit that is settable on a vnic. */
+ uint16_t mru;
+ uint8_t unused_0[2];
+ uint32_t flags;
+ /* Unused. */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_UNUSED \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', the capability of stripping VLAN in
+ * the RX path is supported on VNIC(s).
+ * If set to '0', then VLAN stripping capability is
+ * not supported on VNIC(s).
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_VLAN_STRIP_CAP \
+ UINT32_C(0x2)
+ /*
+ * When this bit is '1', the capability to buffer receive
+ * packets in the hardware until the host posts new receive buffers
+ * is supported on VNIC(s).
+ * If set to '0', then bd_stall capability is not supported
+ * on VNIC(s).
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_BD_STALL_CAP \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', the capability to
+ * receive both RoCE and non-RoCE traffic on VNIC(s) is
+ * supported.
+ * If set to '0', then the capability to receive
+ * both RoCE and non-RoCE traffic on VNIC(s) is
+ * not supported.
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_ROCE_DUAL_VNIC_CAP \
+ UINT32_C(0x8)
+ /*
+ * When this bit is set to '1', the capability to configure
+ * a VNIC to receive only RoCE traffic is supported.
+ * When this flag is set to '0', the VNIC capability to
+ * configure to receive only RoCE traffic is not supported.
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_ROCE_ONLY_VNIC_CAP \
+ UINT32_C(0x10)
+ /*
+	 * When this bit is set to '1', the capability to enable
+	 * a VNIC in a mode where an RSS context is used for RSS
+	 * hash computation without configuring the RSS indirection
+	 * table is supported.
+	 * When this bit is set to '0', a VNIC cannot be configured
+	 * in such a mode.
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_DFLT_CR_CAP \
+ UINT32_C(0x20)
+ /*
+ * When this bit is '1', the capability to
+	 * mirror the RoCE traffic is supported.
+ * If set to '0', then the capability to mirror the
+ * RoCE traffic is not supported.
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP \
+ UINT32_C(0x40)
+ /*
+ * When this bit is '1', the outermost RSS hashing capability
+ * is supported. If set to '0', then the outermost RSS hashing
+ * capability is not supported.
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_CAP \
+ UINT32_C(0x80)
+ uint8_t unused_1[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
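Editorial sketch (not part of the patch): testing one capability bit from the qcaps response:

    uint32_t caps = rte_le_to_cpu_32(resp->flags); /* resp: assumed hwrm_vnic_qcaps_output */
    int vlan_strip_supported = !!(caps & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_VLAN_STRIP_CAP);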
+
+/*********************
+ * hwrm_vnic_tpa_cfg *
+ *********************/
+
+
+/* hwrm_vnic_tpa_cfg_input (size:320b/40B) */
struct hwrm_vnic_tpa_cfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+	 * A physical address pointer to a host buffer into which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t flags;
+ uint64_t resp_addr;
+ uint32_t flags;
/*
- * When this bit is '1', the VNIC shall be configured to perform
- * transparent packet aggregation (TPA) of non-tunneled TCP
- * packets.
+ * When this bit is '1', the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) of
+ * non-tunneled TCP packets.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA UINT32_C(0x1)
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA \
+ UINT32_C(0x1)
/*
- * When this bit is '1', the VNIC shall be configured to perform
- * transparent packet aggregation (TPA) of tunneled TCP packets.
+ * When this bit is '1', the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) of
+ * tunneled TCP packets.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA UINT32_C(0x2)
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA \
+ UINT32_C(0x2)
/*
- * When this bit is '1', the VNIC shall be configured to perform
- * transparent packet aggregation (TPA) according to Windows
- * Receive Segment Coalescing (RSC) rules.
+ * When this bit is '1', the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) according
+ * to Windows Receive Segment Coalescing (RSC) rules.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE UINT32_C(0x4)
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE \
+ UINT32_C(0x4)
/*
- * When this bit is '1', the VNIC shall be configured to perform
- * transparent packet aggregation (TPA) according to Linux
- * Generic Receive Offload (GRO) rules.
+ * When this bit is '1', the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) according
+ * to Linux Generic Receive Offload (GRO) rules.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO UINT32_C(0x8)
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO \
+ UINT32_C(0x8)
/*
- * When this bit is '1', the VNIC shall be configured to perform
- * transparent packet aggregation (TPA) for TCP packets with IP
- * ECN set to non-zero.
+ * When this bit is '1', the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) for TCP
+ * packets with IP ECN set to non-zero.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN UINT32_C(0x10)
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN \
+ UINT32_C(0x10)
/*
- * When this bit is '1', the VNIC shall be configured to perform
- * transparent packet aggregation (TPA) for GRE tunneled TCP
- * packets only if all packets have the same GRE sequence.
+ * When this bit is '1', the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) for
+ * GRE tunneled TCP packets only if all packets have the
+ * same GRE sequence.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ \
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ \
UINT32_C(0x20)
/*
- * When this bit is '1' and the GRO mode is enabled, the VNIC
- * shall be configured to perform transparent packet aggregation
- * (TPA) for TCP/IPv4 packets with consecutively increasing
- * IPIDs. In other words, the last packet that is being
- * aggregated to an already existing aggregation context shall
- * have IPID 1 more than the IPID of the last packet that was
- * aggregated in that aggregation context.
+ * When this bit is '1' and the GRO mode is enabled,
+ * the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) for
+ * TCP/IPv4 packets with consecutively increasing IPIDs.
+ * In other words, the last packet that is being
+ * aggregated to an already existing aggregation context
+ * shall have IPID 1 more than the IPID of the last packet
+ * that was aggregated in that aggregation context.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_IPID_CHECK \
+ UINT32_C(0x40)
+ /*
+ * When this bit is '1' and the GRO mode is enabled,
+ * the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) for
+ * TCP packets with the same TTL (IPv4) or Hop limit (IPv6)
+ * value.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_IPID_CHECK UINT32_C(0x40)
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_TTL_CHECK \
+ UINT32_C(0x80)
+ uint32_t enables;
/*
- * When this bit is '1' and the GRO mode is enabled, the VNIC
- * shall be configured to perform transparent packet aggregation
- * (TPA) for TCP packets with the same TTL (IPv4) or Hop limit
- * (IPv6) value.
+ * This bit must be '1' for the max_agg_segs field to be
+ * configured.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_TTL_CHECK UINT32_C(0x80)
- uint32_t enables;
- /* This bit must be '1' for the max_agg_segs field to be configured. */
- #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS UINT32_C(0x1)
- /* This bit must be '1' for the max_aggs field to be configured. */
- #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS UINT32_C(0x2)
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the max_aggs field to be
+ * configured.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS UINT32_C(0x2)
/*
* This bit must be '1' for the max_agg_timer field to be
* configured.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_TIMER UINT32_C(0x4)
- /* This bit must be '1' for the min_agg_len field to be configured. */
- #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN UINT32_C(0x8)
- uint16_t vnic_id;
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_TIMER UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the min_agg_len field to be
+ * configured.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN UINT32_C(0x8)
/* Logical vnic ID */
- uint16_t max_agg_segs;
+ uint16_t vnic_id;
/*
- * This is the maximum number of TCP segments that can be
- * aggregated (unit is Log2). Max value is 31.
+ * This is the maximum number of TCP segments that can
+ * be aggregated (unit is Log2). Max value is 31.
*/
+ uint16_t max_agg_segs;
/* 1 segment */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_1 UINT32_C(0x0)
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_1 UINT32_C(0x0)
/* 2 segments */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_2 UINT32_C(0x1)
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_2 UINT32_C(0x1)
/* 4 segments */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_4 UINT32_C(0x2)
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_4 UINT32_C(0x2)
/* 8 segments */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8 UINT32_C(0x3)
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8 UINT32_C(0x3)
/* Any segment size larger than this is not valid */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX UINT32_C(0x1f)
- uint16_t max_aggs;
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX UINT32_C(0x1f)
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_LAST \
+ HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX
/*
* This is the maximum number of aggregations this VNIC is
- * allowed (unit is Log2). Max value is 7
+ * allowed (unit is Log2). Max value is 7
*/
+ uint16_t max_aggs;
/* 1 aggregation */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_1 UINT32_C(0x0)
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_1 UINT32_C(0x0)
/* 2 aggregations */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_2 UINT32_C(0x1)
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_2 UINT32_C(0x1)
/* 4 aggregations */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_4 UINT32_C(0x2)
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_4 UINT32_C(0x2)
/* 8 aggregations */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_8 UINT32_C(0x3)
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_8 UINT32_C(0x3)
/* 16 aggregations */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_16 UINT32_C(0x4)
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_16 UINT32_C(0x4)
/* Any aggregation size larger than this is not valid */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX UINT32_C(0x7)
- uint8_t unused_0;
- uint8_t unused_1;
- uint32_t max_agg_timer;
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX UINT32_C(0x7)
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_LAST \
+ HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX
+ uint8_t unused_0[2];
/*
- * This is the maximum amount of time allowed for an aggregation
- * context to complete after it was initiated.
+ * This is the maximum amount of time allowed for
+ * an aggregation context to complete after it was initiated.
*/
- uint32_t min_agg_len;
+ uint32_t max_agg_timer;
/*
* This is the minimum amount of payload length required to
* start an aggregation context.
*/
+ uint32_t min_agg_len;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */
struct hwrm_vnic_tpa_cfg_output {
- uint16_t error_code;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
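As an illustrative aside (not part of the generated header), the sketch below shows how a driver might fill hwrm_vnic_tpa_cfg_input to enable GRO-based TPA on one VNIC. The structure, flag and field names are taken from the definition above; rte_cpu_to_le_*() comes from DPDK's rte_byteorder.h, and the actual request/response transport (request header setup, doorbell, completion handling) is assumed and omitted.

#include <rte_byteorder.h>

/*
 * Hypothetical sketch only: populate a TPA configuration request.
 * Sending the message to firmware is driver plumbing not shown here.
 */
static void
example_fill_tpa_cfg(struct hwrm_vnic_tpa_cfg_input *req, uint16_t vnic_id)
{
	req->flags = rte_cpu_to_le_32(HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
				      HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO);
	/* Firmware only applies the fields whose 'enables' bit is set. */
	req->enables = rte_cpu_to_le_32(
		HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
		HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS);
	req->vnic_id = rte_cpu_to_le_16(vnic_id);
	/* Both limits are Log2 encoded: 0x3 => 8 segments, 0x4 => 16 aggs. */
	req->max_agg_segs =
		rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8);
	req->max_aggs = rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_16);
}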
+/**********************
+ * hwrm_vnic_tpa_qcfg *
+ **********************/
+
+
+/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_tpa_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer to the host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Logical vnic ID */
+ uint16_t vnic_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */
+struct hwrm_vnic_tpa_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint32_t flags;
+ /*
+ * When this bit is '1', the VNIC is configured to
+ * perform transparent packet aggregation (TPA) of
+ * non-tunneled TCP packets.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_TPA \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', the VNIC is configured to
+ * perform transparent packet aggregation (TPA) of
+ * tunneled TCP packets.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_ENCAP_TPA \
+ UINT32_C(0x2)
+ /*
+ * When this bit is '1', the VNIC is configured to
+ * perform transparent packet aggregation (TPA) according
+ * to Windows Receive Segment Coalescing (RSC) rules.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_RSC_WND_UPDATE \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', the VNIC is configured to
+ * perform transparent packet aggregation (TPA) according
+ * to Linux Generic Receive Offload (GRO) rules.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_GRO \
+ UINT32_C(0x8)
+ /*
+ * When this bit is '1', the VNIC is configured to
+ * perform transparent packet aggregation (TPA) for TCP
+ * packets with IP ECN set to non-zero.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_AGG_WITH_ECN \
+ UINT32_C(0x10)
+ /*
+ * When this bit is '1', the VNIC is configured to
+ * perform transparent packet aggregation (TPA) for
+ * GRE tunneled TCP packets only if all packets have the
+ * same GRE sequence.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ \
+ UINT32_C(0x20)
+ /*
+ * When this bit is '1' and the GRO mode is enabled,
+ * the VNIC is configured to
+ * perform transparent packet aggregation (TPA) for
+ * TCP/IPv4 packets with consecutively increasing IPIDs.
+ * In other words, the last packet that is being
+ * aggregated to an already existing aggregation context
+ * shall have IPID 1 more than the IPID of the last packet
+ * that was aggregated in that aggregation context.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_GRO_IPID_CHECK \
+ UINT32_C(0x40)
+ /*
+ * When this bit is '1' and the GRO mode is enabled,
+ * the VNIC is configured to
+ * perform transparent packet aggregation (TPA) for
+ * TCP packets with the same TTL (IPv4) or Hop limit (IPv6)
+ * value.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_GRO_TTL_CHECK \
+ UINT32_C(0x80)
+ /*
+ * This is the maximum number of TCP segments that can
+ * be aggregated (unit is Log2). Max value is 31.
+ */
+ uint16_t max_agg_segs;
+ /* 1 segment */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_1 UINT32_C(0x0)
+ /* 2 segments */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_2 UINT32_C(0x1)
+ /* 4 segments */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_4 UINT32_C(0x2)
+ /* 8 segments */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_8 UINT32_C(0x3)
+ /* Any segment size larger than this is not valid */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_MAX UINT32_C(0x1f)
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_LAST \
+ HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_MAX
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * This is the maximum number of aggregations this VNIC is
+ * allowed (unit is Log2). Max value is 7
+ */
+ uint16_t max_aggs;
+ /* 1 aggregation */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_1 UINT32_C(0x0)
+ /* 2 aggregations */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_2 UINT32_C(0x1)
+ /* 4 aggregations */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_4 UINT32_C(0x2)
+ /* 8 aggregations */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_8 UINT32_C(0x3)
+ /* 16 aggregations */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_16 UINT32_C(0x4)
+ /* Any aggregation size larger than this is not valid */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_MAX UINT32_C(0x7)
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_LAST \
+ HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_MAX
+ /*
+ * This is the maximum amount of time allowed for
+ * an aggregation context to complete after it was initiated.
+ */
+ uint32_t max_agg_timer;
+ /*
+ * This is the minimum amount of payload length required to
+ * start an aggregation context.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint32_t min_agg_len;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* hwrm_vnic_rss_cfg */
-/* Description: This function is used to enable RSS configuration. */
-/* Input (48 bytes) */
+/*********************
+ * hwrm_vnic_rss_cfg *
+ *********************/
+
+
+/* hwrm_vnic_rss_cfg_input (size:384b/48B) */
struct hwrm_vnic_rss_cfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+	 * A physical address pointer to the host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t hash_type;
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source and destination IPv4 addresses of IPv4
+ * packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 UINT32_C(0x1)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv4 addresses and
+ * source/destination ports of TCP/IPv4 packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 UINT32_C(0x2)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv4 addresses and
+ * source/destination ports of UDP/IPv4 packets.
*/
- uint32_t hash_type;
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 UINT32_C(0x4)
/*
- * When this bit is '1', the RSS hash shall be computed over
- * source and destination IPv4 addresses of IPv4 packets.
+ * When this bit is '1', the RSS hash shall be computed
+	 * over source and destination IPv6 addresses of IPv6
+ * packets.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 UINT32_C(0x1)
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 UINT32_C(0x8)
/*
- * When this bit is '1', the RSS hash shall be computed over
- * source/destination IPv4 addresses and source/destination
- * ports of TCP/IPv4 packets.
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv6 addresses and
+ * source/destination ports of TCP/IPv6 packets.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 UINT32_C(0x2)
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 UINT32_C(0x10)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv6 addresses and
+ * source/destination ports of UDP/IPv6 packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6 UINT32_C(0x20)
+ /* VNIC ID of VNIC associated with RSS table being configured. */
+ uint16_t vnic_id;
+ /*
+ * Specifies which VNIC ring table pair to configure.
+ * Valid values range from 0 to 7.
+ */
+ uint8_t ring_table_pair_index;
+ /* Flags to specify different RSS hash modes. */
+ uint8_t hash_mode_flags;
+ /*
+ * When this bit is '1', it indicates using current RSS
+ * hash mode setting configured in the device.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT \
+ UINT32_C(0x1)
/*
- * When this bit is '1', the RSS hash shall be computed over
- * source/destination IPv4 addresses and source/destination
- * ports of UDP/IPv4 packets.
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over innermost 4 tuples {l3.src, l3.dest,
+	 * l4.src, l4.dest} for tunnel packets. For non-tunnel
+ * packets, the RSS hash is computed over the normal
+ * src/dest l3 and src/dest l4 headers.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 UINT32_C(0x4)
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_4 \
+ UINT32_C(0x2)
/*
- * When this bit is '1', the RSS hash shall be computed over
- * source and destination IPv4 addresses of IPv6 packets.
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over innermost 2 tuples {l3.src, l3.dest} for
+	 * tunnel packets. For non-tunnel packets, the RSS hash is
+ * computed over the normal src/dest l3 headers.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 UINT32_C(0x8)
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_2 \
+ UINT32_C(0x4)
/*
- * When this bit is '1', the RSS hash shall be computed over
- * source/destination IPv6 addresses and source/destination
- * ports of TCP/IPv6 packets.
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over outermost 4 tuples {t_l3.src, t_l3.dest,
+	 * t_l4.src, t_l4.dest} for tunnel packets. For non-tunnel
+ * packets, the RSS hash is computed over the normal
+ * src/dest l3 and src/dest l4 headers.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 UINT32_C(0x10)
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_4 \
+ UINT32_C(0x8)
/*
- * When this bit is '1', the RSS hash shall be computed over
- * source/destination IPv6 addresses and source/destination
- * ports of UDP/IPv6 packets.
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over outermost 2 tuples {t_l3.src, t_l3.dest} for
+	 * tunnel packets. For non-tunnel packets, the RSS hash is
+ * computed over the normal src/dest l3 headers.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6 UINT32_C(0x20)
- uint32_t unused_0;
- uint64_t ring_grp_tbl_addr;
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_2 \
+ UINT32_C(0x10)
/* This is the address for rss ring group table */
- uint64_t hash_key_tbl_addr;
+ uint64_t ring_grp_tbl_addr;
/* This is the address for rss hash key table */
- uint16_t rss_ctx_idx;
+ uint64_t hash_key_tbl_addr;
/* Index to the rss indirection table. */
- uint16_t unused_1[3];
+ uint16_t rss_ctx_idx;
+ uint8_t unused_1[6];
} __attribute__((packed));
-/* Output (16 bytes) */
+/* hwrm_vnic_rss_cfg_output (size:128b/16B) */
struct hwrm_vnic_rss_cfg_output {
- uint16_t error_code;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
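Purely as an illustration, and not part of this header, the fragment below sketches filling hwrm_vnic_rss_cfg_input for IPv4/IPv6 and TCP 4-tuple hashing. The ring table and hash key IOVAs, and the rss_ctx value (a context ID returned earlier by HWRM_VNIC_RSS_COS_LB_CTX_ALLOC), are assumed to be supplied by the caller; the helper name and its parameters are invented for the example.

#include <rte_byteorder.h>

/*
 * Hypothetical sketch only: request RSS over IPv4/IPv6 and TCP 4-tuples.
 * ring_tbl_iova and key_iova are DMA-able addresses of the ring group
 * table and the hash key buffer referenced by the request.
 */
static void
example_fill_rss_cfg(struct hwrm_vnic_rss_cfg_input *req, uint16_t vnic_id,
		     uint64_t ring_tbl_iova, uint64_t key_iova,
		     uint16_t rss_ctx)
{
	req->hash_type = rte_cpu_to_le_32(
		HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
		HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 |
		HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 |
		HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6);
	req->vnic_id = rte_cpu_to_le_16(vnic_id);
	req->ring_grp_tbl_addr = rte_cpu_to_le_64(ring_tbl_iova);
	req->hash_key_tbl_addr = rte_cpu_to_le_64(key_iova);
	req->rss_ctx_idx = rte_cpu_to_le_16(rss_ctx);
}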
+/**********************
+ * hwrm_vnic_rss_qcfg *
+ **********************/
+
+
+/* hwrm_vnic_rss_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_rss_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t seq_id;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
+ uint16_t target_id;
+ /*
+	 * A physical address pointer to the host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Index to the rss indirection table. */
+ uint16_t rss_ctx_idx;
+ uint8_t unused_0[6];
} __attribute__((packed));
-/* hwrm_vnic_plcmodes_cfg */
-/*
- * Description: This function can be used to set placement mode configuration of
- * the VNIC.
- */
-/* Input (40 bytes) */
+/* hwrm_vnic_rss_qcfg_output (size:512b/64B) */
+struct hwrm_vnic_rss_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint32_t hash_type;
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source and destination IPv4 addresses of IPv4
+ * packets.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_IPV4 UINT32_C(0x1)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv4 addresses and
+ * source/destination ports of TCP/IPv4 packets.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_TCP_IPV4 UINT32_C(0x2)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv4 addresses and
+ * source/destination ports of UDP/IPv4 packets.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_UDP_IPV4 UINT32_C(0x4)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+	 * over source and destination IPv6 addresses of IPv6
+ * packets.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_IPV6 UINT32_C(0x8)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv6 addresses and
+ * source/destination ports of TCP/IPv6 packets.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_TCP_IPV6 UINT32_C(0x10)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv6 addresses and
+ * source/destination ports of UDP/IPv6 packets.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_UDP_IPV6 UINT32_C(0x20)
+ uint8_t unused_0[4];
+ /* This is the value of rss hash key */
+ uint32_t hash_key[10];
+ /* Flags to specify different RSS hash modes. */
+ uint8_t hash_mode_flags;
+ /*
+ * When this bit is '1', it indicates using current RSS
+ * hash mode setting configured in the device.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_DEFAULT \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over innermost 4 tuples {l3.src, l3.dest,
+	 * l4.src, l4.dest} for tunnel packets. For non-tunnel
+ * packets, the RSS hash is computed over the normal
+ * src/dest l3 and src/dest l4 headers.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_INNERMOST_4 \
+ UINT32_C(0x2)
+ /*
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over innermost 2 tuples {l3.src, l3.dest} for
+	 * tunnel packets. For non-tunnel packets, the RSS hash is
+ * computed over the normal src/dest l3 headers.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_INNERMOST_2 \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over outermost 4 tuples {t_l3.src, t_l3.dest,
+	 * t_l4.src, t_l4.dest} for tunnel packets. For non-tunnel
+ * packets, the RSS hash is computed over the normal
+ * src/dest l3 and src/dest l4 headers.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_OUTERMOST_4 \
+ UINT32_C(0x8)
+ /*
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over outermost 2 tuples {t_l3.src, t_l3.dest} for
+	 * tunnel packets. For non-tunnel packets, the RSS hash is
+ * computed over the normal src/dest l3 headers.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_OUTERMOST_2 \
+ UINT32_C(0x10)
+ uint8_t unused_1[6];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
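As a hedged aside (not part of the header), the hash_key[] array in the hwrm_vnic_rss_qcfg response carries the 40-byte RSS key as ten 32-bit little-endian words; the sketch below unpacks it into a byte array, assuming the key bytes are packed least-significant-byte first into each word. The function name is invented for the example.

#include <rte_byteorder.h>

/*
 * Hypothetical sketch only: recover the 40-byte RSS hash key from a
 * hwrm_vnic_rss_qcfg response, emitting each little-endian word LSB first.
 */
static void
example_unpack_hash_key(const struct hwrm_vnic_rss_qcfg_output *resp,
			uint8_t key[40])
{
	unsigned int i;

	for (i = 0; i < 10; i++) {
		uint32_t w = rte_le_to_cpu_32(resp->hash_key[i]);

		key[4 * i + 0] = (uint8_t)(w & 0xff);
		key[4 * i + 1] = (uint8_t)((w >> 8) & 0xff);
		key[4 * i + 2] = (uint8_t)((w >> 16) & 0xff);
		key[4 * i + 3] = (uint8_t)((w >> 24) & 0xff);
	}
}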
+/**************************
+ * hwrm_vnic_plcmodes_cfg *
+ **************************/
+
+
+/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */
struct hwrm_vnic_plcmodes_cfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+	 * A physical address pointer to the host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t flags;
+ uint64_t resp_addr;
+ uint32_t flags;
/*
- * When this bit is '1', the VNIC shall be configured to use regular
- * placement algorithm. By default, the regular placement algorithm
- * shall be enabled on the VNIC.
+ * When this bit is '1', the VNIC shall be configured to
+ * use regular placement algorithm.
+ * By default, the regular placement algorithm shall be
+ * enabled on the VNIC.
*/
#define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_REGULAR_PLACEMENT \
UINT32_C(0x1)
/*
- * When this bit is '1', the VNIC shall be configured use the jumbo
- * placement algorithm.
+ * When this bit is '1', the VNIC shall be configured
+	 * to use the jumbo placement algorithm.
*/
#define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT \
UINT32_C(0x2)
/*
- * When this bit is '1', the VNIC shall be configured to enable Header-
- * Data split for IPv4 packets according to the following rules: # If
- * the packet is identified as TCP/IPv4, then the packet is split at the
- * beginning of the TCP payload. # If the packet is identified as
- * UDP/IPv4, then the packet is split at the beginning of UDP payload. #
- * If the packet is identified as non-TCP and non-UDP IPv4 packet, then
- * the packet is split at the beginning of the upper layer protocol
- * header carried in the IPv4 packet.
+ * When this bit is '1', the VNIC shall be configured
+ * to enable Header-Data split for IPv4 packets according
+ * to the following rules:
+ * # If the packet is identified as TCP/IPv4, then the
+ * packet is split at the beginning of the TCP payload.
+ * # If the packet is identified as UDP/IPv4, then the
+ * packet is split at the beginning of UDP payload.
+ * # If the packet is identified as non-TCP and non-UDP
+ * IPv4 packet, then the packet is split at the beginning
+ * of the upper layer protocol header carried in the IPv4
+ * packet.
*/
- #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV4 UINT32_C(0x4)
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV4 \
+ UINT32_C(0x4)
/*
- * When this bit is '1', the VNIC shall be configured to enable Header-
- * Data split for IPv6 packets according to the following rules: # If
- * the packet is identified as TCP/IPv6, then the packet is split at the
- * beginning of the TCP payload. # If the packet is identified as
- * UDP/IPv6, then the packet is split at the beginning of UDP payload. #
- * If the packet is identified as non-TCP and non-UDP IPv6 packet, then
- * the packet is split at the beginning of the upper layer protocol
- * header carried in the IPv6 packet.
+ * When this bit is '1', the VNIC shall be configured
+ * to enable Header-Data split for IPv6 packets according
+ * to the following rules:
+ * # If the packet is identified as TCP/IPv6, then the
+ * packet is split at the beginning of the TCP payload.
+ * # If the packet is identified as UDP/IPv6, then the
+ * packet is split at the beginning of UDP payload.
+ * # If the packet is identified as non-TCP and non-UDP
+ * IPv6 packet, then the packet is split at the beginning
+ * of the upper layer protocol header carried in the IPv6
+ * packet.
*/
- #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV6 UINT32_C(0x8)
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV6 \
+ UINT32_C(0x8)
/*
- * When this bit is '1', the VNIC shall be configured to enable Header-
- * Data split for FCoE packets at the beginning of FC payload.
+ * When this bit is '1', the VNIC shall be configured
+ * to enable Header-Data split for FCoE packets at the
+ * beginning of FC payload.
*/
- #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_FCOE UINT32_C(0x10)
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_FCOE \
+ UINT32_C(0x10)
/*
- * When this bit is '1', the VNIC shall be configured to enable Header-
- * Data split for RoCE packets at the beginning of RoCE payload (after
- * BTH/GRH headers).
+ * When this bit is '1', the VNIC shall be configured
+ * to enable Header-Data split for RoCE packets at the
+ * beginning of RoCE payload (after BTH/GRH headers).
*/
- #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_ROCE UINT32_C(0x20)
- uint32_t enables;
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_ROCE \
+ UINT32_C(0x20)
+ uint32_t enables;
/*
* This bit must be '1' for the jumbo_thresh_valid field to be
* configured.
@@ -8188,7 +18637,8 @@ struct hwrm_vnic_plcmodes_cfg_input {
#define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID \
UINT32_C(0x1)
/*
- * This bit must be '1' for the hds_offset_valid field to be configured.
+ * This bit must be '1' for the hds_offset_valid field to be
+ * configured.
*/
#define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID \
UINT32_C(0x2)
@@ -8198,523 +18648,568 @@ struct hwrm_vnic_plcmodes_cfg_input {
*/
#define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID \
UINT32_C(0x4)
- uint32_t vnic_id;
/* Logical vnic ID */
- uint16_t jumbo_thresh;
+ uint32_t vnic_id;
/*
- * When jumbo placement algorithm is enabled, this value is used to
- * determine the threshold for jumbo placement. Packets with length
- * larger than this value will be placed according to the jumbo
- * placement algorithm.
+ * When jumbo placement algorithm is enabled, this value
+ * is used to determine the threshold for jumbo placement.
+ * Packets with length larger than this value will be
+ * placed according to the jumbo placement algorithm.
*/
- uint16_t hds_offset;
+ uint16_t jumbo_thresh;
/*
- * This value is used to determine the offset into packet buffer where
- * the split data (payload) will be placed according to one of of HDS
- * placement algorithm. The lengths of packet buffers provided for split
- * data shall be larger than this value.
+ * This value is used to determine the offset into
+	 * the packet buffer where the split data (payload) will be
+	 * placed according to one of the HDS placement algorithms.
+ *
+ * The lengths of packet buffers provided for split data
+ * shall be larger than this value.
*/
- uint16_t hds_threshold;
+ uint16_t hds_offset;
/*
- * When one of the HDS placement algorithm is enabled, this value is
- * used to determine the threshold for HDS placement. Packets with
- * length larger than this value will be placed according to the HDS
- * placement algorithm. This value shall be in multiple of 4 bytes.
+	 * When one of the HDS placement algorithms is enabled, this
+ * value is used to determine the threshold for HDS
+ * placement.
+ * Packets with length larger than this value will be
+ * placed according to the HDS placement algorithm.
+	 * This value shall be a multiple of 4 bytes.
*/
- uint16_t unused_0[3];
+ uint16_t hds_threshold;
+ uint8_t unused_0[6];
} __attribute__((packed));
-/* Output (16 bytes) */
+/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
struct hwrm_vnic_plcmodes_cfg_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
- */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
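As an illustrative, non-authoritative sketch, the fragment below fills hwrm_vnic_plcmodes_cfg_input to enable regular plus jumbo placement with a caller-supplied jumbo threshold (typically the usable RX buffer size). The helper name and parameters are invented for the example; only the structure, flag and enable names come from the definition above.

#include <rte_byteorder.h>

/*
 * Hypothetical sketch only: enable regular + jumbo placement on a VNIC.
 * Packets longer than jumbo_thresh are placed by the jumbo algorithm.
 */
static void
example_fill_plcmodes_cfg(struct hwrm_vnic_plcmodes_cfg_input *req,
			  uint32_t vnic_id, uint16_t jumbo_thresh)
{
	req->flags = rte_cpu_to_le_32(
		HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_REGULAR_PLACEMENT |
		HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
	/* Only jumbo_thresh is flagged as valid in 'enables'. */
	req->enables = rte_cpu_to_le_32(
		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
	req->vnic_id = rte_cpu_to_le_32(vnic_id);
	req->jumbo_thresh = rte_cpu_to_le_16(jumbo_thresh);
}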
-/* hwrm_vnic_plcmodes_qcfg */
-/*
- * Description: This function can be used to query placement mode configuration
- * of the VNIC.
- */
-/* Input (24 bytes) */
+/***************************
+ * hwrm_vnic_plcmodes_qcfg *
+ ***************************/
+
+
+/* hwrm_vnic_plcmodes_qcfg_input (size:192b/24B) */
struct hwrm_vnic_plcmodes_qcfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+	 * A physical address pointer to the host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t vnic_id;
+ uint64_t resp_addr;
/* Logical vnic ID */
- uint32_t unused_0;
+ uint32_t vnic_id;
+ uint8_t unused_0[4];
} __attribute__((packed));
-/* Output (24 bytes) */
+/* hwrm_vnic_plcmodes_qcfg_output (size:192b/24B) */
struct hwrm_vnic_plcmodes_qcfg_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
- */
- uint32_t flags;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint32_t flags;
/*
- * When this bit is '1', the VNIC is configured to use regular placement
- * algorithm.
+ * When this bit is '1', the VNIC is configured to
+ * use regular placement algorithm.
*/
#define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_REGULAR_PLACEMENT \
UINT32_C(0x1)
/*
- * When this bit is '1', the VNIC is configured to use the jumbo
- * placement algorithm.
+ * When this bit is '1', the VNIC is configured to
+ * use the jumbo placement algorithm.
*/
#define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_JUMBO_PLACEMENT \
UINT32_C(0x2)
/*
- * When this bit is '1', the VNIC is configured to enable Header-Data
- * split for IPv4 packets.
+ * When this bit is '1', the VNIC is configured
+ * to enable Header-Data split for IPv4 packets.
*/
- #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV4 UINT32_C(0x4)
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV4 \
+ UINT32_C(0x4)
/*
- * When this bit is '1', the VNIC is configured to enable Header-Data
- * split for IPv6 packets.
+ * When this bit is '1', the VNIC is configured
+ * to enable Header-Data split for IPv6 packets.
*/
- #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV6 UINT32_C(0x8)
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV6 \
+ UINT32_C(0x8)
/*
- * When this bit is '1', the VNIC is configured to enable Header-Data
- * split for FCoE packets.
+ * When this bit is '1', the VNIC is configured
+ * to enable Header-Data split for FCoE packets.
*/
- #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_FCOE UINT32_C(0x10)
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_FCOE \
+ UINT32_C(0x10)
/*
- * When this bit is '1', the VNIC is configured to enable Header-Data
- * split for RoCE packets.
+ * When this bit is '1', the VNIC is configured
+ * to enable Header-Data split for RoCE packets.
*/
- #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_ROCE UINT32_C(0x20)
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_ROCE \
+ UINT32_C(0x20)
/*
- * When this bit is '1', the VNIC is configured to be the default VNIC
- * of the requesting function.
+ * When this bit is '1', the VNIC is configured
+ * to be the default VNIC of the requesting function.
*/
- #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC UINT32_C(0x40)
- uint16_t jumbo_thresh;
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC \
+ UINT32_C(0x40)
/*
- * When jumbo placement algorithm is enabled, this value is used to
- * determine the threshold for jumbo placement. Packets with length
- * larger than this value will be placed according to the jumbo
- * placement algorithm.
+ * When jumbo placement algorithm is enabled, this value
+ * is used to determine the threshold for jumbo placement.
+ * Packets with length larger than this value will be
+ * placed according to the jumbo placement algorithm.
*/
- uint16_t hds_offset;
+ uint16_t jumbo_thresh;
/*
- * This value is used to determine the offset into packet buffer where
- * the split data (payload) will be placed according to one of of HDS
- * placement algorithm. The lengths of packet buffers provided for split
- * data shall be larger than this value.
+ * This value is used to determine the offset into
+	 * the packet buffer where the split data (payload) will be
+	 * placed according to one of the HDS placement algorithms.
+ *
+ * The lengths of packet buffers provided for split data
+ * shall be larger than this value.
*/
- uint16_t hds_threshold;
+ uint16_t hds_offset;
/*
- * When one of the HDS placement algorithm is enabled, this value is
- * used to determine the threshold for HDS placement. Packets with
- * length larger than this value will be placed according to the HDS
- * placement algorithm. This value shall be in multiple of 4 bytes.
+	 * When one of the HDS placement algorithms is enabled, this
+ * value is used to determine the threshold for HDS
+ * placement.
+ * Packets with length larger than this value will be
+ * placed according to the HDS placement algorithm.
+	 * This value shall be a multiple of 4 bytes.
*/
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t unused_4;
- uint8_t valid;
+ uint16_t hds_threshold;
+ uint8_t unused_0[5];
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* hwrm_vnic_rss_cos_lb_ctx_alloc */
-/* Description: This function is used to allocate COS/Load Balance context. */
-/* Input (16 bytes) */
+/**********************************
+ * hwrm_vnic_rss_cos_lb_ctx_alloc *
+ **********************************/
+
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */
struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+	 * A physical address pointer to the host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
+ uint64_t resp_addr;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */
struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint16_t rss_cos_lb_ctx_id;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/* rss_cos_lb_ctx_id is 16 b */
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t unused_4;
- uint8_t valid;
+ uint16_t rss_cos_lb_ctx_id;
+ uint8_t unused_0[5];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* hwrm_vnic_rss_cos_lb_ctx_free */
-/* Description: This function can be used to free COS/Load Balance context. */
-/* Input (24 bytes) */
+/*********************************
+ * hwrm_vnic_rss_cos_lb_ctx_free *
+ *********************************/
+
+
+/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */
struct hwrm_vnic_rss_cos_lb_ctx_free_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+	 * A physical address pointer to the host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t rss_cos_lb_ctx_id;
+ uint64_t resp_addr;
/* rss_cos_lb_ctx_id is 16 b */
- uint16_t unused_0[3];
+ uint16_t rss_cos_lb_ctx_id;
+ uint8_t unused_0[6];
} __attribute__((packed));
-/* Output (16 bytes) */
+/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */
struct hwrm_vnic_rss_cos_lb_ctx_free_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* hwrm_ring_alloc */
-/*
- * Description: This command allocates and does basic preparation for a ring.
- */
-/* Input (80 bytes) */
+/*******************
+ * hwrm_ring_alloc *
+ *******************/
+
+
+/* hwrm_ring_alloc_input (size:640b/80B) */
struct hwrm_ring_alloc_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+	 * A physical address pointer to the host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the ring_arb_cfg field to be
+ * configured.
*/
- uint32_t enables;
- /* This bit must be '1' for the Reserved1 field to be configured. */
- #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED1 UINT32_C(0x1)
- /* This bit must be '1' for the ring_arb_cfg field to be configured. */
- #define HWRM_RING_ALLOC_INPUT_ENABLES_RING_ARB_CFG UINT32_C(0x2)
- /* This bit must be '1' for the Reserved3 field to be configured. */
- #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED3 UINT32_C(0x4)
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_RING_ARB_CFG \
+ UINT32_C(0x2)
/*
* This bit must be '1' for the stat_ctx_id_valid field to be
* configured.
*/
- #define HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID UINT32_C(0x8)
- /* This bit must be '1' for the Reserved4 field to be configured. */
- #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED4 UINT32_C(0x10)
- /* This bit must be '1' for the max_bw_valid field to be configured. */
- #define HWRM_RING_ALLOC_INPUT_ENABLES_MAX_BW_VALID UINT32_C(0x20)
- uint8_t ring_type;
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the max_bw_valid field to be
+ * configured.
+ */
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_MAX_BW_VALID \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the rx_ring_id field to be
+ * configured.
+ */
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the nq_ring_id field to be
+ * configured.
+ */
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the rx_buf_size field to be
+ * configured.
+ */
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID \
+ UINT32_C(0x100)
/* Ring Type. */
- /* L2 Completion Ring (CR) */
- #define HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0)
- /* TX Ring (TR) */
- #define HWRM_RING_ALLOC_INPUT_RING_TYPE_TX UINT32_C(0x1)
- /* RX Ring (RR) */
- #define HWRM_RING_ALLOC_INPUT_RING_TYPE_RX UINT32_C(0x2)
- /* RoCE Notification Completion Ring (ROCE_CR) */
- #define HWRM_RING_ALLOC_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3)
- uint8_t unused_0;
- uint16_t unused_1;
- uint64_t page_tbl_addr;
- /* This value is a pointer to the page table for the Ring. */
- uint32_t fbo;
+ uint8_t ring_type;
+ /* L2 Completion Ring (CR) */
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0)
+ /* TX Ring (TR) */
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_TX UINT32_C(0x1)
+ /* RX Ring (RR) */
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_RX UINT32_C(0x2)
+ /* RoCE Notification Completion Ring (ROCE_CR) */
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3)
+ /* RX Aggregation Ring */
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG UINT32_C(0x4)
+ /* Notification Queue */
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ UINT32_C(0x5)
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_LAST \
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ
+ uint8_t unused_0[3];
+ /*
+ * This value is a pointer to the page table for the
+ * Ring.
+ */
+ uint64_t page_tbl_addr;
/* First Byte Offset of the first entry in the first page. */
- uint8_t page_size;
- /*
- * Actual page size in 2^page_size. The supported range is
- * increments in powers of 2 from 16 bytes to 1GB. - 4 = 16 B
- * Page size is 16 B. - 12 = 4 KB Page size is 4 KB. - 13 = 8 KB
- * Page size is 8 KB. - 16 = 64 KB Page size is 64 KB. - 21 = 2
- * MB Page size is 2 MB. - 22 = 4 MB Page size is 4 MB. - 30 = 1
- * GB Page size is 1 GB.
+ uint32_t fbo;
+ /*
+ * Actual page size in 2^page_size. The supported range is increments
+ * in powers of 2 from 16 bytes to 1GB.
+ * - 4 = 16 B
+ * Page size is 16 B.
+ * - 12 = 4 KB
+ * Page size is 4 KB.
+ * - 13 = 8 KB
+ * Page size is 8 KB.
+ * - 16 = 64 KB
+ * Page size is 64 KB.
+ * - 21 = 2 MB
+ * Page size is 2 MB.
+ * - 22 = 4 MB
+ * Page size is 4 MB.
+ * - 30 = 1 GB
+ * Page size is 1 GB.
+ */
+ uint8_t page_size;
+ /*
+ * This value indicates the depth of the page table.
+ * For this version of the specification, any value other than
+ * 0 or 1 shall be considered invalid.
+ * When the page_tbl_depth = 0, then it is treated as a
+ * special case with the following.
+ * 1. FBO and page size fields are not valid.
+ * 2. page_tbl_addr is the physical address of the first
+ * element of the ring.
+ */
+ uint8_t page_tbl_depth;
+ uint8_t unused_1[2];
+ /*
+ * Number of 16B units in the ring. Minimum size for
+ * a ring is 16 16B entries.
+ */
+ uint32_t length;
+ /*
+ * Logical ring number for the ring to be allocated.
+ * This value determines the position in the doorbell
+ * area where the update to the ring will be made.
+ *
+ * For completion rings, this value is also the MSI-X
+ * vector number for the function the completion ring is
+ * associated with.
*/
- uint8_t page_tbl_depth;
+ uint16_t logical_id;
/*
- * This value indicates the depth of page table. For this
- * version of the specification, value other than 0 or 1 shall
- * be considered as an invalid value. When the page_tbl_depth =
- * 0, then it is treated as a special case with the following.
- * 1. FBO and page size fields are not valid. 2. page_tbl_addr
- * is the physical address of the first element of the ring.
+ * This field is used only when ring_type is a TX ring.
+ * This value indicates what completion ring the TX ring
+ * is associated with.
*/
- uint8_t unused_2;
- uint8_t unused_3;
- uint32_t length;
+ uint16_t cmpl_ring_id;
/*
- * Number of 16B units in the ring. Minimum size for a ring is
- * 16 16B entries.
+ * This field is used only when ring_type is a TX ring.
+ * This value indicates what CoS queue the TX ring
+ * is associated with.
*/
- uint16_t logical_id;
+ uint16_t queue_id;
/*
- * Logical ring number for the ring to be allocated. This value
- * determines the position in the doorbell area where the update
- * to the ring will be made. For completion rings, this value is
- * also the MSI-X vector number for the function the completion
- * ring is associated with.
+ * When allocating a Rx ring or Rx aggregation ring, this field
+ * specifies the size of the buffer descriptors posted to the ring.
*/
- uint16_t cmpl_ring_id;
+ uint16_t rx_buf_size;
/*
- * This field is used only when ring_type is a TX ring. This
- * value indicates what completion ring the TX ring is
- * associated with.
+ * When allocating an Rx aggregation ring, this field
+ * specifies the associated Rx ring ID.
*/
- uint16_t queue_id;
+ uint16_t rx_ring_id;
/*
- * This field is used only when ring_type is a TX ring. This
- * value indicates what CoS queue the TX ring is associated
- * with.
+ * When allocating a completion ring, this field
+ * specifies the associated NQ ring ID.
*/
- uint8_t unused_4;
- uint8_t unused_5;
- uint32_t reserved1;
- /* This field is reserved for the future use. It shall be set to 0. */
- uint16_t ring_arb_cfg;
+ uint16_t nq_ring_id;
/*
- * This field is used only when ring_type is a TX ring. This
- * field is used to configure arbitration related parameters for
- * a TX ring.
+ * This field is used only when ring_type is a TX ring.
+ * This field is used to configure arbitration related
+ * parameters for a TX ring.
*/
+ uint16_t ring_arb_cfg;
/* Arbitration policy used for the ring. */
- #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_MASK UINT32_C(0xf)
- #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SFT 0
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_MASK \
+ UINT32_C(0xf)
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SFT 0
/*
- * Use strict priority for the TX ring. Priority
- * value is specified in arb_policy_param
+ * Use strict priority for the TX ring.
+ * Priority value is specified in arb_policy_param
*/
#define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SP \
- (UINT32_C(0x1) << 0)
+ UINT32_C(0x1)
/*
- * Use weighted fair queue arbitration for the
- * TX ring. Weight is specified in
- * arb_policy_param
+ * Use weighted fair queue arbitration for the TX ring.
+ * Weight is specified in arb_policy_param
*/
#define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ \
- (UINT32_C(0x2) << 0)
+ UINT32_C(0x2)
#define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_LAST \
- RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ
+ HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ
/* Reserved field. */
- #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_RSVD_MASK UINT32_C(0xf0)
- #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_RSVD_SFT 4
- /*
- * Arbitration policy specific parameter. # For strict priority
- * arbitration policy, this field represents a priority value.
- * If set to 0, then the priority is not specified and the HWRM
- * is allowed to select any priority for this TX ring. # For
- * weighted fair queue arbitration policy, this field represents
- * a weight value. If set to 0, then the weight is not specified
- * and the HWRM is allowed to select any weight for this TX
- * ring.
- */
- #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_MASK \
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_RSVD_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_RSVD_SFT 4
+ /*
+ * Arbitration policy specific parameter.
+ * # For strict priority arbitration policy, this field
+ * represents a priority value. If set to 0, then the priority
+ * is not specified and the HWRM is allowed to select
+ * any priority for this TX ring.
+ * # For weighted fair queue arbitration policy, this field
+ * represents a weight value. If set to 0, then the weight
+ * is not specified and the HWRM is allowed to select
+ * any weight for this TX ring.
+ */
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_MASK \
UINT32_C(0xff00)
- #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
- uint8_t unused_6;
- uint8_t unused_7;
- uint32_t reserved3;
- /* This field is reserved for the future use. It shall be set to 0. */
- uint32_t stat_ctx_id;
- /*
- * This field is used only when ring_type is a TX ring. This
- * input indicates what statistics context this ring should be
- * associated with.
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
+ uint16_t unused_3;
+ /*
+ * This field is reserved for future use.
+ * It shall be set to 0.
+ */
+ uint32_t reserved3;
+ /*
+ * This field is used only when ring_type is a TX ring.
+ * This input indicates what statistics context this ring
+ * should be associated with.
+ */
+ uint32_t stat_ctx_id;
+ /*
+ * This field is reserved for future use.
+ * It shall be set to 0.
*/
- uint32_t reserved4;
- /* This field is reserved for the future use. It shall be set to 0. */
- uint32_t max_bw;
+ uint32_t reserved4;
/*
- * This field is used only when ring_type is a TX ring to
- * specify maximum BW allocated to the TX ring. The HWRM will
- * translate this value into byte counter and time interval used
- * for this ring inside the device.
+ * This field is used only when ring_type is a TX ring
+ * to specify maximum BW allocated to the TX ring.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this ring inside the device.
*/
+ uint32_t max_bw;
/* The bandwidth value. */
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_MASK UINT32_C(0xfffffff)
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_SFT 0
- /* The granularity of the value (bits or bytes). */
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE UINT32_C(0x10000000)
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_SFT 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
/* Value is in bits. */
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BITS (UINT32_C(0x0) << 28)
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
/* Value is in bytes. */
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BYTES (UINT32_C(0x1) << 28)
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
#define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_LAST \
- RING_ALLOC_INPUT_MAX_BW_SCALE_BYTES
+ HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BYTES
/* bw_value_unit is 3 b */
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MASK \
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MASK \
UINT32_C(0xe0000000)
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_SFT 29
- /* Value is in Mb or MB (base 10). */
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA \
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_SFT 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA \
(UINT32_C(0x0) << 29)
- /* Value is in Kb or KB (base 10). */
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_KILO \
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_KILO \
(UINT32_C(0x2) << 29)
/* Value is in bits or bytes. */
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_BASE \
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_BASE \
(UINT32_C(0x4) << 29)
- /* Value is in Gb or GB (base 10). */
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_GIGA \
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_GIGA \
(UINT32_C(0x6) << 29)
/* Value is in 1/100th of a percentage of total bandwidth. */
#define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
@@ -8723,628 +19218,932 @@ struct hwrm_ring_alloc_input {
#define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID \
(UINT32_C(0x7) << 29)
#define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_LAST \
- RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID
- uint8_t int_mode;
+ HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID
/*
* This field is used only when ring_type is a Completion ring.
- * This value indicates what interrupt mode should be used on
- * this completion ring. Note: In the legacy interrupt mode, no
- * more than 16 completion rings are allowed.
+ * This value indicates what interrupt mode should be used
+ * on this completion ring.
+ * Note: In the legacy interrupt mode, no more than 16
+ * completion rings are allowed.
*/
+ uint8_t int_mode;
/* Legacy INTA */
- #define HWRM_RING_ALLOC_INPUT_INT_MODE_LEGACY UINT32_C(0x0)
+ #define HWRM_RING_ALLOC_INPUT_INT_MODE_LEGACY UINT32_C(0x0)
/* Reserved */
- #define HWRM_RING_ALLOC_INPUT_INT_MODE_RSVD UINT32_C(0x1)
+ #define HWRM_RING_ALLOC_INPUT_INT_MODE_RSVD UINT32_C(0x1)
/* MSI-X */
- #define HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX UINT32_C(0x2)
+ #define HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX UINT32_C(0x2)
/* No Interrupt - Polled mode */
- #define HWRM_RING_ALLOC_INPUT_INT_MODE_POLL UINT32_C(0x3)
- uint8_t unused_8[3];
+ #define HWRM_RING_ALLOC_INPUT_INT_MODE_POLL UINT32_C(0x3)
+ #define HWRM_RING_ALLOC_INPUT_INT_MODE_LAST \
+ HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
+ uint8_t unused_4[3];
} __attribute__((packed));
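
For readers less familiar with the HWRM ring model, the sketch below shows how a
driver might fill this request for a TX ring. It is a minimal illustration, not
the bnxt driver's actual code: the helper name, parameter names, and the chosen
values are hypothetical, the structures above are assumed to be in scope, and
byte-order conversion is shown with DPDK's rte_cpu_to_le_*() helpers. It relies
on the page_tbl_depth == 0 special case documented above (page_tbl_addr then
holds the physical address of the first ring element) and on `length` being
counted in 16B units.

#include <string.h>
#include <rte_byteorder.h>

/* Hypothetical example: allocate a TX ring backed by nr_desc 16B BDs. */
static void example_fill_tx_ring_alloc(struct hwrm_ring_alloc_input *req,
					uint64_t ring_iova, uint32_t nr_desc,
					uint16_t cp_ring_id, uint16_t tx_idx,
					uint32_t stat_ctx)
{
	uint32_t enables = HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;

	memset(req, 0, sizeof(*req));
	req->ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_TX;
	/* page_tbl_depth == 0: fbo/page_size ignored, address is the ring. */
	req->page_tbl_depth = 0;
	req->page_tbl_addr = rte_cpu_to_le_64(ring_iova);
	/* Ring length is in 16B units; one TX BD is assumed to be 16B here. */
	req->length = rte_cpu_to_le_32(nr_desc);
	/* Doorbell position for this ring. */
	req->logical_id = rte_cpu_to_le_16(tx_idx);
	/* TX rings must name their completion ring and statistics context. */
	req->cmpl_ring_id = rte_cpu_to_le_16(cp_ring_id);
	req->stat_ctx_id = rte_cpu_to_le_32(stat_ctx);
	/* Optional bandwidth cap: 28-bit value plus scale and unit bits. */
	req->max_bw = rte_cpu_to_le_32(100 |
			HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BITS |
			HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA);
	enables |= HWRM_RING_ALLOC_INPUT_ENABLES_MAX_BW_VALID;
	req->enables = rte_cpu_to_le_32(enables);
}
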
-/* Output (16 bytes) */
+/* hwrm_ring_alloc_output (size:128b/16B) */
struct hwrm_ring_alloc_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint16_t ring_id;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/*
- * Physical number of ring allocated. This value shall be unique
- * for a ring type.
+ * Physical number of ring allocated.
+ * This value shall be unique for a ring type.
*/
- uint16_t logical_ring_id;
+ uint16_t ring_id;
/* Logical number of ring allocated. */
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t valid;
+ uint16_t logical_ring_id;
+ uint8_t unused_0[3];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
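
Every HWRM response ends with the `valid` byte described above, which firmware
writes last. A minimal polling sketch follows, assuming the response buffer
pointed to by resp_addr was zeroed before the request was posted; the helper
name, the retry budget, and the use of rte_io_rmb()/rte_delay_us() are
illustrative assumptions, not the driver's actual completion path.

#include <errno.h>
#include <rte_byteorder.h>
#include <rte_cycles.h>
#include <rte_io.h>

/* Hypothetical example: wait for a ring_alloc response to become valid. */
static int example_wait_ring_alloc(volatile struct hwrm_ring_alloc_output *resp,
				   uint16_t *ring_id)
{
	int i;

	for (i = 0; i < 1000; i++) {
		if (resp->valid == 1) {
			/* 'valid' is written last, so order the reads. */
			rte_io_rmb();
			if (rte_le_to_cpu_16(resp->error_code) != 0)
				return -EIO;
			*ring_id = rte_le_to_cpu_16(resp->ring_id);
			return 0;
		}
		rte_delay_us(10);
	}
	return -ETIMEDOUT;
}
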
-/* hwrm_ring_free */
-/*
- * Description: This command is used to free a ring and associated resources.
- * With QoS and DCBx agents, it is possible the traffic classes will be moved
- * from one CoS queue to another. When this occurs, the driver shall call
- * 'hwrm_ring_free' to free the allocated rings and then call 'hwrm_ring_alloc'
- * to re-allocate each ring and assign it to a new CoS queue. hwrm_ring_free
- * shall be called on a ring only after it has been idle for 500ms or more and
- * no frames have been posted to the ring during this time. All frames queued
- * for transmission shall be completed and at least 500ms time elapsed from the
- * last completion before calling this command.
- */
-/* Input (24 bytes) */
+/******************
+ * hwrm_ring_free *
+ ******************/
+
+
+/* hwrm_ring_free_input (size:192b/24B) */
struct hwrm_ring_free_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint8_t ring_type;
+ uint64_t resp_addr;
/* Ring Type. */
- /* L2 Completion Ring (CR) */
- #define HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0)
- /* TX Ring (TR) */
- #define HWRM_RING_FREE_INPUT_RING_TYPE_TX UINT32_C(0x1)
- /* RX Ring (RR) */
- #define HWRM_RING_FREE_INPUT_RING_TYPE_RX UINT32_C(0x2)
- /* RoCE Notification Completion Ring (ROCE_CR) */
- #define HWRM_RING_FREE_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3)
- uint8_t unused_0;
- uint16_t ring_id;
+ uint8_t ring_type;
+ /* L2 Completion Ring (CR) */
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0)
+ /* TX Ring (TR) */
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_TX UINT32_C(0x1)
+ /* RX Ring (RR) */
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_RX UINT32_C(0x2)
+ /* RoCE Notification Completion Ring (ROCE_CR) */
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3)
+ /* RX Aggregation Ring */
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG UINT32_C(0x4)
+ /* Notification Queue */
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_NQ UINT32_C(0x5)
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_LAST \
+ HWRM_RING_FREE_INPUT_RING_TYPE_NQ
+ uint8_t unused_0;
/* Physical number of ring allocated. */
- uint32_t unused_1;
+ uint16_t ring_id;
+ uint8_t unused_1[4];
} __attribute__((packed));
-/* Output (16 bytes) */
+/* hwrm_ring_free_output (size:128b/16B) */
struct hwrm_ring_free_output {
- uint16_t error_code;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************************
+ * hwrm_ring_cmpl_ring_qaggint_params *
+ **************************************/
+
+
+/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */
+struct hwrm_ring_cmpl_ring_qaggint_params_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
+ uint64_t resp_addr;
+ /* Physical number of completion ring. */
+ uint16_t ring_id;
+ uint8_t unused_0[6];
} __attribute__((packed));
-/* hwrm_ring_grp_alloc */
-/*
- * Description: This API allocates and does basic preparation for a ring group.
- */
-/* Input (24 bytes) */
-struct hwrm_ring_grp_alloc_input {
- uint16_t req_type;
+/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */
+struct hwrm_ring_cmpl_ring_qaggint_params_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint16_t flags;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * When this bit is set to '1', interrupt max
+ * timer is reset whenever a completion is received.
*/
- uint16_t cmpl_ring;
+ #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS_OUTPUT_FLAGS_TIMER_RESET \
+ UINT32_C(0x1)
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * When this bit is set to '1', ring idle mode
+ * aggregation will be enabled.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS_OUTPUT_FLAGS_RING_IDLE \
+ UINT32_C(0x2)
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * Number of completions to aggregate before DMA
+ * during the normal mode.
*/
- uint64_t resp_addr;
+ uint16_t num_cmpl_dma_aggr;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * Number of completions to aggregate before DMA
+ * during the interrupt mode.
*/
- uint16_t cr;
- /* This value identifies the CR associated with the ring group. */
- uint16_t rr;
- /* This value identifies the main RR associated with the ring group. */
- uint16_t ar;
+ uint16_t num_cmpl_dma_aggr_during_int;
/*
- * This value identifies the aggregation RR associated with the
- * ring group. If this value is 0xFF... (All Fs), then no
- * Aggregation ring will be set.
+ * Timer in unit of 80-nsec used to aggregate completions before
+ * DMA during the normal mode (not in interrupt mode).
*/
- uint16_t sc;
+ uint16_t cmpl_aggr_dma_tmr;
/*
- * This value identifies the statistics context associated with
- * the ring group.
+ * Timer in unit of 80-nsec used to aggregate completions before
+ * DMA during the interrupt mode.
*/
+ uint16_t cmpl_aggr_dma_tmr_during_int;
+ /* Minimum time (in unit of 80-nsec) between two interrupts. */
+ uint16_t int_lat_tmr_min;
+ /*
+ * Maximum wait time (in unit of 80-nsec) spent aggregating
+ * completions before signaling the interrupt after the
+ * interrupt is enabled.
+ */
+ uint16_t int_lat_tmr_max;
+ /*
+ * Minimum number of completions aggregated before signaling
+ * an interrupt.
+ */
+ uint16_t num_cmpl_aggr_int;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
} __attribute__((packed));
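
All of the timer fields in this response are expressed in 80 ns ticks, as the
comments above note. Two hypothetical helpers make the conversion explicit; for
example, an int_lat_tmr_max of 125 ticks corresponds to 10 us.

#include <stdint.h>

/* Hypothetical conversion helpers for the 80 ns HWRM coalescing tick. */
static inline uint32_t example_hwrm_ticks_to_ns(uint16_t ticks)
{
	return (uint32_t)ticks * 80;
}

static inline uint16_t example_hwrm_us_to_ticks(uint32_t usec)
{
	return (uint16_t)(usec * 1000 / 80);	/* 10 us -> 125 ticks */
}
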
-/* Output (16 bytes) */
-struct hwrm_ring_grp_alloc_output {
- uint16_t error_code;
+/*****************************************
+ * hwrm_ring_cmpl_ring_cfg_aggint_params *
+ *****************************************/
+
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_input (size:320b/40B) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint16_t cmpl_ring;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint32_t ring_group_id;
+ uint16_t seq_id;
/*
- * This is the ring group ID value. Use this value to program
- * the default ring group for the VNIC or as table entries in an
- * RSS/COS context.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t valid;
+ uint16_t target_id;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
+ uint64_t resp_addr;
+ /* Physical number of completion ring. */
+ uint16_t ring_id;
+ uint16_t flags;
+ /*
+ * When this bit is set to '1', interrupt latency max
+ * timer is reset whenever a completion is received.
+ */
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET \
+ UINT32_C(0x1)
+ /*
+ * When this bit is set to '1', ring idle mode
+ * aggregation will be enabled.
+ */
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE \
+ UINT32_C(0x2)
+ /*
+ * Set this flag to 1 when configuring parameters on a
+ * notification queue. Set this flag to 0 when configuring
+ * parameters on a completion queue.
+ */
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_IS_NQ \
+ UINT32_C(0x4)
+ /*
+ * Number of completions to aggregate before DMA
+ * during the normal mode.
+ */
+ uint16_t num_cmpl_dma_aggr;
+ /*
+ * Number of completions to aggregate before DMA
+ * during the interrupt mode.
+ */
+ uint16_t num_cmpl_dma_aggr_during_int;
+ /*
+ * Timer in unit of 80-nsec used to aggregate completions before
+ * DMA during the normal mode (not in interrupt mode).
+ */
+ uint16_t cmpl_aggr_dma_tmr;
+ /*
+ * Timer in unit of 80-nsec used to aggregate completions before
+ * DMA during the interrupt mode.
+ */
+ uint16_t cmpl_aggr_dma_tmr_during_int;
+ /* Minimum time (in unit of 80-nsec) between two interrupts. */
+ uint16_t int_lat_tmr_min;
+ /*
+ * Maximum wait time (in unit of 80-nsec) spent aggregating
+ * completions before signaling the interrupt after the
+ * interrupt is enabled.
+ */
+ uint16_t int_lat_tmr_max;
+ /*
+ * Minimum number of completions aggregated before signaling
+ * an interrupt.
+ */
+ uint16_t num_cmpl_aggr_int;
+ /*
+ * Bitfield that indicates which parameters are to be applied. Only
+ * required when configuring devices with notification queues, and
+ * used in that case to set certain parameters on completion queues
+ * and others on notification queues.
+ */
+ uint16_t enables;
+ /*
+ * This bit must be '1' for the num_cmpl_dma_aggr field to be
+ * configured.
+ */
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the num_cmpl_dma_aggr_during_int field to be
+ * configured.
+ */
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the cmpl_aggr_dma_tmr field to be
+ * configured.
+ */
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the int_lat_tmr_min field to be
+ * configured.
+ */
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_INT_LAT_TMR_MIN \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the int_lat_tmr_max field to be
+ * configured.
+ */
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_INT_LAT_TMR_MAX \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the num_cmpl_aggr_int field to be
+ * configured.
+ */
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_AGGR_INT \
+ UINT32_C(0x20)
+ uint8_t unused_0[4];
} __attribute__((packed));
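
Because only some of these parameters apply to notification queues, the request
carries both an IS_NQ flag and an `enables` bitfield that names exactly which
fields firmware should apply. A minimal sketch for a completion ring follows;
the helper name and the coalescing values (4 completions, 80 ns minimum, 10 us
maximum) are illustrative assumptions, not recommended settings.

#include <string.h>
#include <rte_byteorder.h>

/* Hypothetical example: set interrupt coalescing on a completion ring. */
static void example_fill_aggint(
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req,
	uint16_t ring_id)
{
	memset(req, 0, sizeof(*req));
	req->ring_id = rte_cpu_to_le_16(ring_id);
	req->flags = rte_cpu_to_le_16(
		HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET);
	req->num_cmpl_dma_aggr = rte_cpu_to_le_16(4);
	req->int_lat_tmr_min = rte_cpu_to_le_16(1);	/* 1 tick = 80 ns */
	req->int_lat_tmr_max = rte_cpu_to_le_16(125);	/* 125 ticks = 10 us */
	req->enables = rte_cpu_to_le_16(
		HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR |
		HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_INT_LAT_TMR_MIN |
		HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_INT_LAT_TMR_MAX);
}
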
-/* hwrm_ring_grp_free */
-/*
- * Description: This API frees a ring group and associated resources. # If a
- * ring in the ring group is reset or free, then the associated rings in the
- * ring group shall also be reset/free using hwrm_ring_free. # A function driver
- * shall always use hwrm_ring_grp_free after freeing all rings in a group. # As
- * a part of executing this command, the HWRM shall reset all associated ring
- * group resources.
- */
-/* Input (24 bytes) */
-struct hwrm_ring_grp_free_input {
- uint16_t req_type;
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*******************
+ * hwrm_ring_reset *
+ *******************/
+
+
+/* hwrm_ring_reset_input (size:192b/24B) */
+struct hwrm_ring_reset_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t ring_group_id;
- /* This is the ring group ID value. */
- uint32_t unused_0;
+ uint64_t resp_addr;
+ /* Ring Type. */
+ uint8_t ring_type;
+ /* L2 Completion Ring (CR) */
+ #define HWRM_RING_RESET_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0)
+ /* TX Ring (TR) */
+ #define HWRM_RING_RESET_INPUT_RING_TYPE_TX UINT32_C(0x1)
+ /* RX Ring (RR) */
+ #define HWRM_RING_RESET_INPUT_RING_TYPE_RX UINT32_C(0x2)
+ /* RoCE Notification Completion Ring (ROCE_CR) */
+ #define HWRM_RING_RESET_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3)
+ #define HWRM_RING_RESET_INPUT_RING_TYPE_LAST \
+ HWRM_RING_RESET_INPUT_RING_TYPE_ROCE_CMPL
+ uint8_t unused_0;
+ /* Physical number of the ring. */
+ uint16_t ring_id;
+ uint8_t unused_1[4];
} __attribute__((packed));
-/* Output (16 bytes) */
-struct hwrm_ring_grp_free_output {
- uint16_t error_code;
+/* hwrm_ring_reset_output (size:128b/16B) */
+struct hwrm_ring_reset_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_ring_grp_alloc *
+ ***********************/
+
+
+/* hwrm_ring_grp_alloc_input (size:192b/24B) */
+struct hwrm_ring_grp_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This value identifies the CR associated with the ring
+ * group.
+ */
+ uint16_t cr;
+ /*
+ * This value identifies the main RR associated with the ring
+ * group.
+ */
+ uint16_t rr;
+ /*
+ * This value identifies the aggregation RR associated with
+ * the ring group. If this value is 0xFF... (All Fs), then no
+ * Aggregation ring will be set.
+ */
+ uint16_t ar;
+ /*
+ * This value identifies the statistics context associated
+ * with the ring group.
+ */
+ uint16_t sc;
} __attribute__((packed));
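
A ring group simply ties together the IDs allocated by the earlier commands: a
completion ring (cr), an RX ring (rr), an optional aggregation ring (ar), and a
statistics context (sc). Per the comment above, writing all Fs into `ar` means
no aggregation ring is attached. A hypothetical sketch, with all names and IDs
assumed:

#include <string.h>
#include <rte_byteorder.h>

/* Hypothetical example: build a ring group without an aggregation ring. */
static void example_fill_ring_grp(struct hwrm_ring_grp_alloc_input *req,
				  uint16_t cp_ring_id, uint16_t rx_ring_id,
				  uint16_t stat_ctx_id)
{
	memset(req, 0, sizeof(*req));
	req->cr = rte_cpu_to_le_16(cp_ring_id);
	req->rr = rte_cpu_to_le_16(rx_ring_id);
	req->ar = rte_cpu_to_le_16(0xffff);	/* all Fs: no aggregation ring */
	req->sc = rte_cpu_to_le_16(stat_ctx_id);
}
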
-/* hwrm_cfa_l2_filter_alloc */
-/*
- * Description: An L2 filter is a filter resource that is used to identify a
- * vnic or ring for a packet based on layer 2 fields. Layer 2 fields for
- * encapsulated packets include both outer L2 header and/or inner l2 header of
- * encapsulated packet. The L2 filter resource covers the following OS specific
- * L2 filters. Linux/FreeBSD (per function): # Broadcast enable/disable # List
- * of individual multicast filters # All multicast enable/disable filter #
- * Unicast filters # Promiscuous mode VMware: # Broadcast enable/disable (per
- * physical function) # All multicast enable/disable (per function) # Unicast
- * filters per ring or vnic # Promiscuous mode per PF Windows: # Broadcast
- * enable/disable (per physical function) # List of individual multicast filters
- * (Driver needs to advertise the maximum number of filters supported) # All
- * multicast enable/disable per physical function # Unicast filters per vnic #
- * Promiscuous mode per PF Implementation notes on the use of VNIC in this
- * command: # By default, these filters belong to default vnic for the function.
- * # Once these filters are set up, only destination VNIC can be modified. # If
- * the destination VNIC is not specified in this command, then the HWRM shall
- * only create an l2 context id. HWRM Implementation notes for multicast
- * filters: # The hwrm_filter_alloc command can be used to set up multicast
- * filters (perfect match or partial match). Each individual function driver can
- * set up multicast filters independently. # The HWRM needs to keep track of
- * multicast filters set up by function drivers and maintain multicast group
- * replication records to enable a subset of functions to receive traffic for a
- * specific multicast address. # When a specific multicast filter cannot be set,
- * the HWRM shall return an error. In this error case, the driver should fall
- * back to using one general filter (rather than specific) for all multicast
- * traffic. # When the SR-IOV is enabled, the HWRM needs to additionally track
- * source knockout per multicast group record. Examples of setting unicast
- * filters: For a unicast MAC based filter, one can use a combination of the
- * fields and masks provided in this command to set up the filter. Below are
- * some examples: # MAC + no VLAN filter: This filter is used to identify
- * traffic that does not contain any VLAN tags and matches destination (or
- * source) MAC address. This filter can be set up by setting only l2_addr field
- * to be a valid field. All other fields are not valid. The following value is
- * set for l2_addr. l2_addr = MAC # MAC + Any VLAN filter: This filter is used
- * to identify traffic that carries single VLAN tag and matches (destination or
- * source) MAC address. This filter can be set up by setting only l2_addr and
- * l2_ovlan_mask fields to be valid fields. All other fields are not valid. The
- * following values are set for those two valid fields. l2_addr = MAC,
- * l2_ovlan_mask = 0xFFFF # MAC + no VLAN or VLAN ID=0: This filter is used to
- * identify untagged traffic that does not contain any VLAN tags or a VLAN tag
- * with VLAN ID = 0 and matches destination (or source) MAC address. This filter
- * can be set up by setting only l2_addr and l2_ovlan fields to be valid fields.
- * All other fields are not valid. The following value are set for l2_addr and
- * l2_ovlan. l2_addr = MAC, l2_ovlan = 0x0 # MAC + no VLAN or any VLAN: This
- * filter is used to identify traffic that contains zero or 1 VLAN tag and
- * matches destination (or source) MAC address. This filter can be set up by
- * setting only l2_addr, l2_ovlan, and l2_mask fields to be valid fields. All
- * other fields are not valid. The following value are set for l2_addr,
- * l2_ovlan, and l2_mask fields. l2_addr = MAC, l2_ovlan = 0x0, l2_ovlan_mask =
- * 0xFFFF # MAC + VLAN ID filter: This filter can be set up by setting only
- * l2_addr, l2_ovlan, and l2_ovlan_mask fields to be valid fields. All other
- * fields are not valid. The following values are set for those three valid
- * fields. l2_addr = MAC, l2_ovlan = VLAN ID, l2_ovlan_mask = 0xF000
- */
-/* Input (96 bytes) */
+/* hwrm_ring_grp_alloc_output (size:128b/16B) */
+struct hwrm_ring_grp_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * This is the ring group ID value. Use this value to program
+ * the default ring group for the VNIC or as table entries
+ * in an RSS/COS context.
+ */
+ uint32_t ring_group_id;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_ring_grp_free *
+ **********************/
+
+
+/* hwrm_ring_grp_free_input (size:192b/24B) */
+struct hwrm_ring_grp_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* This is the ring group ID value. */
+ uint32_t ring_group_id;
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_ring_grp_free_output (size:128b/16B) */
+struct hwrm_ring_grp_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/****************************
+ * hwrm_cfa_l2_filter_alloc *
+ ****************************/
+
+
+/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */
struct hwrm_cfa_l2_filter_alloc_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t flags;
+ uint64_t resp_addr;
+ uint32_t flags;
/*
- * Enumeration denoting the RX, TX type of the resource. This
- * enumeration is used for resources that are similar for both
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
* TX and RX paths of the chip.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH UINT32_C(0x1)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH \
+ UINT32_C(0x1)
/* tx path */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_TX \
- (UINT32_C(0x0) << 0)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_TX \
+ UINT32_C(0x0)
/* rx path */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX \
- (UINT32_C(0x1) << 0)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX \
+ UINT32_C(0x1)
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_LAST \
- CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX
+ /* Setting of this flag indicates the applicability to the loopback path. */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK \
+ UINT32_C(0x2)
+ /*
+ * Setting of this flag indicates drop action. If this flag is not set,
+ * then it should be considered accept action.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP \
+ UINT32_C(0x4)
/*
- * Setting of this flag indicates the applicability to the
- * loopback path.
+ * If this flag is set, all t_l2_* fields are invalid
+ * and they should not be specified.
+ * If this flag is set, then l2_* fields refer to
+ * fields of outermost L2 header.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK UINT32_C(0x2)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST \
+ UINT32_C(0x8)
+ uint32_t enables;
/*
- * Setting of this flag indicates drop action. If this flag is
- * not set, then it should be considered accept action.
+ * This bit must be '1' for the l2_addr field to be
+ * configured.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP UINT32_C(0x4)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR \
+ UINT32_C(0x1)
/*
- * If this flag is set, all t_l2_* fields are invalid and they
- * should not be specified. If this flag is set, then l2_*
- * fields refer to fields of outermost L2 header.
+ * This bit must be '1' for the l2_addr_mask field to be
+ * configured.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST UINT32_C(0x8)
- uint32_t enables;
- /* This bit must be '1' for the l2_addr field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR UINT32_C(0x1)
- /* This bit must be '1' for the l2_addr_mask field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK \
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK \
UINT32_C(0x2)
- /* This bit must be '1' for the l2_ovlan field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the l2_ovlan field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN \
+ UINT32_C(0x4)
/*
* This bit must be '1' for the l2_ovlan_mask field to be
* configured.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK \
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK \
UINT32_C(0x8)
- /* This bit must be '1' for the l2_ivlan field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the l2_ivlan field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN \
+ UINT32_C(0x10)
/*
* This bit must be '1' for the l2_ivlan_mask field to be
* configured.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK \
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK \
UINT32_C(0x20)
- /* This bit must be '1' for the t_l2_addr field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the t_l2_addr field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR \
+ UINT32_C(0x40)
/*
* This bit must be '1' for the t_l2_addr_mask field to be
* configured.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR_MASK \
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR_MASK \
UINT32_C(0x80)
- /* This bit must be '1' for the t_l2_ovlan field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN \
+ /*
+ * This bit must be '1' for the t_l2_ovlan field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN \
UINT32_C(0x100)
/*
* This bit must be '1' for the t_l2_ovlan_mask field to be
* configured.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN_MASK \
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN_MASK \
UINT32_C(0x200)
- /* This bit must be '1' for the t_l2_ivlan field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN \
+ /*
+ * This bit must be '1' for the t_l2_ivlan field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN \
UINT32_C(0x400)
/*
* This bit must be '1' for the t_l2_ivlan_mask field to be
* configured.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN_MASK \
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN_MASK \
UINT32_C(0x800)
- /* This bit must be '1' for the src_type field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE UINT32_C(0x1000)
- /* This bit must be '1' for the src_id field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID UINT32_C(0x2000)
- /* This bit must be '1' for the tunnel_type field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \
+ /*
+ * This bit must be '1' for the src_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE \
+ UINT32_C(0x1000)
+ /*
+ * This bit must be '1' for the src_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID \
+ UINT32_C(0x2000)
+ /*
+ * This bit must be '1' for the tunnel_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \
UINT32_C(0x4000)
- /* This bit must be '1' for the dst_id field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID UINT32_C(0x8000)
+ /*
+ * This bit must be '1' for the dst_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID \
+ UINT32_C(0x8000)
/*
* This bit must be '1' for the mirror_vnic_id field to be
* configured.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
UINT32_C(0x10000)
- uint8_t l2_addr[6];
/*
* This value sets the match value for the L2 MAC address.
- * Destination MAC address for RX path. Source MAC address for
- * TX path.
+ * Destination MAC address for RX path.
+ * Source MAC address for TX path.
*/
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t l2_addr_mask[6];
+ uint8_t l2_addr[6];
+ uint8_t unused_0[2];
/*
- * This value sets the mask value for the L2 address. A value of
- * 0 will mask the corresponding bit from compare.
+ * This value sets the mask value for the L2 address.
+ * A value of 0 will mask the corresponding bit from
+ * compare.
*/
- uint16_t l2_ovlan;
+ uint8_t l2_addr_mask[6];
/* This value sets VLAN ID value for outer VLAN. */
- uint16_t l2_ovlan_mask;
+ uint16_t l2_ovlan;
/*
- * This value sets the mask value for the ovlan id. A value of 0
- * will mask the corresponding bit from compare.
+ * This value sets the mask value for the ovlan id.
+ * A value of 0 will mask the corresponding bit from
+ * compare.
*/
- uint16_t l2_ivlan;
+ uint16_t l2_ovlan_mask;
/* This value sets VLAN ID value for inner VLAN. */
- uint16_t l2_ivlan_mask;
+ uint16_t l2_ivlan;
/*
- * This value sets the mask value for the ivlan id. A value of 0
- * will mask the corresponding bit from compare.
+ * This value sets the mask value for the ivlan id.
+ * A value of 0 will mask the corresponding bit from
+ * compare.
*/
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t t_l2_addr[6];
+ uint16_t l2_ivlan_mask;
+ uint8_t unused_1[2];
/*
- * This value sets the match value for the tunnel L2 MAC
- * address. Destination MAC address for RX path. Source MAC
- * address for TX path.
+ * This value sets the match value for the tunnel
+ * L2 MAC address.
+ * Destination MAC address for RX path.
+ * Source MAC address for TX path.
*/
- uint8_t unused_4;
- uint8_t unused_5;
- uint8_t t_l2_addr_mask[6];
+ uint8_t t_l2_addr[6];
+ uint8_t unused_2[2];
/*
- * This value sets the mask value for the tunnel L2 address. A
- * value of 0 will mask the corresponding bit from compare.
+ * This value sets the mask value for the tunnel L2
+ * address.
+ * A value of 0 will mask the corresponding bit from
+ * compare.
*/
- uint16_t t_l2_ovlan;
+ uint8_t t_l2_addr_mask[6];
/* This value sets VLAN ID value for tunnel outer VLAN. */
- uint16_t t_l2_ovlan_mask;
+ uint16_t t_l2_ovlan;
/*
- * This value sets the mask value for the tunnel ovlan id. A
- * value of 0 will mask the corresponding bit from compare.
+ * This value sets the mask value for the tunnel ovlan id.
+ * A value of 0 will mask the corresponding bit from
+ * compare.
*/
- uint16_t t_l2_ivlan;
+ uint16_t t_l2_ovlan_mask;
/* This value sets VLAN ID value for tunnel inner VLAN. */
- uint16_t t_l2_ivlan_mask;
+ uint16_t t_l2_ivlan;
/*
- * This value sets the mask value for the tunnel ivlan id. A
- * value of 0 will mask the corresponding bit from compare.
+ * This value sets the mask value for the tunnel ivlan id.
+ * A value of 0 will mask the corresponding bit from
+ * compare.
*/
- uint8_t src_type;
+ uint16_t t_l2_ivlan_mask;
/* This value identifies the type of source of the packet. */
+ uint8_t src_type;
/* Network port */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_NPORT UINT32_C(0x0)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_NPORT UINT32_C(0x0)
/* Physical function */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_PF UINT32_C(0x1)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_PF UINT32_C(0x1)
/* Virtual function */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VF UINT32_C(0x2)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VF UINT32_C(0x2)
/* Virtual NIC of a function */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VNIC UINT32_C(0x3)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VNIC UINT32_C(0x3)
/* Embedded processor for CFA management */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_KONG UINT32_C(0x4)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_KONG UINT32_C(0x4)
/* Embedded processor for OOB management */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_APE UINT32_C(0x5)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_APE UINT32_C(0x5)
/* Embedded processor for RoCE */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_BONO UINT32_C(0x6)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_BONO UINT32_C(0x6)
/* Embedded processor for network proxy functions */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_TANG UINT32_C(0x7)
- uint8_t unused_6;
- uint32_t src_id;
- /*
- * This value is the id of the source. For a network port, it
- * represents port_id. For a physical function, it represents
- * fid. For a virtual function, it represents vf_id. For a vnic,
- * it represents vnic_id. For embedded processors, this id is
- * not valid. Notes: 1. The function ID is implied if it src_id
- * is not provided for a src_type that is either
- */
- uint8_t tunnel_type;
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_TANG UINT32_C(0x7)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_LAST \
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_TANG
+ uint8_t unused_3;
+ /*
+ * This value is the id of the source.
+ * For a network port, it represents port_id.
+ * For a physical function, it represents fid.
+ * For a virtual function, it represents vf_id.
+ * For a vnic, it represents vnic_id.
+ * For embedded processors, this id is not valid.
+ *
+ * Notes:
+	 * 1. The function ID is implied if src_id is
+ * not provided for a src_type that is either
+ */
+ uint32_t src_id;
/* Tunnel Type. */
+ uint8_t tunnel_type;
/* Non-tunnel */
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
UINT32_C(0x0)
- /* Virtual eXtensible Local Area Network (VXLAN) */
+ /* Virtual eXtensible Local Area Network (VXLAN) */
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
UINT32_C(0x1)
- /*
- * Network Virtualization Generic Routing
- * Encapsulation (NVGRE)
- */
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
UINT32_C(0x2)
- /*
- * Generic Routing Encapsulation (GRE) inside
- * Ethernet payload
- */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE UINT32_C(0x3)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
/* IP in IP */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP UINT32_C(0x4)
- /* Generic Network Virtualization Encapsulation (Geneve) */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5)
- /* Multi-Protocol Lable Switching (MPLS) */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS UINT32_C(0x6)
- /* Stateless Transport Tunnel (STT) */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT UINT32_C(0x7)
- /*
- * Generic Routing Encapsulation (GRE) inside IP
- * datagram payload
- */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE UINT32_C(0x8)
- /*
- * IPV4 over virtual eXtensible Local Area
- * Network (IPV4oVXLAN)
- */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+	/* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
UINT32_C(0x9)
/* Any tunneled traffic */
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
UINT32_C(0xff)
- uint8_t unused_7;
- uint16_t dst_id;
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL
+ uint8_t unused_4;
/*
- * If set, this value shall represent the Logical VNIC ID of the
- * destination VNIC for the RX path and network port id of the
- * destination port for the TX path.
+ * If set, this value shall represent the
+ * Logical VNIC ID of the destination VNIC for the RX
+ * path and network port id of the destination port for
+ * the TX path.
*/
- uint16_t mirror_vnic_id;
- /* Logical VNIC ID of the VNIC where traffic is mirrored. */
- uint8_t pri_hint;
+ uint16_t dst_id;
/*
- * This hint is provided to help in placing the filter in the
- * filter table.
+ * Logical VNIC ID of the VNIC where traffic is
+ * mirrored.
*/
+ uint16_t mirror_vnic_id;
+ /*
+ * This hint is provided to help in placing
+ * the filter in the filter table.
+ */
+ uint8_t pri_hint;
/* No preference */
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_NO_PREFER \
UINT32_C(0x0)
@@ -9355,576 +20154,1554 @@ struct hwrm_cfa_l2_filter_alloc_input {
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER \
UINT32_C(0x2)
/* As high as possible */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MAX UINT32_C(0x3)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MAX \
+ UINT32_C(0x3)
/* As low as possible */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MIN UINT32_C(0x4)
- uint8_t unused_8;
- uint32_t unused_9;
- uint64_t l2_filter_id_hint;
- /*
- * This is the ID of the filter that goes along with the
- * pri_hint. This field is valid only for the following values.
- * 1 - Above the given filter 2 - Below the given filter
- */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MIN \
+ UINT32_C(0x4)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_LAST \
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MIN
+ uint8_t unused_5;
+ uint32_t unused_6;
+ /*
+ * This is the ID of the filter that goes along with
+ * the pri_hint.
+ *
+ * This field is valid only for the following values.
+ * 1 - Above the given filter
+ * 2 - Below the given filter
+ */
+ uint64_t l2_filter_id_hint;
} __attribute__((packed));
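The request above is driven entirely by the enables bitmap: only fields whose enable bit is set take part in the match. As an illustration only (not code from this patch), a caller might fill a plain RX unicast MAC filter roughly as below; the sketch assumes this header plus rte_byteorder.h and string.h are included, and relies on the PATH_RX flag and the L2_ADDR/L2_ADDR_MASK enable bits defined earlier in this structure.

	/* Sketch: exact-match RX filter on a destination MAC, steered to dst_id. */
	static void fill_l2_mac_filter(struct hwrm_cfa_l2_filter_alloc_input *req,
				       const uint8_t mac[6], uint16_t dst_id)
	{
		memset(req, 0, sizeof(*req));
		req->flags = rte_cpu_to_le_32(
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX);
		req->enables = rte_cpu_to_le_32(
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID);
		memcpy(req->l2_addr, mac, 6);
		memset(req->l2_addr_mask, 0xff, 6);	/* compare all MAC bits */
		req->dst_id = rte_cpu_to_le_16(dst_id);	/* destination VNIC */
	}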
-/* Output (24 bytes) */
+/* hwrm_cfa_l2_filter_alloc_output (size:192b/24B) */
struct hwrm_cfa_l2_filter_alloc_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint64_t l2_filter_id;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/*
- * This value identifies a set of CFA data structures used for
- * an L2 context.
+ * This value identifies a set of CFA data structures used for an L2
+ * context.
*/
- uint32_t flow_id;
+ uint64_t l2_filter_id;
/*
- * This is the ID of the flow associated with this filter. This
- * value shall be used to match and associate the flow
- * identifier returned in completion records. A value of
- * 0xFFFFFFFF shall indicate no flow id.
+ * This is the ID of the flow associated with this
+ * filter.
+ * This value shall be used to match and associate the
+ * flow identifier returned in completion records.
+ * A value of 0xFFFFFFFF shall indicate no flow id.
*/
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t valid;
+ uint32_t flow_id;
+ uint8_t unused_0[3];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* hwrm_cfa_l2_filter_free */
-/*
- * Description: Free a L2 filter. The HWRM shall free all associated filter
- * resources with the L2 filter.
- */
-/* Input (24 bytes) */
+/***************************
+ * hwrm_cfa_l2_filter_free *
+ ***************************/
+
+
+/* hwrm_cfa_l2_filter_free_input (size:192b/24B) */
struct hwrm_cfa_l2_filter_free_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint64_t l2_filter_id;
+ uint64_t resp_addr;
/*
- * This value identifies a set of CFA data structures used for
- * an L2 context.
+ * This value identifies a set of CFA data structures used for an L2
+ * context.
*/
+ uint64_t l2_filter_id;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* hwrm_cfa_l2_filter_free_output (size:128b/16B) */
struct hwrm_cfa_l2_filter_free_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
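The l2_filter_id returned in the alloc response is the only payload the free request needs. A minimal sketch, assuming a hypothetical hwrm_send() helper standing in for the driver's real request/response transport (which is not part of this header):

	static int free_l2_filter(uint64_t l2_filter_id)
	{
		struct hwrm_cfa_l2_filter_free_input req;
		struct hwrm_cfa_l2_filter_free_output resp;

		memset(&req, 0, sizeof(req));
		memset(&resp, 0, sizeof(resp));
		req.l2_filter_id = rte_cpu_to_le_64(l2_filter_id);
		/* hwrm_send() is a placeholder, not defined in this header. */
		return hwrm_send(&req, sizeof(req), &resp, sizeof(resp));
	}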
-/* hwrm_cfa_l2_filter_cfg */
-/* Description: Change the configuration of an existing L2 filter */
-/* Input (40 bytes) */
+/**************************
+ * hwrm_cfa_l2_filter_cfg *
+ **************************/
+
+
+/* hwrm_cfa_l2_filter_cfg_input (size:320b/40B) */
struct hwrm_cfa_l2_filter_cfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t flags;
+ uint64_t resp_addr;
+ uint32_t flags;
/*
- * Enumeration denoting the RX, TX type of the resource. This
- * enumeration is used for resources that are similar for both
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
* TX and RX paths of the chip.
*/
- #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH UINT32_C(0x1)
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH UINT32_C(0x1)
/* tx path */
- #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_TX \
- (UINT32_C(0x0) << 0)
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
/* rx path */
- #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX \
- (UINT32_C(0x1) << 0)
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
#define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_LAST \
- CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX
+ HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX
+ /*
+ * Setting of this flag indicates drop action. If this flag is not set,
+ * then it should be considered accept action.
+ */
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_DROP UINT32_C(0x2)
+ uint32_t enables;
/*
- * Setting of this flag indicates drop action. If this flag is
- * not set, then it should be considered accept action.
+ * This bit must be '1' for the dst_id field to be
+ * configured.
*/
- #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_DROP UINT32_C(0x2)
- uint32_t enables;
- /* This bit must be '1' for the dst_id field to be configured. */
- #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_DST_ID UINT32_C(0x1)
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_DST_ID \
+ UINT32_C(0x1)
/*
* This bit must be '1' for the new_mirror_vnic_id field to be
* configured.
*/
- #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \
UINT32_C(0x2)
- uint64_t l2_filter_id;
/*
- * This value identifies a set of CFA data structures used for
- * an L2 context.
+ * This value identifies a set of CFA data structures used for an L2
+ * context.
*/
- uint32_t dst_id;
+ uint64_t l2_filter_id;
/*
- * If set, this value shall represent the Logical VNIC ID of the
- * destination VNIC for the RX path and network port id of the
- * destination port for the TX path.
+ * If set, this value shall represent the
+ * Logical VNIC ID of the destination VNIC for the RX
+ * path and network port id of the destination port for
+ * the TX path.
*/
- uint32_t new_mirror_vnic_id;
- /* New Logical VNIC ID of the VNIC where traffic is mirrored. */
+ uint32_t dst_id;
+ /*
+ * New Logical VNIC ID of the VNIC where traffic is
+ * mirrored.
+ */
+ uint32_t new_mirror_vnic_id;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */
struct hwrm_cfa_l2_filter_cfg_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
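Because the cfg request reuses the same enables convention, an existing filter can be re-pointed without disturbing its other attributes. A minimal, illustrative sketch using only names defined above:

	/* Sketch: move an existing RX filter to a new destination VNIC.
	 * Only the dst_id enable is set, so the mirror VNIC stays unchanged. */
	static void fill_l2_filter_move(struct hwrm_cfa_l2_filter_cfg_input *req,
					uint64_t l2_filter_id, uint32_t new_dst_id)
	{
		memset(req, 0, sizeof(*req));
		req->flags = rte_cpu_to_le_32(
			HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX);
		req->enables = rte_cpu_to_le_32(
			HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_DST_ID);
		req->l2_filter_id = rte_cpu_to_le_64(l2_filter_id);
		req->dst_id = rte_cpu_to_le_32(new_dst_id);
	}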
-/* hwrm_cfa_l2_set_rx_mask */
-/* Description: This command will set rx mask of the function. */
-/* Input (56 bytes) */
+/***************************
+ * hwrm_cfa_l2_set_rx_mask *
+ ***************************/
+
+
+/* hwrm_cfa_l2_set_rx_mask_input (size:448b/56B) */
struct hwrm_cfa_l2_set_rx_mask_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t vnic_id;
+ uint64_t resp_addr;
/* VNIC ID */
- uint32_t mask;
- /* Reserved for future use. */
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_RESERVED UINT32_C(0x1)
+ uint32_t vnic_id;
+ uint32_t mask;
/*
* When this bit is '1', the function is requested to accept
* multi-cast packets specified by the multicast addr table.
*/
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST UINT32_C(0x2)
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST \
+ UINT32_C(0x2)
/*
- * When this bit is '1', the function is requested to accept all
- * multi-cast packets.
+ * When this bit is '1', the function is requested to accept
+ * all multi-cast packets.
*/
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST UINT32_C(0x4)
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST \
+ UINT32_C(0x4)
/*
* When this bit is '1', the function is requested to accept
* broadcast packets.
*/
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST UINT32_C(0x8)
- /*
- * When this bit is '1', the function is requested to be put in
- * the promiscuous mode. The HWRM should accept any function to
- * set up promiscuous mode. The HWRM shall follow the semantics
- * below for the promiscuous mode support. # When partitioning
- * is not enabled on a port (i.e. single PF on the port), then
- * the PF shall be allowed to be in the promiscuous mode. When
- * the PF is in the promiscuous mode, then it shall receive all
- * host bound traffic on that port. # When partitioning is
- * enabled on a port (i.e. multiple PFs per port) and a PF on
- * that port is in the promiscuous mode, then the PF receives
- * all traffic within that partition as identified by a unique
- * identifier for the PF (e.g. S-Tag). If a unique outer VLAN
- * for the PF is specified, then the setting of promiscuous mode
- * on that PF shall result in the PF receiving all host bound
- * traffic with matching outer VLAN. # A VF shall can be set in
- * the promiscuous mode. In the promiscuous mode, the VF does
- * not receive any traffic unless a unique outer VLAN for the VF
- * is specified. If a unique outer VLAN for the VF is specified,
- * then the setting of promiscuous mode on that VF shall result
- * in the VF receiving all host bound traffic with the matching
- * outer VLAN. # The HWRM shall allow the setting of promiscuous
- * mode on a function independently from the promiscuous mode
- * settings on other functions.
- */
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS UINT32_C(0x10)
- /*
- * If this flag is set, the corresponding RX filters shall be
- * set up to cover multicast/broadcast filters for the outermost
- * Layer 2 destination MAC address field.
- */
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_OUTERMOST UINT32_C(0x20)
- /*
- * If this flag is set, the corresponding RX filters shall be
- * set up to cover multicast/broadcast filters for the VLAN-
- * tagged packets that match the TPID and VID fields of VLAN
- * tags in the VLAN tag table specified in this command.
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST \
+ UINT32_C(0x8)
+ /*
+ * When this bit is '1', the function is requested to be
+ * put in the promiscuous mode.
+ *
+ * The HWRM should accept any function to set up
+ * promiscuous mode.
+ *
+ * The HWRM shall follow the semantics below for the
+ * promiscuous mode support.
+ * # When partitioning is not enabled on a port
+ * (i.e. single PF on the port), then the PF shall
+ * be allowed to be in the promiscuous mode. When the
+ * PF is in the promiscuous mode, then it shall
+ * receive all host bound traffic on that port.
+ * # When partitioning is enabled on a port
+ * (i.e. multiple PFs per port) and a PF on that
+ * port is in the promiscuous mode, then the PF
+ * receives all traffic within that partition as
+ * identified by a unique identifier for the
+ * PF (e.g. S-Tag). If a unique outer VLAN
+ * for the PF is specified, then the setting of
+ * promiscuous mode on that PF shall result in the
+ * PF receiving all host bound traffic with matching
+ * outer VLAN.
+	 * # A VF can be set in the promiscuous mode.
+ * In the promiscuous mode, the VF does not receive any
+ * traffic unless a unique outer VLAN for the
+ * VF is specified. If a unique outer VLAN
+ * for the VF is specified, then the setting of
+ * promiscuous mode on that VF shall result in the
+ * VF receiving all host bound traffic with the
+ * matching outer VLAN.
+ * # The HWRM shall allow the setting of promiscuous
+ * mode on a function independently from the
+ * promiscuous mode settings on other functions.
+ */
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS \
+ UINT32_C(0x10)
+ /*
+ * If this flag is set, the corresponding RX
+ * filters shall be set up to cover multicast/broadcast
+ * filters for the outermost Layer 2 destination MAC
+ * address field.
*/
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY UINT32_C(0x40)
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_OUTERMOST \
+ UINT32_C(0x20)
/*
- * If this flag is set, the corresponding RX filters shall be
- * set up to cover multicast/broadcast filters for non-VLAN
- * tagged packets and VLAN-tagged packets that match the TPID
- * and VID fields of VLAN tags in the VLAN tag table specified
- * in this command.
+ * If this flag is set, the corresponding RX
+ * filters shall be set up to cover multicast/broadcast
+ * filters for the VLAN-tagged packets that match the
+ * TPID and VID fields of VLAN tags in the VLAN tag
+ * table specified in this command.
*/
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN UINT32_C(0x80)
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY \
+ UINT32_C(0x40)
/*
- * If this flag is set, the corresponding RX filters shall be
- * set up to cover multicast/broadcast filters for non-VLAN
- * tagged packets and VLAN-tagged packets matching any VLAN tag.
- * If this flag is set, then the HWRM shall ignore VLAN tags
- * specified in vlan_tag_tbl. If none of vlanonly, vlan_nonvlan,
- * and anyvlan_nonvlan flags is set, then the HWRM shall ignore
- * VLAN tags specified in vlan_tag_tbl. The HWRM client shall
- * set at most one flag out of vlanonly, vlan_nonvlan, and
- * anyvlan_nonvlan.
+ * If this flag is set, the corresponding RX
+ * filters shall be set up to cover multicast/broadcast
+ * filters for non-VLAN tagged packets and VLAN-tagged
+ * packets that match the TPID and VID fields of VLAN
+ * tags in the VLAN tag table specified in this command.
*/
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN \
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN \
+ UINT32_C(0x80)
+ /*
+ * If this flag is set, the corresponding RX
+ * filters shall be set up to cover multicast/broadcast
+ * filters for non-VLAN tagged packets and VLAN-tagged
+ * packets matching any VLAN tag.
+ *
+ * If this flag is set, then the HWRM shall ignore
+ * VLAN tags specified in vlan_tag_tbl.
+ *
+ * If none of vlanonly, vlan_nonvlan, and anyvlan_nonvlan
+ * flags is set, then the HWRM shall ignore
+ * VLAN tags specified in vlan_tag_tbl.
+ *
+ * The HWRM client shall set at most one flag out of
+ * vlanonly, vlan_nonvlan, and anyvlan_nonvlan.
+ */
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN \
UINT32_C(0x100)
- uint64_t mc_tbl_addr;
/* This is the address for mcast address tbl. */
- uint32_t num_mc_entries;
+ uint64_t mc_tbl_addr;
/*
* This value indicates how many entries in mc_tbl are valid.
* Each entry is 6 bytes.
*/
- uint32_t unused_0;
- uint64_t vlan_tag_tbl_addr;
+ uint32_t num_mc_entries;
+ uint8_t unused_0[4];
/*
- * This is the address for VLAN tag table. Each VLAN entry in
- * the table is 4 bytes of a VLAN tag including TPID, PCP, DEI,
- * and VID fields in network byte order.
+ * This is the address for VLAN tag table.
+ * Each VLAN entry in the table is 4 bytes of a VLAN tag
+ * including TPID, PCP, DEI, and VID fields in network byte
+ * order.
*/
- uint32_t num_vlan_tags;
+ uint64_t vlan_tag_tbl_addr;
/*
* This value indicates how many entries in vlan_tag_tbl are
* valid. Each entry is 4 bytes.
*/
- uint32_t unused_1;
+ uint32_t num_vlan_tags;
+ uint8_t unused_1[4];
} __attribute__((packed));
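The mask bits above combine into a single 32-bit word, and the multicast and VLAN tables are passed by DMA address rather than inline. A minimal sketch of composing a typical mask (broadcast plus an optional multicast list, optionally promiscuous); mc_tbl_iova is assumed to be the IO address of a DMA-able buffer holding num_mc six-byte MAC entries:

	static void fill_rx_mask(struct hwrm_cfa_l2_set_rx_mask_input *req,
				 uint32_t vnic_id, uint64_t mc_tbl_iova,
				 uint32_t num_mc, int promisc)
	{
		uint32_t mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;

		memset(req, 0, sizeof(*req));
		if (promisc)
			mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
		if (num_mc) {
			mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
			req->mc_tbl_addr = rte_cpu_to_le_64(mc_tbl_iova);
			req->num_mc_entries = rte_cpu_to_le_32(num_mc);
		}
		req->vnic_id = rte_cpu_to_le_32(vnic_id);
		req->mask = rte_cpu_to_le_32(mask);
	}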
-/* Output (16 bytes) */
+/* hwrm_cfa_l2_set_rx_mask_output (size:128b/16B) */
struct hwrm_cfa_l2_set_rx_mask_output {
- uint16_t error_code;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_cfa_l2_set_rx_mask_cmd_err (size:64b/8B) */
+struct hwrm_cfa_l2_set_rx_mask_cmd_err {
+ /*
+ * command specific error codes that goes to
+ * the cmd_err field in Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN \
+ UINT32_C(0x0)
+ /* Unable to complete operation due to conflict with Ntuple Filter */
+ #define HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR \
+ UINT32_C(0x1)
+ #define HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST \
+ HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/*******************************
+ * hwrm_cfa_vlan_antispoof_cfg *
+ *******************************/
+
+
+/* hwrm_cfa_vlan_antispoof_cfg_input (size:256b/32B) */
+struct hwrm_cfa_vlan_antispoof_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint64_t resp_addr;
+ /*
+ * Function ID of the function that is being configured.
+ * Only valid for a VF FID configured by the PF.
+ */
+ uint16_t fid;
+ uint8_t unused_0[2];
+ /* Number of VLAN entries in the vlan_tag_mask_tbl. */
+ uint32_t num_vlan_entries;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * The vlan_tag_mask_tbl_addr is the DMA address of the VLAN
+ * antispoof table. Each table entry contains the 16-bit TPID
+ * (0x8100 or 0x88a8 only), 16-bit VLAN ID, and a 16-bit mask,
+ * all in network order to match hwrm_cfa_l2_set_rx_mask.
+ * For an individual VLAN entry, the mask value should be 0xfff
+ * for the 12-bit VLAN ID.
*/
+ uint64_t vlan_tag_mask_tbl_addr;
} __attribute__((packed));
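The table entry layout (16-bit TPID, 16-bit VLAN ID, 16-bit mask, all in network byte order) is described only in prose above. The struct below is a hypothetical local definition, not part of this header, used to show how one entry might be packed:

	struct vlan_antispoof_entry {
		uint16_t tpid;
		uint16_t vid;
		uint16_t mask;
	} __attribute__((packed));

	static void fill_antispoof_entry(struct vlan_antispoof_entry *e,
					 uint16_t vid)
	{
		e->tpid = rte_cpu_to_be_16(0x8100);	/* 802.1Q TPID */
		e->vid  = rte_cpu_to_be_16(vid & 0xfff);
		e->mask = rte_cpu_to_be_16(0xfff);	/* compare the 12-bit VID */
	}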
-/* Command specific Error Codes (8 bytes) */
-struct hwrm_cfa_l2_set_rx_mask_cmd_err {
- uint8_t code;
+/* hwrm_cfa_vlan_antispoof_cfg_output (size:128b/16B) */
+struct hwrm_cfa_vlan_antispoof_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * command specific error codes that goes to the cmd_err field
- * in Common HWRM Error Response.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- /* Unknown error */
- #define HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ uint8_t valid;
+} __attribute__((packed));
+
+/********************************
+ * hwrm_cfa_vlan_antispoof_qcfg *
+ ********************************/
+
+
+/* hwrm_cfa_vlan_antispoof_qcfg_input (size:256b/32B) */
+struct hwrm_cfa_vlan_antispoof_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Function ID of the function that is being queried.
+ * Only valid for a VF FID queried by the PF.
+ */
+ uint16_t fid;
+ uint8_t unused_0[2];
/*
- * Unable to complete operation due to conflict
- * with Ntuple Filter
+ * Maximum number of VLAN entries the firmware is allowed to DMA
+ * to vlan_tag_mask_tbl.
*/
- #define \
- HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR \
- UINT32_C(0x1)
- uint8_t unused_0[7];
+ uint32_t max_vlan_entries;
+ /*
+ * The vlan_tag_mask_tbl_addr is the DMA address of the VLAN
+ * antispoof table to which firmware will DMA to. Each table
+ * entry will contain the 16-bit TPID (0x8100 or 0x88a8 only),
+ * 16-bit VLAN ID, and a 16-bit mask, all in network order to
+ * match hwrm_cfa_l2_set_rx_mask. For an individual VLAN entry,
+ * the mask value should be 0xfff for the 12-bit VLAN ID.
+ */
+ uint64_t vlan_tag_mask_tbl_addr;
} __attribute__((packed));
-/* hwrm_cfa_vlan_antispoof_cfg */
-/* Description: Configures vlan anti-spoof filters for VF. */
-/* Input (32 bytes) */
-struct hwrm_cfa_vlan_antispoof_cfg_input {
- uint16_t req_type;
+/* hwrm_cfa_vlan_antispoof_qcfg_output (size:128b/16B) */
+struct hwrm_cfa_vlan_antispoof_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Number of valid entries DMAd by firmware to vlan_tag_mask_tbl. */
+ uint32_t num_vlan_entries;
+ uint8_t unused_0[3];
/*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t cmpl_ring;
+ uint8_t valid;
+} __attribute__((packed));
+
+/********************************
+ * hwrm_cfa_tunnel_filter_alloc *
+ ********************************/
+
+
+/* hwrm_cfa_tunnel_filter_alloc_input (size:704b/88B) */
+struct hwrm_cfa_tunnel_filter_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t fid;
+ uint64_t resp_addr;
+ uint32_t flags;
+ /* Setting of this flag indicates the applicability to the loopback path. */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK \
+ UINT32_C(0x1)
+ uint32_t enables;
/*
- * Function ID of the function that is being configured. Only valid for
- * a VF FID configured by the PF.
+ * This bit must be '1' for the l2_filter_id field to be
+ * configured.
*/
- uint8_t unused_0;
- uint8_t unused_1;
- uint32_t num_vlan_entries;
- /* Number of VLAN entries in the vlan_tag_mask_tbl. */
- uint64_t vlan_tag_mask_tbl_addr;
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the l2_addr field to be
+ * configured.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the l2_ivlan field to be
+ * configured.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the l3_addr field to be
+ * configured.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L3_ADDR \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the l3_addr_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L3_ADDR_TYPE \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the t_l3_addr_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_T_L3_ADDR_TYPE \
+ UINT32_C(0x20)
/*
- * The vlan_tag_mask_tbl_addr is the DMA address of the VLAN antispoof
- * table. Each table entry contains the 16-bit TPID (0x8100 or 0x88a8
- * only), 16-bit VLAN ID, and a 16-bit mask, all in network order to
- * match hwrm_cfa_l2_set_rx_mask. For an individual VLAN entry, the mask
- * value should be 0xfff for the 12-bit VLAN ID.
+ * This bit must be '1' for the t_l3_addr field to be
+ * configured.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_T_L3_ADDR \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the tunnel_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the vni field to be
+ * configured.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_VNI \
+ UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the dst_vnic_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_DST_VNIC_ID \
+ UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the mirror_vnic_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
+ UINT32_C(0x400)
+ /*
+ * This value identifies a set of CFA data structures used for an L2
+ * context.
+ */
+ uint64_t l2_filter_id;
+ /*
+ * This value sets the match value for the inner L2
+ * MAC address.
+ * Destination MAC address for RX path.
+ * Source MAC address for TX path.
*/
-};
+ uint8_t l2_addr[6];
+ /*
+ * This value sets VLAN ID value for inner VLAN.
+ * Only 12-bits of VLAN ID are used in setting the filter.
+ */
+ uint16_t l2_ivlan;
+ /*
+ * The value of inner destination IP address to be used in filtering.
+ * For IPv4, first four bytes represent the IP address.
+ */
+ uint32_t l3_addr[4];
+ /*
+ * The value of tunnel destination IP address to be used in filtering.
+ * For IPv4, first four bytes represent the IP address.
+ */
+ uint32_t t_l3_addr[4];
+ /*
+ * This value indicates the type of inner IP address.
+ * 4 - IPv4
+ * 6 - IPv6
+ * All others are invalid.
+ */
+ uint8_t l3_addr_type;
+ /*
+ * This value indicates the type of tunnel IP address.
+ * 4 - IPv4
+ * 6 - IPv6
+ * All others are invalid.
+ */
+ uint8_t t_l3_addr_type;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Non-tunnel */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
+ UINT32_C(0x0)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+	/* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
+ UINT32_C(0xff)
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL
+ /*
+ * tunnel_flags allows the user to indicate the tunnel tag detection
+ * for the tunnel type specified in tunnel_type.
+ */
+ uint8_t tunnel_flags;
+ /*
+ * If the tunnel_type is geneve, then this bit indicates if we
+ * need to match the geneve OAM packet.
+ * If the tunnel_type is nvgre or gre, then this bit indicates if
+ * we need to detect checksum present bit in geneve header.
+ * If the tunnel_type is mpls, then this bit indicates if we need
+ * to match mpls packet with explicit IPV4/IPV6 null header.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR \
+ UINT32_C(0x1)
+ /*
+ * If the tunnel_type is geneve, then this bit indicates if we
+ * need to detect the critical option bit set in the oam packet.
+ * If the tunnel_type is nvgre or gre, then this bit indicates
+ * if we need to match nvgre packets with key present bit set in
+ * gre header.
+ * If the tunnel_type is mpls, then this bit indicates if we
+ * need to match mpls packet with S bit from inner/second label.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 \
+ UINT32_C(0x2)
+ /*
+ * If the tunnel_type is geneve, then this bit indicates if we
+ * need to match geneve packet with extended header bit set in
+ * geneve header.
+ * If the tunnel_type is nvgre or gre, then this bit indicates
+ * if we need to match nvgre packets with sequence number
+ * present bit set in gre header.
+ * If the tunnel_type is mpls, then this bit indicates if we
+ * need to match mpls packet with S bit from out/first label.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_FLAGS_TUN_FLAGS_EXTHDR_SEQNUM_S0 \
+ UINT32_C(0x4)
+ /*
+ * Virtual Network Identifier (VNI). Only valid with
+ * tunnel_types VXLAN, NVGRE, and Geneve.
+ * Only lower 24-bits of VNI field are used
+ * in setting up the filter.
+ */
+ uint32_t vni;
+ /* Logical VNIC ID of the destination VNIC. */
+ uint32_t dst_vnic_id;
+ /*
+ * Logical VNIC ID of the VNIC where traffic is
+ * mirrored.
+ */
+ uint32_t mirror_vnic_id;
+} __attribute__((packed));
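Putting the pieces together, a VXLAN tunnel filter typically keys on the inner destination MAC plus a 24-bit VNI and hangs off an existing L2 filter context. A minimal, illustrative sketch using only names defined in this structure:

	static void fill_vxlan_filter(struct hwrm_cfa_tunnel_filter_alloc_input *req,
				      uint64_t l2_filter_id,
				      const uint8_t inner_mac[6], uint32_t vni,
				      uint32_t dst_vnic_id)
	{
		memset(req, 0, sizeof(*req));
		req->enables = rte_cpu_to_le_32(
			HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID |
			HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE |
			HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_VNI |
			HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_DST_VNIC_ID);
		req->l2_filter_id = rte_cpu_to_le_64(l2_filter_id);
		memcpy(req->l2_addr, inner_mac, 6);
		req->tunnel_type =
			HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
		req->vni = rte_cpu_to_le_32(vni & 0xffffff);	/* 24-bit VNI */
		req->dst_vnic_id = rte_cpu_to_le_32(dst_vnic_id);
	}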
-/* Output (16 bytes) */
-struct hwrm_cfa_vlan_antispoof_cfg_output {
- uint16_t error_code;
+/* hwrm_cfa_tunnel_filter_alloc_output (size:192b/24B) */
+struct hwrm_cfa_tunnel_filter_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* This value is an opaque id into CFA data structures. */
+ uint64_t tunnel_filter_id;
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * This is the ID of the flow associated with this
+ * filter.
+ * This value shall be used to match and associate the
+ * flow identifier returned in completion records.
+ * A value of 0xFFFFFFFF shall indicate no flow id.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint32_t flow_id;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*******************************
+ * hwrm_cfa_tunnel_filter_free *
+ *******************************/
+
+
+/* hwrm_cfa_tunnel_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_tunnel_filter_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* This value is an opaque id into CFA data structures. */
+ uint64_t tunnel_filter_id;
+} __attribute__((packed));
+
+/* hwrm_cfa_tunnel_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_tunnel_filter_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***************************************
+ * hwrm_cfa_redirect_tunnel_type_alloc *
+ ***************************************/
+
+
+/* hwrm_cfa_redirect_tunnel_type_alloc_input (size:192b/24B) */
+struct hwrm_cfa_redirect_tunnel_type_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
/*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
-};
+ uint64_t resp_addr;
+ /* The destination function id, to whom the traffic is redirected. */
+ uint16_t dest_fid;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Non-tunnel */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
+ UINT32_C(0x0)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+	/* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+	/* Enhanced Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_IPGRE_V1 \
+ UINT32_C(0xa)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
+ UINT32_C(0xff)
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL
+ /* Tunnel alloc flags. */
+ uint8_t flags;
+	/* Setting of this flag indicates modifying an existing redirect tunnel to a new destination function ID. */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_FLAGS_MODIFY_DST \
+ UINT32_C(0x1)
+ uint8_t unused_0[4];
+} __attribute__((packed));
-/* hwrm_cfa_ntuple_filter_alloc */
-/*
- * Description: This is a ntuple filter that uses fields from L4/L3 header and
- * optionally fields from L2. The ntuple filters apply to receive traffic only.
- * All L2/L3/L4 header fields are specified in network byte order. These filters
- * can be used for Receive Flow Steering (RFS). # For ethertype value, only
- * 0x0800 (IPv4) and 0x86dd (IPv6) shall be supported for ntuple filters. # If a
- * field specified in this command is not enabled as a valid field, then that
- * field shall not be used in matching packet header fields against this filter.
- */
-/* Input (128 bytes) */
-struct hwrm_cfa_ntuple_filter_alloc_input {
- uint16_t req_type;
+/* hwrm_cfa_redirect_tunnel_type_alloc_output (size:128b/16B) */
+struct hwrm_cfa_redirect_tunnel_type_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t cmpl_ring;
+ uint8_t valid;
+} __attribute__((packed));
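For orientation, here is a minimal sketch of how a caller might fill this request/response pair. It assumes this header plus <string.h>/<stdint.h> are included; hwrm_send() is a hypothetical transport helper (not defined in this file), the command opcode is supplied by the caller from the req_type enum, and little-endian conversion of the fields is omitted for brevity.

/* Hypothetical transport helper: deliver the request to the HWRM channel
 * and wait for the response to land in the buffer at resp_addr. */
extern int hwrm_send(void *req, uint16_t req_len, void *resp, uint16_t resp_len);

static int redirect_tunnel_type_alloc(uint16_t opcode, uint16_t dest_fid)
{
	struct hwrm_cfa_redirect_tunnel_type_alloc_input req;
	struct hwrm_cfa_redirect_tunnel_type_alloc_output resp;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));
	req.req_type = opcode;		/* command id from the req_type enum */
	req.target_id = 0xffff;		/* 0xFFFF addresses the HWRM itself */
	/* In a real driver this is the DMA (IOVA) address of a dedicated
	 * response buffer; a stack address is shown only to keep the sketch
	 * short. */
	req.resp_addr = (uint64_t)(uintptr_t)&resp;
	req.dest_fid = dest_fid;
	req.tunnel_type =
		HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;

	if (hwrm_send(&req, sizeof(req), &resp, sizeof(resp)) != 0)
		return -1;
	/* The valid byte is written last by the firmware, so it must read as
	 * 1 before error_code (or any other field) can be trusted. */
	return (resp.valid == 1 && resp.error_code == 0) ? 0 : -1;
}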
+
+/**************************************
+ * hwrm_cfa_redirect_tunnel_type_free *
+ **************************************/
+
+
+/* hwrm_cfa_redirect_tunnel_type_free_input (size:192b/24B) */
+struct hwrm_cfa_redirect_tunnel_type_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t cmpl_ring;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint64_t resp_addr;
+ uint16_t seq_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* The destination function id to which the traffic is redirected. */
+ uint16_t dest_fid;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Non-tunnel */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_NONTUNNEL \
+ UINT32_C(0x0)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+ /* Enhanced Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_IPGRE_V1 \
+ UINT32_C(0xa)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_ANYTUNNEL \
+ UINT32_C(0xff)
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_ANYTUNNEL
+ uint8_t unused_0[5];
+} __attribute__((packed));
+
+/* hwrm_cfa_redirect_tunnel_type_free_output (size:128b/16B) */
+struct hwrm_cfa_redirect_tunnel_type_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************************
+ * hwrm_cfa_redirect_tunnel_type_info *
+ **************************************/
+
+
+/* hwrm_cfa_redirect_tunnel_type_info_input (size:192b/24B) */
+struct hwrm_cfa_redirect_tunnel_type_info_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* The source function id. */
+ uint16_t src_fid;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Non-tunnel */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_NONTUNNEL \
+ UINT32_C(0x0)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+ /* Enhanced Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_IPGRE_V1 \
+ UINT32_C(0xa)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_ANYTUNNEL \
+ UINT32_C(0xff)
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_ANYTUNNEL
+ uint8_t unused_0[5];
+} __attribute__((packed));
+
+/* hwrm_cfa_redirect_tunnel_type_info_output (size:128b/16B) */
+struct hwrm_cfa_redirect_tunnel_type_info_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* The destination function id to which the traffic is redirected. */
+ uint16_t dest_fid;
+ uint8_t unused_0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_vxlan_ipv4_hdr (size:128b/16B) */
+struct hwrm_vxlan_ipv4_hdr {
+ /* IPv4 version and header length. */
+ uint8_t ver_hlen;
+ /* IPv4 header length */
+ #define HWRM_VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK UINT32_C(0xf)
+ #define HWRM_VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0
+ /* Version */
+ #define HWRM_VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK UINT32_C(0xf0)
+ #define HWRM_VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4
+ /* IPv4 type of service. */
+ uint8_t tos;
+ /* IPv4 identification. */
+ uint16_t ip_id;
+ /* IPv4 flags and offset. */
+ uint16_t flags_frag_offset;
+ /* IPv4 TTL. */
+ uint8_t ttl;
+ /* IPv4 protocol. */
+ uint8_t protocol;
+ /* IPv4 source address. */
+ uint32_t src_ip_addr;
+ /* IPv4 destination address. */
+ uint32_t dest_ip_addr;
+} __attribute__((packed));
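Because ver_hlen packs two subfields into one byte, a short sketch of how the mask/shift macros above combine; the values used (version 4, five 32-bit words) are simply the standard ones for an option-less 20-byte IPv4 header, and the header is assumed to be included.

static uint8_t vxlan_ipv4_ver_hlen(void)
{
	uint8_t v = 0;

	/* Version 4 goes in the high nibble, header length (in 32-bit words)
	 * in the low nibble; 0x45 for a plain IPv4 header. */
	v |= (4u << HWRM_VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT) &
	     HWRM_VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK;
	v |= (5u << HWRM_VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT) &
	     HWRM_VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK;
	return v;
}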
+
+/* hwrm_vxlan_ipv6_hdr (size:320b/40B) */
+struct hwrm_vxlan_ipv6_hdr {
+ /* IPv6 version, traffic class and flow label. */
+ uint32_t ver_tc_flow_label;
+ /* IPv6 version shift */
+ #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT \
+ UINT32_C(0x1c)
+ /* IPv6 version mask */
+ #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK \
+ UINT32_C(0xf0000000)
+ /* IPv6 TC shift */
+ #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT \
+ UINT32_C(0x14)
+ /* IPv6 TC mask */
+ #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK \
+ UINT32_C(0xff00000)
+ /* IPv6 flow label shift */
+ #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT \
+ UINT32_C(0x0)
+ /* IPv6 flow label mask */
+ #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK \
+ UINT32_C(0xfffff)
+ #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_LAST \
+ HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK
+ /* IPv6 payload length. */
+ uint16_t payload_len;
+ /* IPv6 next header. */
+ uint8_t next_hdr;
+ /* IPv6 TTL. */
+ uint8_t ttl;
+ /* IPv6 source address. */
+ uint32_t src_ip_addr[4];
+ /* IPv6 destination address. */
+ uint32_t dest_ip_addr[4];
+} __attribute__((packed));
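The IPv6 variant packs version, traffic class and flow label into a single 32-bit word; note that the *_SFT macros above are written in hex (0x1c is 28, 0x14 is 20). A minimal sketch, again assuming this header is included:

static uint32_t vxlan_ipv6_ver_tc_flow(uint8_t tc, uint32_t flow_label)
{
	uint32_t v = 0;

	v |= (UINT32_C(6) << HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT) &
	     HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK;
	v |= ((uint32_t)tc << HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT) &
	     HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK;
	v |= flow_label & HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK;
	return v;
}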
+
+/* hwrm_cfa_encap_data_vxlan (size:576b/72B) */
+struct hwrm_cfa_encap_data_vxlan {
+ /* Source MAC address. */
+ uint8_t src_mac_addr[6];
+ /* reserved. */
+ uint16_t unused_0;
+ /* Destination MAC address. */
+ uint8_t dst_mac_addr[6];
+ /* Number of VLAN tags. */
+ uint8_t num_vlan_tags;
+ /* reserved. */
+ uint8_t unused_1;
+ /* Outer VLAN TPID. */
+ uint16_t ovlan_tpid;
+ /* Outer VLAN TCI. */
+ uint16_t ovlan_tci;
+ /* Inner VLAN TPID. */
+ uint16_t ivlan_tpid;
+ /* Inner VLAN TCI. */
+ uint16_t ivlan_tci;
+ /* L3 header fields. */
+ uint32_t l3[10];
+ /* IP version mask. */
+ #define HWRM_CFA_ENCAP_DATA_VXLAN_L3_VER_MASK UINT32_C(0xf)
+ /* IP version 4. */
+ #define HWRM_CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 UINT32_C(0x4)
+ /* IP version 6. */
+ #define HWRM_CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 UINT32_C(0x6)
+ #define HWRM_CFA_ENCAP_DATA_VXLAN_L3_LAST \
+ HWRM_CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6
+ /* UDP source port. */
+ uint16_t src_port;
+ /* UDP destination port. */
+ uint16_t dst_port;
+ /* VXLAN Network Identifier. */
+ uint32_t vni;
+} __attribute__((packed));
+
+/*******************************
+ * hwrm_cfa_encap_record_alloc *
+ *******************************/
+
+
+/* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */
+struct hwrm_cfa_encap_record_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /* Setting of this flag indicates the applicability to the loopback path. */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_FLAGS_LOOPBACK \
+ UINT32_C(0x1)
+ /* Encapsulation Type. */
+ uint8_t encap_type;
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* VLAN */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_VLAN \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_IPGRE \
+ UINT32_C(0x8)
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_LAST \
+ HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_IPGRE
+ uint8_t unused_0[3];
+ /* This value is encap data used for the given encap type. */
+ uint32_t encap_data[20];
+} __attribute__((packed));
+
+/* hwrm_cfa_encap_record_alloc_output (size:128b/16B) */
+struct hwrm_cfa_encap_record_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* This value is an opaque id into CFA data structures. */
+ uint32_t encap_record_id;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
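To tie the pieces together: the 72-byte hwrm_cfa_encap_data_vxlan defined above fits inside the 80-byte encap_data[] area of this request when encap_type selects VXLAN. A hedged sketch of filling the request follows (the HWRM header fields, opcode and endianness handling are left out, as in the earlier example); the encap_record_id returned in the output is what later flow commands, and the free request below, refer to.

static void encap_record_alloc_fill(struct hwrm_cfa_encap_record_alloc_input *req,
				    const struct hwrm_cfa_encap_data_vxlan *encap)
{
	memset(req, 0, sizeof(*req));
	req->encap_type = HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_VXLAN;
	/* encap_data[] is interpreted according to encap_type; for VXLAN the
	 * hwrm_cfa_encap_data_vxlan layout above is the assumed contents. */
	memcpy(req->encap_data, encap, sizeof(*encap));
}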
+
+/******************************
+ * hwrm_cfa_encap_record_free *
+ ******************************/
+
+
+/* hwrm_cfa_encap_record_free_input (size:192b/24B) */
+struct hwrm_cfa_encap_record_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint32_t flags;
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* This value is an opaque id into CFA data structures. */
+ uint32_t encap_record_id;
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_cfa_encap_record_free_output (size:128b/16B) */
+struct hwrm_cfa_encap_record_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/********************************
+ * hwrm_cfa_ntuple_filter_alloc *
+ ********************************/
+
+
+/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */
+struct hwrm_cfa_ntuple_filter_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Setting of this flag indicates the applicability to the
- * loopback path.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK \
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /* Setting of this flag indicates the applicability to the loopback path. */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK \
UINT32_C(0x1)
/*
- * Setting of this flag indicates drop action. If this flag is
- * not set, then it should be considered accept action.
+ * Setting of this flag indicates drop action. If this flag is not set,
+ * then it should be considered accept action.
*/
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP UINT32_C(0x2)
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP \
+ UINT32_C(0x2)
+ /*
+ * Setting of this flag indicates that a meter is expected to be attached
+ * to this flow. This hint can be used when choosing the action record
+ * format required for the flow.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER \
+ UINT32_C(0x4)
+ uint32_t enables;
/*
- * Setting of this flag indicates that a meter is expected to be
- * attached to this flow. This hint can be used when choosing
- * the action record format required for the flow.
+ * This bit must be '1' for the l2_filter_id field to be
+ * configured.
*/
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER UINT32_C(0x4)
- uint32_t enables;
- /* This bit must be '1' for the l2_filter_id field to be configured. */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID \
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID \
UINT32_C(0x1)
- /* This bit must be '1' for the ethertype field to be configured. */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE \
+ /*
+ * This bit must be '1' for the ethertype field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE \
UINT32_C(0x2)
- /* This bit must be '1' for the tunnel_type field to be configured. */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \
+ /*
+ * This bit must be '1' for the tunnel_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \
UINT32_C(0x4)
- /* This bit must be '1' for the src_macaddr field to be configured. */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR \
+ /*
+ * This bit must be '1' for the src_macaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR \
UINT32_C(0x8)
- /* This bit must be '1' for the ipaddr_type field to be configured. */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE \
+ /*
+ * This bit must be '1' for the ipaddr_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE \
UINT32_C(0x10)
- /* This bit must be '1' for the src_ipaddr field to be configured. */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR \
+ /*
+ * This bit must be '1' for the src_ipaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR \
UINT32_C(0x20)
/*
* This bit must be '1' for the src_ipaddr_mask field to be
@@ -9932,8 +21709,11 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
*/
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK \
UINT32_C(0x40)
- /* This bit must be '1' for the dst_ipaddr field to be configured. */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR \
+ /*
+ * This bit must be '1' for the dst_ipaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR \
UINT32_C(0x80)
/*
* This bit must be '1' for the dst_ipaddr_mask field to be
@@ -9941,29 +21721,41 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
*/
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK \
UINT32_C(0x100)
- /* This bit must be '1' for the ip_protocol field to be configured. */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL \
+ /*
+ * This bit must be '1' for the ip_protocol field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL \
UINT32_C(0x200)
- /* This bit must be '1' for the src_port field to be configured. */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT \
+ /*
+ * This bit must be '1' for the src_port field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT \
UINT32_C(0x400)
/*
* This bit must be '1' for the src_port_mask field to be
* configured.
*/
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK \
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK \
UINT32_C(0x800)
- /* This bit must be '1' for the dst_port field to be configured. */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT \
+ /*
+ * This bit must be '1' for the dst_port field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT \
UINT32_C(0x1000)
/*
* This bit must be '1' for the dst_port_mask field to be
* configured.
*/
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK \
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK \
UINT32_C(0x2000)
- /* This bit must be '1' for the pri_hint field to be configured. */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_PRI_HINT \
+ /*
+ * This bit must be '1' for the pri_hint field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_PRI_HINT \
UINT32_C(0x4000)
/*
* This bit must be '1' for the ntuple_filter_id field to be
@@ -9971,8 +21763,11 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
*/
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_NTUPLE_FILTER_ID \
UINT32_C(0x8000)
- /* This bit must be '1' for the dst_id field to be configured. */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID \
+ /*
+ * This bit must be '1' for the dst_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID \
UINT32_C(0x10000)
/*
* This bit must be '1' for the mirror_vnic_id field to be
@@ -9980,26 +21775,31 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
*/
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
UINT32_C(0x20000)
- /* This bit must be '1' for the dst_macaddr field to be configured. */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR \
+ /*
+ * This bit must be '1' for the dst_macaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR \
UINT32_C(0x40000)
- uint64_t l2_filter_id;
/*
- * This value identifies a set of CFA data structures used for
- * an L2 context.
+ * This value identifies a set of CFA data structures used for an L2
+ * context.
*/
- uint8_t src_macaddr[6];
+ uint64_t l2_filter_id;
/*
- * This value indicates the source MAC address in the Ethernet
- * header.
+ * This value indicates the source MAC address in
+ * the Ethernet header.
*/
- uint16_t ethertype;
+ uint8_t src_macaddr[6];
/* This value indicates the ethertype in the Ethernet header. */
- uint8_t ip_addr_type;
+ uint16_t ethertype;
/*
- * This value indicates the type of IP address. 4 - IPv4 6 -
- * IPv6 All others are invalid.
+ * This value indicates the type of IP address.
+ * 4 - IPv4
+ * 6 - IPv6
+ * All others are invalid.
*/
+ uint8_t ip_addr_type;
/* invalid */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_UNKNOWN \
UINT32_C(0x0)
@@ -10009,11 +21809,15 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
/* IPv6 */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 \
UINT32_C(0x6)
- uint8_t ip_protocol;
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_LAST \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6
/*
- * The value of protocol filed in IP header. Applies to UDP and
- * TCP traffic. 6 - TCP 17 - UDP
+ * The value of the protocol field in the IP header.
+ * Applies to UDP and TCP traffic.
+ * 6 - TCP
+ * 17 - UDP
*/
+ uint8_t ip_protocol;
/* invalid */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN \
UINT32_C(0x0)
@@ -10023,283 +21827,284 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
/* UDP */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP \
UINT32_C(0x11)
- uint16_t dst_id;
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_LAST \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP
+ /*
+ * If set, this value shall represent the
+ * Logical VNIC ID of the destination VNIC for the RX
+ * path and network port id of the destination port for
+ * the TX path.
+ */
+ uint16_t dst_id;
/*
- * If set, this value shall represent the Logical VNIC ID of the
- * destination VNIC for the RX path and network port id of the
- * destination port for the TX path.
+ * Logical VNIC ID of the VNIC where traffic is
+ * mirrored.
*/
- uint16_t mirror_vnic_id;
- /* Logical VNIC ID of the VNIC where traffic is mirrored. */
- uint8_t tunnel_type;
+ uint16_t mirror_vnic_id;
/*
- * This value indicates the tunnel type for this filter. If this
- * field is not specified, then the filter shall apply to both
- * non-tunneled and tunneled packets. If this field conflicts
- * with the tunnel_type specified in the l2_filter_id, then the
- * HWRM shall return an error for this command.
+ * This value indicates the tunnel type for this filter.
+ * If this field is not specified, then the filter shall
+ * apply to both non-tunneled and tunneled packets.
+ * If this field conflicts with the tunnel_type specified
+ * in the l2_filter_id, then the HWRM shall return an
+ * error for this command.
*/
+ uint8_t tunnel_type;
/* Non-tunnel */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
UINT32_C(0x0)
- /* Virtual eXtensible Local Area Network (VXLAN) */
+ /* Virtual eXtensible Local Area Network (VXLAN) */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
UINT32_C(0x1)
- /*
- * Network Virtualization Generic Routing
- * Encapsulation (NVGRE)
- */
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
UINT32_C(0x2)
- /*
- * Generic Routing Encapsulation (GRE) inside
- * Ethernet payload
- */
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \
UINT32_C(0x3)
/* IP in IP */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \
UINT32_C(0x4)
- /* Generic Network Virtualization Encapsulation (Geneve) */
+ /* Generic Network Virtualization Encapsulation (Geneve) */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
UINT32_C(0x5)
- /* Multi-Protocol Lable Switching (MPLS) */
+ /* Multi-Protocol Label Switching (MPLS) */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \
UINT32_C(0x6)
- /* Stateless Transport Tunnel (STT) */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT UINT32_C(0x7)
- /*
- * Generic Routing Encapsulation (GRE) inside IP
- * datagram payload
- */
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \
UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
/* Any tunneled traffic */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
UINT32_C(0xff)
- uint8_t pri_hint;
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL
/*
- * This hint is provided to help in placing the filter in the
- * filter table.
+ * This hint is provided to help in placing
+ * the filter in the filter table.
*/
+ uint8_t pri_hint;
/* No preference */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_NO_PREFER \
UINT32_C(0x0)
/* Above the given filter */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_ABOVE UINT32_C(0x1)
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_ABOVE \
+ UINT32_C(0x1)
/* Below the given filter */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_BELOW UINT32_C(0x2)
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_BELOW \
+ UINT32_C(0x2)
/* As high as possible */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_HIGHEST \
UINT32_C(0x3)
/* As low as possible */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_LOWEST UINT32_C(0x4)
- uint32_t src_ipaddr[4];
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_LOWEST \
+ UINT32_C(0x4)
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_LAST \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_LOWEST
/*
- * The value of source IP address to be used in filtering. For
- * IPv4, first four bytes represent the IP address.
+ * The value of source IP address to be used in filtering.
+ * For IPv4, first four bytes represent the IP address.
*/
- uint32_t src_ipaddr_mask[4];
+ uint32_t src_ipaddr[4];
/*
- * The value of source IP address mask to be used in filtering.
+ * The value of source IP address mask to be used in
+ * filtering.
* For IPv4, first four bytes represent the IP address mask.
*/
- uint32_t dst_ipaddr[4];
+ uint32_t src_ipaddr_mask[4];
/*
* The value of destination IP address to be used in filtering.
* For IPv4, first four bytes represent the IP address.
*/
- uint32_t dst_ipaddr_mask[4];
+ uint32_t dst_ipaddr[4];
/*
* The value of destination IP address mask to be used in
- * filtering. For IPv4, first four bytes represent the IP
- * address mask.
+ * filtering.
+ * For IPv4, first four bytes represent the IP address mask.
*/
- uint16_t src_port;
+ uint32_t dst_ipaddr_mask[4];
/*
- * The value of source port to be used in filtering. Applies to
- * UDP and TCP traffic.
+ * The value of source port to be used in filtering.
+ * Applies to UDP and TCP traffic.
*/
- uint16_t src_port_mask;
+ uint16_t src_port;
/*
* The value of source port mask to be used in filtering.
* Applies to UDP and TCP traffic.
*/
- uint16_t dst_port;
+ uint16_t src_port_mask;
/*
* The value of destination port to be used in filtering.
* Applies to UDP and TCP traffic.
*/
- uint16_t dst_port_mask;
+ uint16_t dst_port;
/*
- * The value of destination port mask to be used in filtering.
+ * The value of destination port mask to be used in
+ * filtering.
* Applies to UDP and TCP traffic.
*/
- uint64_t ntuple_filter_id_hint;
- /* This is the ID of the filter that goes along with the pri_hint. */
+ uint16_t dst_port_mask;
+ /*
+ * This is the ID of the filter that goes along with
+ * the pri_hint.
+ */
+ uint64_t ntuple_filter_id_hint;
} __attribute__((packed));
-/* Output (24 bytes) */
+/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */
struct hwrm_cfa_ntuple_filter_alloc_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint64_t ntuple_filter_id;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/* This value is an opaque id into CFA data structures. */
- uint32_t flow_id;
+ uint64_t ntuple_filter_id;
/*
- * This is the ID of the flow associated with this filter. This
- * value shall be used to match and associate the flow
- * identifier returned in completion records. A value of
- * 0xFFFFFFFF shall indicate no flow id.
+ * This is the ID of the flow associated with this
+ * filter.
+ * This value shall be used to match and associate the
+ * flow identifier returned in completion records.
+ * A value of 0xFFFFFFFF shall indicate no flow id.
*/
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t valid;
+ uint32_t flow_id;
+ uint8_t unused_0[3];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* Command specific Error Codes (8 bytes) */
+/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */
struct hwrm_cfa_ntuple_filter_alloc_cmd_err {
- uint8_t code;
/*
- * command specific error codes that goes to the cmd_err field
- * in Common HWRM Error Response.
+ * command specific error codes that goes to
+ * the cmd_err field in Common HWRM Error Response.
*/
+ uint8_t code;
/* Unknown error */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
- /*
- * Unable to complete operation due to conflict
- * with Rx Mask VLAN
- */
- #define \
- HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR \
- UINT32_C(0x1)
- uint8_t unused_0[7];
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN \
+ UINT32_C(0x0)
+ /* Unable to complete operation due to conflict with Rx Mask VLAN */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR \
+ UINT32_C(0x1)
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR
+ uint8_t unused_0[7];
} __attribute__((packed));
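As a concrete illustration of how flags, enables and the match fields work together, the sketch below requests a drop filter for IPv4 TCP traffic to destination port 443 on top of an existing L2 context. The raw values 0x0800, 4, 6 and 443 come straight from the field descriptions above; byte-order conversion and the common HWRM header fields are again omitted, and the l2_filter_id would come from a prior L2 filter allocation.

static void ntuple_drop_tcp_443_fill(struct hwrm_cfa_ntuple_filter_alloc_input *req,
				     uint64_t l2_filter_id)
{
	memset(req, 0, sizeof(*req));
	req->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
	req->enables =
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT;
	req->l2_filter_id = l2_filter_id;
	req->ethertype = 0x0800;	/* IPv4; network byte order on the wire */
	req->ip_addr_type = 4;		/* 4 - IPv4, per the field comment */
	req->ip_protocol = 6;		/* 6 - TCP, per the field comment */
	req->dst_port = 443;		/* network byte order in a real request */
}

On success the response carries both an ntuple_filter_id, used by the free and cfg commands that follow, and a flow_id, where 0xFFFFFFFF means no flow id was assigned; a conflict with the Rx mask VLAN is reported through the RX_MASK_VLAN_CONFLICT_ERR code in the command-specific error structure above.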
-/* hwrm_cfa_ntuple_filter_free */
-/* Description: Free an ntuple filter */
-/* Input (24 bytes) */
+/*******************************
+ * hwrm_cfa_ntuple_filter_free *
+ *******************************/
+
+
+/* hwrm_cfa_ntuple_filter_free_input (size:192b/24B) */
struct hwrm_cfa_ntuple_filter_free_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint64_t ntuple_filter_id;
+ uint64_t resp_addr;
/* This value is an opaque id into CFA data structures. */
+ uint64_t ntuple_filter_id;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* hwrm_cfa_ntuple_filter_free_output (size:128b/16B) */
struct hwrm_cfa_ntuple_filter_free_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* hwrm_cfa_ntuple_filter_cfg */
-/*
- * Description: Configure an ntuple filter with a new destination VNIC and/or
- * meter.
- */
-/* Input (48 bytes) */
+/******************************
+ * hwrm_cfa_ntuple_filter_cfg *
+ ******************************/
+
+
+/* hwrm_cfa_ntuple_filter_cfg_input (size:384b/48B) */
struct hwrm_cfa_ntuple_filter_cfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t enables;
- /* This bit must be '1' for the new_dst_id field to be configured. */
- #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_DST_ID \
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the new_dst_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_DST_ID \
UINT32_C(0x1)
/*
* This bit must be '1' for the new_mirror_vnic_id field to be
@@ -10308,2209 +22113,5990 @@ struct hwrm_cfa_ntuple_filter_cfg_input {
#define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \
UINT32_C(0x2)
/*
- * This bit must be '1' for the new_meter_instance_id field to
- * be configured.
+ * This bit must be '1' for the new_meter_instance_id field to be
+ * configured.
*/
#define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID \
UINT32_C(0x4)
- uint32_t unused_0;
- uint64_t ntuple_filter_id;
+ uint8_t unused_0[4];
/* This value is an opaque id into CFA data structures. */
- uint32_t new_dst_id;
+ uint64_t ntuple_filter_id;
/*
- * If set, this value shall represent the new Logical VNIC ID of
- * the destination VNIC for the RX path and new network port id
- * of the destination port for the TX path.
+ * If set, this value shall represent the new
+ * Logical VNIC ID of the destination VNIC for the RX
+ * path and new network port id of the destination port for
+ * the TX path.
*/
- uint32_t new_mirror_vnic_id;
- /* New Logical VNIC ID of the VNIC where traffic is mirrored. */
- uint16_t new_meter_instance_id;
+ uint32_t new_dst_id;
/*
- * New meter to attach to the flow. Specifying the invalid
- * instance ID is used to remove any existing meter from the
- * flow.
+ * New Logical VNIC ID of the VNIC where traffic is
+ * mirrored.
*/
+ uint32_t new_mirror_vnic_id;
/*
- * A value of 0xfff is considered invalid and
- * implies the instance is not configured.
+ * New meter to attach to the flow. Specifying the
+ * invalid instance ID is used to remove any existing
+ * meter from the flow.
+ */
+ uint16_t new_meter_instance_id;
+ /*
+ * A value of 0xffff is considered invalid and implies the
+ * instance is not configured.
*/
#define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID \
UINT32_C(0xffff)
- uint16_t unused_1[3];
+ #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_NEW_METER_INSTANCE_ID_LAST \
+ HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID
+ uint8_t unused_1[6];
} __attribute__((packed));
-/* Output (16 bytes) */
+/* hwrm_cfa_ntuple_filter_cfg_output (size:128b/16B) */
struct hwrm_cfa_ntuple_filter_cfg_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
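A short sketch of the reconfiguration path, under the same conventions as the previous examples: the enables bits select which of the new_* fields are applied, and writing the documented INVALID meter instance id detaches any meter already on the flow.

static void ntuple_filter_retarget_fill(struct hwrm_cfa_ntuple_filter_cfg_input *req,
					uint64_t ntuple_filter_id,
					uint32_t new_dst_id)
{
	memset(req, 0, sizeof(*req));
	req->enables =
		HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_DST_ID |
		HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID;
	req->ntuple_filter_id = ntuple_filter_id;	/* id from the alloc response */
	req->new_dst_id = new_dst_id;			/* new VNIC (RX) or port (TX) */
	req->new_meter_instance_id =
		HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID;
}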
-/* hwrm_cfa_em_flow_alloc */
-/*
- * Description: This is a generic Exact Match (EM) flow that uses fields from
- * L4/L3/L2 headers. The EM flows apply to transmit and receive traffic. All
- * L2/L3/L4 header fields are specified in network byte order. For each EM flow,
- * there is an associated set of actions specified. For tunneled packets, all
- * L2/L3/L4 fields specified are fields of inner headers unless otherwise
- * specified. # If a field specified in this command is not enabled as a valid
- * field, then that field shall not be used in matching packet header fields
- * against this EM flow entry.
- */
-/* Input (112 bytes) */
+/**************************
+ * hwrm_cfa_em_flow_alloc *
+ **************************/
+
+
+/* hwrm_cfa_em_flow_alloc_input (size:896b/112B) */
struct hwrm_cfa_em_flow_alloc_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t flags;
+ uint64_t resp_addr;
+ uint32_t flags;
/*
- * Enumeration denoting the RX, TX type of the resource. This
- * enumeration is used for resources that are similar for both
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
* TX and RX paths of the chip.
*/
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH UINT32_C(0x1)
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH UINT32_C(0x1)
/* tx path */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_TX \
- (UINT32_C(0x0) << 0)
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
/* rx path */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX \
- (UINT32_C(0x1) << 0)
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_LAST \
- CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX
- /*
- * Setting of this flag indicates enabling of a byte counter for
- * a given flow.
- */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_BYTE_CTR UINT32_C(0x2)
- /*
- * Setting of this flag indicates enabling of a packet counter
- * for a given flow.
- */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PKT_CTR UINT32_C(0x4)
- /*
- * Setting of this flag indicates de-capsulation action for the
- * given flow.
- */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DECAP UINT32_C(0x8)
- /*
- * Setting of this flag indicates encapsulation action for the
- * given flow.
- */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_ENCAP UINT32_C(0x10)
- /*
- * Setting of this flag indicates drop action. If this flag is
- * not set, then it should be considered accept action.
- */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP UINT32_C(0x20)
- /*
- * Setting of this flag indicates that a meter is expected to be
- * attached to this flow. This hint can be used when choosing
- * the action record format required for the flow.
- */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_METER UINT32_C(0x40)
- uint32_t enables;
- /* This bit must be '1' for the l2_filter_id field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID UINT32_C(0x1)
- /* This bit must be '1' for the tunnel_type field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_TYPE UINT32_C(0x2)
- /* This bit must be '1' for the tunnel_id field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_ID UINT32_C(0x4)
- /* This bit must be '1' for the src_macaddr field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR UINT32_C(0x8)
- /* This bit must be '1' for the dst_macaddr field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR UINT32_C(0x10)
- /* This bit must be '1' for the ovlan_vid field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID UINT32_C(0x20)
- /* This bit must be '1' for the ivlan_vid field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID UINT32_C(0x40)
- /* This bit must be '1' for the ethertype field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE UINT32_C(0x80)
- /* This bit must be '1' for the src_ipaddr field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR UINT32_C(0x100)
- /* This bit must be '1' for the dst_ipaddr field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR UINT32_C(0x200)
- /* This bit must be '1' for the ipaddr_type field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE UINT32_C(0x400)
- /* This bit must be '1' for the ip_protocol field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL UINT32_C(0x800)
- /* This bit must be '1' for the src_port field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT UINT32_C(0x1000)
- /* This bit must be '1' for the dst_port field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT UINT32_C(0x2000)
- /* This bit must be '1' for the dst_id field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID UINT32_C(0x4000)
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX
+ /*
+ * Setting of this flag indicates enabling of a byte counter for a given
+ * flow.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_BYTE_CTR UINT32_C(0x2)
+ /*
+ * Setting of this flag indicates enabling of a packet counter for a given
+ * flow.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PKT_CTR UINT32_C(0x4)
+ /* Setting of this flag indicates de-capsulation action for the given flow. */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DECAP UINT32_C(0x8)
+ /* Setting of this flag indicates encapsulation action for the given flow. */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_ENCAP UINT32_C(0x10)
+ /*
+ * Setting of this flag indicates drop action. If this flag is not set,
+ * then it should be considered accept action.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP UINT32_C(0x20)
+ /*
+ * Setting of this flag indicates that a meter is expected to be attached
+ * to this flow. This hint can be used when choosing the action record
+ * format required for the flow.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_METER UINT32_C(0x40)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the l2_filter_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the tunnel_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the tunnel_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_ID \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the src_macaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the dst_macaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the ovlan_vid field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the ivlan_vid field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the ethertype field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the src_ipaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR \
+ UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the dst_ipaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR \
+ UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the ipaddr_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE \
+ UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the ip_protocol field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL \
+ UINT32_C(0x800)
+ /*
+ * This bit must be '1' for the src_port field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT \
+ UINT32_C(0x1000)
+ /*
+ * This bit must be '1' for the dst_port field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT \
+ UINT32_C(0x2000)
+ /*
+ * This bit must be '1' for the dst_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID \
+ UINT32_C(0x4000)
/*
* This bit must be '1' for the mirror_vnic_id field to be
* configured.
*/
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
UINT32_C(0x8000)
/*
* This bit must be '1' for the encap_record_id field to be
* configured.
*/
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ENCAP_RECORD_ID \
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ENCAP_RECORD_ID \
UINT32_C(0x10000)
/*
* This bit must be '1' for the meter_instance_id field to be
* configured.
*/
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_METER_INSTANCE_ID \
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_METER_INSTANCE_ID \
UINT32_C(0x20000)
- uint64_t l2_filter_id;
/*
- * This value identifies a set of CFA data structures used for
- * an L2 context.
+ * This value identifies a set of CFA data structures used for an L2
+ * context.
*/
- uint8_t tunnel_type;
+ uint64_t l2_filter_id;
/* Tunnel Type. */
+ uint8_t tunnel_type;
/* Non-tunnel */
#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
UINT32_C(0x0)
- /* Virtual eXtensible Local Area Network (VXLAN) */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1)
- /*
- * Network Virtualization Generic Routing
- * Encapsulation (NVGRE)
- */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_NVGRE UINT32_C(0x2)
- /*
- * Generic Routing Encapsulation (GRE) inside
- * Ethernet payload
- */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_L2GRE UINT32_C(0x3)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
/* IP in IP */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPIP UINT32_C(0x4)
- /* Generic Network Virtualization Encapsulation (Geneve) */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5)
- /* Multi-Protocol Lable Switching (MPLS) */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_MPLS UINT32_C(0x6)
- /* Stateless Transport Tunnel (STT) */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_STT UINT32_C(0x7)
- /*
- * Generic Routing Encapsulation (GRE) inside IP
- * datagram payload
- */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPGRE UINT32_C(0x8)
- /*
- * IPV4 over virtual eXtensible Local Area
- * Network (IPV4oVXLAN)
- */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 UINT32_C(0x9)
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+	/* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
/* Any tunneled traffic */
#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
UINT32_C(0xff)
- uint8_t unused_0;
- uint16_t unused_1;
- uint32_t tunnel_id;
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL
+ uint8_t unused_0[3];
/*
- * Tunnel identifier. Virtual Network Identifier (VNI). Only
- * valid with tunnel_types VXLAN, NVGRE, and Geneve. Only lower
- * 24-bits of VNI field are used in setting up the filter.
+ * Tunnel identifier.
+ * Virtual Network Identifier (VNI). Only valid with
+ * tunnel_types VXLAN, NVGRE, and Geneve.
+ * Only lower 24-bits of VNI field are used
+ * in setting up the filter.
*/
- uint8_t src_macaddr[6];
+ uint32_t tunnel_id;
/*
- * This value indicates the source MAC address in the Ethernet
- * header.
+ * This value indicates the source MAC address in
+ * the Ethernet header.
*/
- uint16_t meter_instance_id;
+ uint8_t src_macaddr[6];
/* The meter instance to attach to the flow. */
+ uint16_t meter_instance_id;
/*
- * A value of 0xfff is considered invalid and
- * implies the instance is not configured.
+	 * A value of 0xffff is considered invalid and implies the
+ * instance is not configured.
*/
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_METER_INSTANCE_ID_INVALID \
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_METER_INSTANCE_ID_INVALID \
UINT32_C(0xffff)
- uint8_t dst_macaddr[6];
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_METER_INSTANCE_ID_LAST \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_METER_INSTANCE_ID_INVALID
/*
- * This value indicates the destination MAC address in the
- * Ethernet header.
+ * This value indicates the destination MAC address in
+ * the Ethernet header.
*/
- uint16_t ovlan_vid;
+ uint8_t dst_macaddr[6];
/*
- * This value indicates the VLAN ID of the outer VLAN tag in the
- * Ethernet header.
+ * This value indicates the VLAN ID of the outer VLAN tag
+ * in the Ethernet header.
*/
- uint16_t ivlan_vid;
+ uint16_t ovlan_vid;
/*
- * This value indicates the VLAN ID of the inner VLAN tag in the
- * Ethernet header.
+ * This value indicates the VLAN ID of the inner VLAN tag
+ * in the Ethernet header.
*/
- uint16_t ethertype;
+ uint16_t ivlan_vid;
/* This value indicates the ethertype in the Ethernet header. */
- uint8_t ip_addr_type;
+ uint16_t ethertype;
/*
- * This value indicates the type of IP address. 4 - IPv4 6 -
- * IPv6 All others are invalid.
+ * This value indicates the type of IP address.
+ * 4 - IPv4
+ * 6 - IPv6
+ * All others are invalid.
*/
+ uint8_t ip_addr_type;
/* invalid */
#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_UNKNOWN UINT32_C(0x0)
/* IPv4 */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 UINT32_C(0x4)
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 UINT32_C(0x4)
/* IPv6 */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 UINT32_C(0x6)
- uint8_t ip_protocol;
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 UINT32_C(0x6)
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_LAST \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6
/*
- * The value of protocol filed in IP header. Applies to UDP and
- * TCP traffic. 6 - TCP 17 - UDP
+	 * The value of the protocol field in the IP header.
+ * Applies to UDP and TCP traffic.
+ * 6 - TCP
+ * 17 - UDP
*/
+ uint8_t ip_protocol;
/* invalid */
#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN UINT32_C(0x0)
/* TCP */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_TCP UINT32_C(0x6)
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_TCP UINT32_C(0x6)
/* UDP */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_UDP UINT32_C(0x11)
- uint8_t unused_2;
- uint8_t unused_3;
- uint32_t src_ipaddr[4];
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_UDP UINT32_C(0x11)
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_LAST \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_UDP
+ uint8_t unused_1[2];
/*
- * The value of source IP address to be used in filtering. For
- * IPv4, first four bytes represent the IP address.
+ * The value of source IP address to be used in filtering.
+ * For IPv4, first four bytes represent the IP address.
*/
- uint32_t dst_ipaddr[4];
+ uint32_t src_ipaddr[4];
/*
- * big_endian = True The value of destination IP address to be
- * used in filtering. For IPv4, first four bytes represent the
- * IP address.
+ * big_endian = True
+ * The value of destination IP address to be used in filtering.
+ * For IPv4, first four bytes represent the IP address.
*/
- uint16_t src_port;
+ uint32_t dst_ipaddr[4];
/*
- * The value of source port to be used in filtering. Applies to
- * UDP and TCP traffic.
+ * The value of source port to be used in filtering.
+ * Applies to UDP and TCP traffic.
*/
- uint16_t dst_port;
+ uint16_t src_port;
/*
* The value of destination port to be used in filtering.
* Applies to UDP and TCP traffic.
*/
- uint16_t dst_id;
+ uint16_t dst_port;
+ /*
+ * If set, this value shall represent the
+ * Logical VNIC ID of the destination VNIC for the RX
+ * path and network port id of the destination port for
+ * the TX path.
+ */
+ uint16_t dst_id;
/*
- * If set, this value shall represent the Logical VNIC ID of the
- * destination VNIC for the RX path and network port id of the
- * destination port for the TX path.
+ * Logical VNIC ID of the VNIC where traffic is
+ * mirrored.
*/
- uint16_t mirror_vnic_id;
- /* Logical VNIC ID of the VNIC where traffic is mirrored. */
- uint32_t encap_record_id;
+ uint16_t mirror_vnic_id;
/* Logical ID of the encapsulation record. */
- uint32_t unused_4;
+ uint32_t encap_record_id;
+ uint8_t unused_2[4];
} __attribute__((packed));
-/* Output (24 bytes) */
+/* hwrm_cfa_em_flow_alloc_output (size:192b/24B) */
struct hwrm_cfa_em_flow_alloc_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint64_t em_filter_id;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/* This value is an opaque id into CFA data structures. */
- uint32_t flow_id;
+ uint64_t em_filter_id;
/*
- * This is the ID of the flow associated with this filter. This
- * value shall be used to match and associate the flow
- * identifier returned in completion records. A value of
- * 0xFFFFFFFF shall indicate no flow id.
+ * This is the ID of the flow associated with this
+ * filter.
+ * This value shall be used to match and associate the
+ * flow identifier returned in completion records.
+ * A value of 0xFFFFFFFF shall indicate no flow id.
*/
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t valid;
+ uint32_t flow_id;
+ uint8_t unused_0[3];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
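
A minimal usage sketch of hwrm_cfa_em_flow_alloc (illustrative only): the request is zeroed, an ENABLES bit is set for every optional field that is actually filled in, and em_filter_id/flow_id are read back from the response. bnxt_hwrm_send() and its signature are assumptions standing in for the driver's HWRM plumbing, which fills req_type/cmpl_ring/seq_id/resp_addr and performs the request/response handshake.

#include <stddef.h>
#include <rte_byteorder.h>

/* Assumed transport helper -- not defined by this header. */
extern int bnxt_hwrm_send(void *hwrm_ctx, void *req, size_t req_len,
			  void *resp, size_t resp_len);

static int example_em_flow_alloc(void *hwrm_ctx, uint64_t l2_filter_id,
				 uint16_t dst_vnic_id,
				 uint64_t *em_filter_id, uint32_t *flow_id)
{
	struct hwrm_cfa_em_flow_alloc_input req = { 0 };
	struct hwrm_cfa_em_flow_alloc_output resp = { 0 };
	int rc;

	/* RX path, with a packet counter attached to the flow. */
	req.flags = rte_cpu_to_le_32(
		HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX |
		HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PKT_CTR);
	/* Only fields whose enables bit is set are taken from the request. */
	req.enables = rte_cpu_to_le_32(
		HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID |
		HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE |
		HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL |
		HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID);
	req.l2_filter_id = rte_cpu_to_le_64(l2_filter_id);
	req.ip_addr_type = HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
	req.ip_protocol = HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_UDP;
	req.dst_id = rte_cpu_to_le_16(dst_vnic_id);
	/* A real key would normally also carry addresses/ports; omitted here. */

	rc = bnxt_hwrm_send(hwrm_ctx, &req, sizeof(req), &resp, sizeof(resp));
	if (rc)
		return rc;

	/* em_filter_id is the handle used by the later _cfg/_free commands. */
	*em_filter_id = rte_le_to_cpu_64(resp.em_filter_id);
	/* 0xFFFFFFFF indicates that no flow id was associated. */
	*flow_id = rte_le_to_cpu_32(resp.flow_id);
	return 0;
}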
-/* hwrm_cfa_em_flow_free */
-/* Description: Free an EM flow table entry */
-/* Input (24 bytes) */
+/*************************
+ * hwrm_cfa_em_flow_free *
+ *************************/
+
+
+/* hwrm_cfa_em_flow_free_input (size:192b/24B) */
struct hwrm_cfa_em_flow_free_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+	 * A physical address pointer pointing to a host buffer to which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint64_t em_filter_id;
+ uint64_t resp_addr;
/* This value is an opaque id into CFA data structures. */
+ uint64_t em_filter_id;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* hwrm_cfa_em_flow_free_output (size:128b/16B) */
struct hwrm_cfa_em_flow_free_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* hwrm_cfa_em_flow_cfg */
-/*
- * Description: Configure an EM flow with a new destination VNIC and/or meter.
- */
-/* Input (48 bytes) */
+/************************
+ * hwrm_cfa_em_flow_cfg *
+ ************************/
+
+
+/* hwrm_cfa_em_flow_cfg_input (size:384b/48B) */
struct hwrm_cfa_em_flow_cfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+	 * A physical address pointer pointing to a host buffer to which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the new_dst_id field to be
+ * configured.
*/
- uint32_t enables;
- /* This bit must be '1' for the new_dst_id field to be configured. */
- #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_DST_ID UINT32_C(0x1)
+ #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_DST_ID \
+ UINT32_C(0x1)
/*
* This bit must be '1' for the new_mirror_vnic_id field to be
* configured.
*/
- #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \
+ #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \
UINT32_C(0x2)
/*
- * This bit must be '1' for the new_meter_instance_id field to
- * be configured.
+ * This bit must be '1' for the new_meter_instance_id field to be
+ * configured.
*/
- #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID \
+ #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID \
UINT32_C(0x4)
- uint32_t unused_0;
- uint64_t em_filter_id;
+ uint8_t unused_0[4];
/* This value is an opaque id into CFA data structures. */
- uint32_t new_dst_id;
+ uint64_t em_filter_id;
/*
- * If set, this value shall represent the new Logical VNIC ID of
- * the destination VNIC for the RX path and network port id of
- * the destination port for the TX path.
+ * If set, this value shall represent the new
+ * Logical VNIC ID of the destination VNIC for the RX
+ * path and network port id of the destination port for
+ * the TX path.
*/
- uint32_t new_mirror_vnic_id;
- /* New Logical VNIC ID of the VNIC where traffic is mirrored. */
- uint16_t new_meter_instance_id;
+ uint32_t new_dst_id;
/*
- * New meter to attach to the flow. Specifying the invalid
- * instance ID is used to remove any existing meter from the
- * flow.
+ * New Logical VNIC ID of the VNIC where traffic is
+ * mirrored.
*/
+ uint32_t new_mirror_vnic_id;
/*
- * A value of 0xfff is considered invalid and
- * implies the instance is not configured.
+ * New meter to attach to the flow. Specifying the
+ * invalid instance ID is used to remove any existing
+ * meter from the flow.
+ */
+ uint16_t new_meter_instance_id;
+ /*
+	 * A value of 0xffff is considered invalid and implies the
+ * instance is not configured.
*/
#define HWRM_CFA_EM_FLOW_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID \
UINT32_C(0xffff)
- uint16_t unused_1[3];
+ #define HWRM_CFA_EM_FLOW_CFG_INPUT_NEW_METER_INSTANCE_ID_LAST \
+ HWRM_CFA_EM_FLOW_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID
+ uint8_t unused_1[6];
} __attribute__((packed));
-/* Output (16 bytes) */
+/* hwrm_cfa_em_flow_cfg_output (size:128b/16B) */
struct hwrm_cfa_em_flow_cfg_output {
- uint16_t error_code;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
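
A companion sketch for hwrm_cfa_em_flow_cfg (illustrative only, using the same assumed bnxt_hwrm_send() helper): an existing flow is re-pointed at a new destination id and any attached meter is removed by writing the INVALID instance id.

static int example_em_flow_retarget(void *hwrm_ctx, uint64_t em_filter_id,
				    uint32_t new_dst_id)
{
	struct hwrm_cfa_em_flow_cfg_input req = { 0 };
	struct hwrm_cfa_em_flow_cfg_output resp = { 0 };

	req.enables = rte_cpu_to_le_32(
		HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_DST_ID |
		HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID);
	/* Handle returned by hwrm_cfa_em_flow_alloc. */
	req.em_filter_id = rte_cpu_to_le_64(em_filter_id);
	req.new_dst_id = rte_cpu_to_le_32(new_dst_id);
	/* The INVALID instance id detaches any meter from the flow. */
	req.new_meter_instance_id = rte_cpu_to_le_16(
		HWRM_CFA_EM_FLOW_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID);

	return bnxt_hwrm_send(hwrm_ctx, &req, sizeof(req), &resp, sizeof(resp));
}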
+/********************************
+ * hwrm_cfa_meter_profile_alloc *
+ ********************************/
+
+
+/* hwrm_cfa_meter_profile_alloc_input (size:320b/40B) */
+struct hwrm_cfa_meter_profile_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint8_t flags;
+ /*
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_TX \
+ UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_RX \
+ UINT32_C(0x1)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_RX
+ /* The meter algorithm type. */
+ uint8_t meter_type;
+ /* RFC 2697 (srTCM) */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC2697 \
+ UINT32_C(0x0)
+ /* RFC 2698 (trTCM) */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC2698 \
+ UINT32_C(0x1)
+ /* RFC 4115 (trTCM) */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC4115 \
+ UINT32_C(0x2)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC4115
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+	 * This field is reserved for future use.
+ * It shall be set to 0.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t reserved1;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+	 * This field is reserved for future use.
+ * It shall be set to 0.
*/
+ uint32_t reserved2;
+ /* A meter rate specified in bytes-per-second. */
+ uint32_t commit_rate;
+ /* The bandwidth value. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_INVALID
+ /* A meter burst size specified in bytes. */
+ uint32_t commit_burst;
+ /* The bandwidth value. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_INVALID
+ /* A meter rate specified in bytes-per-second. */
+ uint32_t excess_peak_rate;
+ /* The bandwidth value. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID
+ /* A meter burst size specified in bytes. */
+ uint32_t excess_peak_burst;
+ /* The bandwidth value. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID
} __attribute__((packed));
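
The commit_rate/commit_burst and excess_peak_* words above are bit-packed: the low 28 bits carry the numeric value, bit 28 selects whether that value counts bits or bytes, and bits 31:29 select the unit multiplier. A small packing sketch follows (illustrative only; the 1 Gb/s rate is an arbitrary example and the unit interpretation should be checked against the firmware in use):

static uint32_t example_commit_rate_1gbps(void)
{
	uint32_t rate = 0;

	/* Numeric value in the low 28 bits. */
	rate |= (1u << HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_SFT) &
		HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_MASK;
	/* Bit 28 clear (_SCALE_BITS): the value is expressed in bits. */
	rate |= HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_BITS;
	/* Bits 31:29: giga multiplier, i.e. 1 Gb/s. */
	rate |= HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_GIGA;

	return rate;	/* stored as rte_cpu_to_le_32(rate) in commit_rate */
}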
-/* hwrm_tunnel_dst_port_query */
-/*
- * Description: This function is called by a driver to query tunnel type
- * specific destination port configuration.
- */
-/* Input (24 bytes) */
-struct hwrm_tunnel_dst_port_query_input {
- uint16_t req_type;
+/* hwrm_cfa_meter_profile_alloc_output (size:128b/16B) */
+struct hwrm_cfa_meter_profile_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* This value identifies a meter profile in CFA. */
+ uint16_t meter_profile_id;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+	 * A value of 0xffff is considered invalid and implies the
+ * profile is not configured.
*/
- uint16_t cmpl_ring;
+ #define HWRM_CFA_METER_PROFILE_ALLOC_OUTPUT_METER_PROFILE_ID_INVALID \
+ UINT32_C(0xffff)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_OUTPUT_METER_PROFILE_ID_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_OUTPUT_METER_PROFILE_ID_INVALID
+ uint8_t unused_0[5];
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint8_t valid;
+} __attribute__((packed));
+
+/*******************************
+ * hwrm_cfa_meter_profile_free *
+ *******************************/
+
+
+/* hwrm_cfa_meter_profile_free_input (size:192b/24B) */
+struct hwrm_cfa_meter_profile_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint64_t resp_addr;
+ uint16_t seq_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint8_t tunnel_type;
- /* Tunnel Type. */
- /* Virtual eXtensible Local Area Network (VXLAN) */
- #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN \
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint8_t flags;
+ /*
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH_TX \
+ UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH_RX \
UINT32_C(0x1)
- /* Generic Network Virtualization Encapsulation (Geneve) */
- #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_GENEVE \
- UINT32_C(0x5)
+ #define HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH_LAST \
+ HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH_RX
+ uint8_t unused_0;
+ /* This value identifies a meter profile in CFA. */
+ uint16_t meter_profile_id;
/*
- * IPV4 over virtual eXtensible Local Area
- * Network (IPV4oVXLAN)
+	 * A value of 0xffff is considered invalid and implies the
+ * profile is not configured.
*/
- #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN_V4 \
- UINT32_C(0x9)
- uint8_t unused_0[7];
+ #define HWRM_CFA_METER_PROFILE_FREE_INPUT_METER_PROFILE_ID_INVALID \
+ UINT32_C(0xffff)
+ #define HWRM_CFA_METER_PROFILE_FREE_INPUT_METER_PROFILE_ID_LAST \
+ HWRM_CFA_METER_PROFILE_FREE_INPUT_METER_PROFILE_ID_INVALID
+ uint8_t unused_1[4];
} __attribute__((packed));
-/* Output (16 bytes) */
-struct hwrm_tunnel_dst_port_query_output {
- uint16_t error_code;
+/* hwrm_cfa_meter_profile_free_output (size:128b/16B) */
+struct hwrm_cfa_meter_profile_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
+/******************************
+ * hwrm_cfa_meter_profile_cfg *
+ ******************************/
+
+
+/* hwrm_cfa_meter_profile_cfg_input (size:320b/40B) */
+struct hwrm_cfa_meter_profile_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t tunnel_dst_port_id;
+ uint16_t cmpl_ring;
/*
- * This field represents the identifier of L4 destination port
- * used for the given tunnel type. This field is valid for
- * specific tunnel types that use layer 4 (e.g. UDP) transports
- * for tunneling.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t tunnel_dst_port_val;
+ uint16_t seq_id;
/*
- * This field represents the value of L4 destination port
- * identified by tunnel_dst_port_id. This field is valid for
- * specific tunnel types that use layer 4 (e.g. UDP) transports
- * for tunneling. This field is in network byte order. A value
- * of 0 means that the destination port is not configured.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t valid;
+ uint16_t target_id;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+	 * A physical address pointer pointing to a host buffer to which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint8_t flags;
+ /*
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
*/
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH_RX
+ /* The meter algorithm type. */
+ uint8_t meter_type;
+ /* RFC 2697 (srTCM) */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_RFC2697 \
+ UINT32_C(0x0)
+ /* RFC 2698 (trTCM) */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_RFC2698 \
+ UINT32_C(0x1)
+ /* RFC 4115 (trTCM) */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_RFC4115 \
+ UINT32_C(0x2)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_RFC4115
+ /* This value identifies a meter profile in CFA. */
+ uint16_t meter_profile_id;
+ /*
+	 * A value of 0xffff is considered invalid and implies the
+ * profile is not configured.
+ */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_PROFILE_ID_INVALID \
+ UINT32_C(0xffff)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_PROFILE_ID_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_PROFILE_ID_INVALID
+ /*
+	 * This field is reserved for future use.
+ * It shall be set to 0.
+ */
+ uint32_t reserved;
+ /* A meter rate specified in bytes-per-second. */
+ uint32_t commit_rate;
+ /* The bandwidth value. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_INVALID
+ /* A meter burst size specified in bytes. */
+ uint32_t commit_burst;
+ /* The bandwidth value. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_INVALID
+ /* A meter rate specified in bytes-per-second. */
+ uint32_t excess_peak_rate;
+ /* The bandwidth value. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID
+ /* A meter burst size specified in bytes. */
+ uint32_t excess_peak_burst;
+ /* The bandwidth value. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID
} __attribute__((packed));
-/* hwrm_tunnel_dst_port_alloc */
-/*
- * Description: This function is called by a driver to allocate l4 destination
- * port for a specific tunnel type. The destination port value is provided in
- * the input. If the HWRM supports only one global destination port for a tunnel
- * type, then the HWRM shall keep track of its usage as described below. # The
- * first caller that allocates a destination port shall always succeed and the
- * HWRM shall save the destination port configuration for that tunnel type and
- * increment the usage count to 1. # Subsequent callers allocating the same
- * destination port for that tunnel type shall succeed and the HWRM shall
- * increment the usage count for that port for each subsequent caller that
- * succeeds. # Any subsequent caller trying to allocate a different destination
- * port for that tunnel type shall fail until the usage count for the original
- * destination port goes to zero. # A caller that frees a port will cause the
- * usage count for that port to decrement.
- */
-/* Input (24 bytes) */
-struct hwrm_tunnel_dst_port_alloc_input {
- uint16_t req_type;
+/* hwrm_cfa_meter_profile_cfg_output (size:128b/16B) */
+struct hwrm_cfa_meter_profile_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t cmpl_ring;
+ uint8_t valid;
+} __attribute__((packed));
+
+/*********************************
+ * hwrm_cfa_meter_instance_alloc *
+ *********************************/
+
+
+/* hwrm_cfa_meter_instance_alloc_input (size:192b/24B) */
+struct hwrm_cfa_meter_instance_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t cmpl_ring;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint64_t resp_addr;
+ uint16_t seq_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint8_t tunnel_type;
- /* Tunnel Type. */
- /* Virtual eXtensible Local Area Network (VXLAN) */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1)
- /* Generic Network Virtualization Encapsulation (Geneve) */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
- UINT32_C(0x5)
+ uint16_t target_id;
/*
- * IPV4 over virtual eXtensible Local Area
- * Network (IPV4oVXLAN)
+	 * A physical address pointer pointing to a host buffer to which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
- UINT32_C(0x9)
- uint8_t unused_0;
- uint16_t tunnel_dst_port_val;
+ uint64_t resp_addr;
+ uint8_t flags;
/*
- * This field represents the value of L4 destination port used
- * for the given tunnel type. This field is valid for specific
- * tunnel types that use layer 4 (e.g. UDP) transports for
- * tunneling. This field is in network byte order. A value of 0
- * shall fail the command.
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH \
+ UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_TX \
+ UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_RX \
+ UINT32_C(0x1)
+ #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_LAST \
+ HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_RX
+ uint8_t unused_0;
+ /* This value identifies a meter profile in CFA. */
+ uint16_t meter_profile_id;
+ /*
+ * A value of 0xffff is considered invalid and implies the
+ * profile is not configured.
*/
- uint32_t unused_1;
+ #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_METER_PROFILE_ID_INVALID \
+ UINT32_C(0xffff)
+ #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_METER_PROFILE_ID_LAST \
+ HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_METER_PROFILE_ID_INVALID
+ uint8_t unused_1[4];
} __attribute__((packed));
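Editor's illustration only (not part of the generated interface): a minimal sketch of how a caller might populate this request to bind a meter instance to an existing profile on the RX path. The helper name fill_meter_instance_alloc() is hypothetical; the struct, field, and macro names come from the definitions above, and the command header fields (req_type, seq_id, resp_addr) are assumed to be filled by the driver's usual command setup code.

#include <string.h>

/* Hypothetical sketch: request a meter instance on the RX path using a
 * previously allocated profile id. HWRM fields are little-endian on the
 * wire; a real driver typically converts them with rte_cpu_to_le_*().
 */
static void
fill_meter_instance_alloc(struct hwrm_cfa_meter_instance_alloc_input *req,
			  uint16_t profile_id)
{
	memset(req, 0, sizeof(*req));
	req->flags = HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_RX;
	/* A profile id equal to
	 * HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_METER_PROFILE_ID_INVALID
	 * would mean "no profile"; pass a real, previously allocated id.
	 */
	req->meter_profile_id = profile_id;
}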
-/* Output (16 bytes) */
-struct hwrm_tunnel_dst_port_alloc_output {
- uint16_t error_code;
+/* hwrm_cfa_meter_instance_alloc_output (size:128b/16B) */
+struct hwrm_cfa_meter_instance_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* This value identifies a meter instance in CFA. */
+ uint16_t meter_instance_id;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * A value of 0xffff is considered invalid and implies the
+ * instance is not configured.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ #define HWRM_CFA_METER_INSTANCE_ALLOC_OUTPUT_METER_INSTANCE_ID_INVALID \
+ UINT32_C(0xffff)
+ #define HWRM_CFA_METER_INSTANCE_ALLOC_OUTPUT_METER_INSTANCE_ID_LAST \
+ HWRM_CFA_METER_INSTANCE_ALLOC_OUTPUT_METER_INSTANCE_ID_INVALID
+ uint8_t unused_0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/********************************
+ * hwrm_cfa_meter_instance_free *
+ ********************************/
+
+
+/* hwrm_cfa_meter_instance_free_input (size:192b/24B) */
+struct hwrm_cfa_meter_instance_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t tunnel_dst_port_id;
+ uint16_t cmpl_ring;
/*
- * Identifier of a tunnel L4 destination port value. Only
- * applies to tunnel types that has l4 destination port
- * parameters.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint8_t flags;
+ /*
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
*/
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t unused_4;
- uint8_t valid;
+ #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_TX \
+ UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_RX \
+ UINT32_C(0x1)
+ #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_LAST \
+ HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_RX
+ uint8_t unused_0;
+ /* This value identifies a meter instance in CFA. */
+ uint16_t meter_instance_id;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * A value of 0xffff is considered invalid and implies the
+ * instance is not configured.
*/
+ #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_METER_INSTANCE_ID_INVALID \
+ UINT32_C(0xffff)
+ #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_METER_INSTANCE_ID_LAST \
+ HWRM_CFA_METER_INSTANCE_FREE_INPUT_METER_INSTANCE_ID_INVALID
+ uint8_t unused_1[4];
} __attribute__((packed));
-/* hwrm_tunnel_dst_port_free */
-/*
- * Description: This function is called by a driver to free l4 destination port
- * for a specific tunnel type.
- */
-/* Input (24 bytes) */
-struct hwrm_tunnel_dst_port_free_input {
- uint16_t req_type;
+/* hwrm_cfa_meter_instance_free_output (size:128b/16B) */
+struct hwrm_cfa_meter_instance_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t cmpl_ring;
+ uint8_t valid;
+} __attribute__((packed));
+
+/*******************************
+ * hwrm_cfa_decap_filter_alloc *
+ *******************************/
+
+
+/* hwrm_cfa_decap_filter_alloc_input (size:832b/104B) */
+struct hwrm_cfa_decap_filter_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t cmpl_ring;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint64_t resp_addr;
+ uint16_t seq_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint8_t tunnel_type;
- /* Tunnel Type. */
- /* Virtual eXtensible Local Area Network (VXLAN) */
- #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1)
- /* Generic Network Virtualization Encapsulation (Geneve) */
- #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5)
+ uint16_t target_id;
/*
- * IPV4 over virtual eXtensible Local Area
- * Network (IPV4oVXLAN)
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ uint64_t resp_addr;
+ uint32_t flags;
+ /* ovs_tunnel is 1 b */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_FLAGS_OVS_TUNNEL \
+ UINT32_C(0x1)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the tunnel_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the tunnel_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_ID \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the src_macaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the dst_macaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the ovlan_vid field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_OVLAN_VID \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the ivlan_vid field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_IVLAN_VID \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the t_ovlan_vid field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_T_OVLAN_VID \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the t_ivlan_vid field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_T_IVLAN_VID \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the ethertype field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE \
+ UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the src_ipaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR \
+ UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the dst_ipaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR \
+ UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the ipaddr_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE \
+ UINT32_C(0x800)
+ /*
+ * This bit must be '1' for the ip_protocol field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL \
+ UINT32_C(0x1000)
+ /*
+ * This bit must be '1' for the src_port field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT \
+ UINT32_C(0x2000)
+ /*
+ * This bit must be '1' for the dst_port field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_PORT \
+ UINT32_C(0x4000)
+ /*
+ * This bit must be '1' for the dst_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_ID \
+ UINT32_C(0x8000)
+ /*
+ * This bit must be '1' for the mirror_vnic_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
+ UINT32_C(0x10000)
+ /*
+ * Tunnel identifier.
+ * Virtual Network Identifier (VNI). Only valid with
+ * tunnel_types VXLAN, NVGRE, and Geneve.
+ * Only lower 24-bits of VNI field are used
+ * in setting up the filter.
+ */
+ uint32_t tunnel_id;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Non-tunnel */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
+ UINT32_C(0x0)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
UINT32_C(0x9)
- uint8_t unused_0;
- uint16_t tunnel_dst_port_id;
+ /* Any tunneled traffic */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
+ UINT32_C(0xff)
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL
+ uint8_t unused_0;
+ uint16_t unused_1;
/*
- * Identifier of a tunnel L4 destination port value. Only
- * applies to tunnel types that has l4 destination port
- * parameters.
+ * This value indicates the source MAC address in
+ * the Ethernet header.
*/
- uint32_t unused_1;
-} __attribute__((packed));
-
-/* Output (16 bytes) */
-struct hwrm_tunnel_dst_port_free_output {
- uint16_t error_code;
+ uint8_t src_macaddr[6];
+ uint8_t unused_2[2];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This value indicates the destination MAC address in
+ * the Ethernet header.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t dst_macaddr[6];
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * This value indicates the VLAN ID of the outer VLAN tag
+ * in the Ethernet header.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t ovlan_vid;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This value indicates the VLAN ID of the inner VLAN tag
+ * in the Ethernet header.
*/
-} __attribute__((packed));
-
-/* hwrm_stat_ctx_alloc */
-/*
- * Description: This command allocates and does basic preparation for a stat
- * context.
- */
-/* Input (32 bytes) */
-struct hwrm_stat_ctx_alloc_input {
- uint16_t req_type;
+ uint16_t ivlan_vid;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * This value indicates the VLAN ID of the outer VLAN tag
+ * in the tunnel Ethernet header.
*/
- uint16_t cmpl_ring;
+ uint16_t t_ovlan_vid;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * This value indicates the VLAN ID of the inner VLAN tag
+ * in the tunnel Ethernet header.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t t_ivlan_vid;
+ /* This value indicates the ethertype in the Ethernet header. */
+ uint16_t ethertype;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * This value indicates the type of IP address.
+ * 4 - IPv4
+ * 6 - IPv6
+ * All others are invalid.
*/
- uint64_t resp_addr;
+ uint8_t ip_addr_type;
+ /* invalid */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_UNKNOWN \
+ UINT32_C(0x0)
+ /* IPv4 */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 \
+ UINT32_C(0x4)
+ /* IPv6 */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 \
+ UINT32_C(0x6)
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_LAST \
+ HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * The value of the protocol field in the IP header.
+ * Applies to UDP and TCP traffic.
+ * 6 - TCP
+ * 17 - UDP
*/
- uint64_t stats_dma_addr;
- /* This is the address for statistic block. */
- uint32_t update_period_ms;
+ uint8_t ip_protocol;
+ /* invalid */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN \
+ UINT32_C(0x0)
+ /* TCP */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_TCP \
+ UINT32_C(0x6)
+ /* UDP */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP \
+ UINT32_C(0x11)
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_LAST \
+ HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP
+ uint16_t unused_3;
+ uint32_t unused_4;
/*
- * The statistic block update period in ms. e.g. 250ms, 500ms,
- * 750ms, 1000ms. If update_period_ms is 0, then the stats
- * update shall be never done and the DMA address shall not be
- * used. In this case, the stat block can only be read by
- * hwrm_stat_ctx_query command.
+ * The value of source IP address to be used in filtering.
+ * For IPv4, first four bytes represent the IP address.
*/
- uint8_t stat_ctx_flags;
+ uint32_t src_ipaddr[4];
/*
- * This field is used to specify statistics context specific
- * configuration flags.
+ * The value of destination IP address to be used in filtering.
+ * For IPv4, first four bytes represent the IP address.
*/
+ uint32_t dst_ipaddr[4];
/*
- * When this bit is set to '1', the statistics context shall be
- * allocated for RoCE traffic only. In this case, traffic other
- * than offloaded RoCE traffic shall not be included in this
- * statistic context. When this bit is set to '0', the
- * statistics context shall be used for the network traffic
- * other than offloaded RoCE traffic.
+ * The value of source port to be used in filtering.
+ * Applies to UDP and TCP traffic.
+ */
+ uint16_t src_port;
+ /*
+ * The value of destination port to be used in filtering.
+ * Applies to UDP and TCP traffic.
+ */
+ uint16_t dst_port;
+ /*
+ * If set, this value shall represent the
+ * Logical VNIC ID of the destination VNIC for the RX
+ * path.
+ */
+ uint16_t dst_id;
+ /*
+ * If set, this value shall represent the L2 context that matches the L2
+ * information of the decap filter.
*/
- #define HWRM_STAT_CTX_ALLOC_INPUT_STAT_CTX_FLAGS_ROCE UINT32_C(0x1)
- uint8_t unused_0[3];
+ uint16_t l2_ctxt_ref_id;
} __attribute__((packed));
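Every match field in this request is ignored unless the corresponding bit in enables is set. As an illustrative sketch only (the helper name and argument choices are hypothetical, not from the interface above), a VXLAN decap filter keyed on the tunnel VNI and the UDP destination port, steered to a destination VNIC, could be composed like this:

#include <string.h>

/* Hypothetical sketch: match VXLAN traffic on a given VNI and UDP
 * destination port and steer it to dst_id. Only fields whose enable bit
 * is set below are interpreted by the firmware.
 */
static void
fill_vxlan_decap_filter(struct hwrm_cfa_decap_filter_alloc_input *req,
			uint32_t vni, uint16_t udp_dst_port, uint16_t dst_id)
{
	memset(req, 0, sizeof(*req));
	req->enables =
		HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE |
		HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_ID |
		HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_PORT |
		HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req->tunnel_type = HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
	req->tunnel_id = vni;		/* only the low 24 bits are used */
	req->dst_port = udp_dst_port;	/* L4 destination port to match */
	req->dst_id = dst_id;		/* destination VNIC on the RX path */
}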
-/* Output (16 bytes) */
-struct hwrm_stat_ctx_alloc_output {
- uint16_t error_code;
+/* hwrm_cfa_decap_filter_alloc_output (size:128b/16B) */
+struct hwrm_cfa_decap_filter_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* This value is an opaque id into CFA data structures. */
+ uint32_t decap_filter_id;
+ uint8_t unused_0[3];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
+/******************************
+ * hwrm_cfa_decap_filter_free *
+ ******************************/
+
+
+/* hwrm_cfa_decap_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_decap_filter_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint32_t stat_ctx_id;
- /* This is the statistics context ID value. */
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t valid;
+ uint16_t cmpl_ring;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* This value is an opaque id into CFA data structures. */
+ uint32_t decap_filter_id;
+ uint8_t unused_0[4];
} __attribute__((packed));
-/* hwrm_stat_ctx_free */
-/* Description: This command is used to free a stat context. */
-/* Input (24 bytes) */
-struct hwrm_stat_ctx_free_input {
- uint16_t req_type;
+/* hwrm_cfa_decap_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_decap_filter_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_cfa_flow_alloc *
+ ***********************/
+
+
+/* hwrm_cfa_flow_alloc_input (size:1024b/128B) */
+struct hwrm_cfa_flow_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t stat_ctx_id;
- /* ID of the statistics context that is being queried. */
- uint32_t unused_0;
+ uint64_t resp_addr;
+ uint16_t flags;
+ /* tunnel is 1 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_TUNNEL UINT32_C(0x1)
+ /* num_vlan is 2 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_MASK UINT32_C(0x6)
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_SFT 1
+ /* no tags */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_NONE \
+ (UINT32_C(0x0) << 1)
+ /* 1 tag */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_ONE \
+ (UINT32_C(0x1) << 1)
+ /* 2 tags */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_TWO \
+ (UINT32_C(0x2) << 1)
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_LAST \
+ HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_TWO
+ /* Enumeration denoting the Flow Type. */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_MASK UINT32_C(0x38)
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_SFT 3
+ /* L2 flow */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_L2 \
+ (UINT32_C(0x0) << 3)
+ /* IPV4 flow */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_IPV4 \
+ (UINT32_C(0x1) << 3)
+ /* IPV6 flow */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_IPV6 \
+ (UINT32_C(0x2) << 3)
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_LAST \
+ HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_IPV6
+ /*
+ * Tx Flow: vf fid.
+ * Rx Flow: pf fid.
+ */
+ uint16_t src_fid;
+ /* Tunnel handle valid when tunnel flag is set. */
+ uint32_t tunnel_handle;
+ uint16_t action_flags;
+ /*
+ * Setting of this flag indicates a forward action.
+ */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_FWD \
+ UINT32_C(0x1)
+ /* recycle is 1 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_RECYCLE \
+ UINT32_C(0x2)
+ /*
+ * Setting of this flag indicates drop action. If this flag is not set,
+ * then it should be considered accept action.
+ */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_DROP \
+ UINT32_C(0x4)
+ /* meter is 1 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_METER \
+ UINT32_C(0x8)
+ /* tunnel is 1 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_TUNNEL \
+ UINT32_C(0x10)
+ /* nat_src is 1 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_NAT_SRC \
+ UINT32_C(0x20)
+ /* nat_dest is 1 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_NAT_DEST \
+ UINT32_C(0x40)
+ /* nat_ipv4_address is 1 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_NAT_IPV4_ADDRESS \
+ UINT32_C(0x80)
+ /* l2_header_rewrite is 1 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_L2_HEADER_REWRITE \
+ UINT32_C(0x100)
+ /* ttl_decrement is 1 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_TTL_DECREMENT \
+ UINT32_C(0x200)
+ /*
+ * Tx Flow: pf or vf fid.
+ * Rx Flow: vf fid.
+ */
+ uint16_t dst_fid;
+ /* VLAN tpid, valid when push_vlan flag is set. */
+ uint16_t l2_rewrite_vlan_tpid;
+ /* VLAN tci, valid when push_vlan flag is set. */
+ uint16_t l2_rewrite_vlan_tci;
+ /* Meter id, valid when meter flag is set. */
+ uint16_t act_meter_id;
+ /* Flow with the same l2 context tcam key. */
+ uint16_t ref_flow_handle;
+ /* This value sets the match value for the ethertype. */
+ uint16_t ethertype;
+ /* valid when num tags is 1 or 2. */
+ uint16_t outer_vlan_tci;
+ /* This value sets the match value for the Destination MAC address. */
+ uint16_t dmac[3];
+ /* valid when num tags is 2. */
+ uint16_t inner_vlan_tci;
+ /* This value sets the match value for the Source MAC address. */
+ uint16_t smac[3];
+ /* The bit length of destination IP address mask. */
+ uint8_t ip_dst_mask_len;
+ /* The bit length of source IP address mask. */
+ uint8_t ip_src_mask_len;
+ /* The value of destination IPv4/IPv6 address. */
+ uint32_t ip_dst[4];
+ /* The source IPv4/IPv6 address. */
+ uint32_t ip_src[4];
+ /*
+ * The value of source port.
+ * Applies to UDP and TCP traffic.
+ */
+ uint16_t l4_src_port;
+ /*
+ * The value of source port mask.
+ * Applies to UDP and TCP traffic.
+ */
+ uint16_t l4_src_port_mask;
+ /*
+ * The value of destination port.
+ * Applies to UDP and TCP traffic.
+ */
+ uint16_t l4_dst_port;
+ /*
+ * The value of destination port mask.
+ * Applies to UDP and TCP traffic.
+ */
+ uint16_t l4_dst_port_mask;
+ /*
+ * NAT IPv4/6 address based on address type flag.
+ * 0 values are ignored.
+ */
+ uint32_t nat_ip_address[4];
+ /* L2 header re-write Destination MAC address. */
+ uint16_t l2_rewrite_dmac[3];
+ /*
+ * The NAT source/destination port based on direction flag.
+ * Applies to UDP and TCP traffic.
+ * 0 values are ignored.
+ */
+ uint16_t nat_port;
+ /* L2 header re-write Source MAC address. */
+ uint16_t l2_rewrite_smac[3];
+ /* The value of ip protocol. */
+ uint8_t ip_proto;
+ uint8_t unused_0;
} __attribute__((packed));
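The flags word packs two multi-bit fields (NUM_VLAN at bit 1, FLOWTYPE at bit 3) alongside the tunnel bit, and action_flags selects the actions applied to matching packets. A hedged sketch of composing them for a single-tagged IPv4 flow that is forwarded and metered follows; the helper name is hypothetical, every other identifier comes from the definitions above.

#include <string.h>

/* Hypothetical sketch: an IPv4 flow with one VLAN tag, forwarded to dst_fid
 * and policed by an existing meter instance. Match keys (ethertype, MAC and
 * IP addresses, ports, masks) would be filled in afterwards.
 */
static void
fill_flow_alloc(struct hwrm_cfa_flow_alloc_input *req, uint16_t src_fid,
		uint16_t dst_fid, uint16_t meter_instance_id)
{
	memset(req, 0, sizeof(*req));
	req->flags = HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_ONE |
		     HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_IPV4;
	req->action_flags = HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_FWD |
			    HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_METER;
	req->src_fid = src_fid;
	req->dst_fid = dst_fid;
	req->act_meter_id = meter_instance_id;
}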
-/* Output (16 bytes) */
-struct hwrm_stat_ctx_free_output {
- uint16_t error_code;
+/* hwrm_cfa_flow_alloc_output (size:128b/16B) */
+struct hwrm_cfa_flow_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Flow record index. */
+ uint16_t flow_handle;
+ uint8_t unused_0[5];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_cfa_flow_free *
+ **********************/
+
+
+/* hwrm_cfa_flow_free_input (size:192b/24B) */
+struct hwrm_cfa_flow_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint32_t stat_ctx_id;
- /* This is the statistics context ID value. */
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t valid;
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Flow record index. */
+ uint16_t flow_handle;
+ uint8_t unused_0[6];
} __attribute__((packed));
-/* hwrm_stat_ctx_query */
-/* Description: This command returns statistics of a context. */
-/* Input (24 bytes) */
-struct hwrm_stat_ctx_query_input {
- uint16_t req_type;
+/* hwrm_cfa_flow_free_output (size:256b/32B) */
+struct hwrm_cfa_flow_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* packet is 64 b */
+ uint64_t packet;
+ /* byte is 64 b */
+ uint64_t byte;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_cfa_flow_info *
+ **********************/
+
+
+/* hwrm_cfa_flow_info_input (size:192b/24B) */
+struct hwrm_cfa_flow_info_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t stat_ctx_id;
- /* ID of the statistics context that is being queried. */
- uint32_t unused_0;
+ uint64_t resp_addr;
+ /* Flow record index. */
+ uint16_t flow_handle;
+ /* Max flow handle */
+ #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_MAX_MASK \
+ UINT32_C(0xfff)
+ #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_MAX_SFT 0
+ /* CNP flow handle */
+ #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_CNP_CNT \
+ UINT32_C(0x1000)
+ /* Direction rx = 1 */
+ #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX \
+ UINT32_C(0x8000)
+ uint8_t unused_0[6];
} __attribute__((packed));
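The flow_handle in this request is not a plain index: the low 12 bits carry the flow record index (MAX_MASK), bit 12 selects the CNP counter, and bit 15 selects the RX direction. A small illustrative helper (hypothetical name, identifiers from the definitions above) shows how an RX lookup handle could be encoded:

/* Hypothetical sketch: encode a flow-info handle for the RX direction from
 * a flow record index returned by hwrm_cfa_flow_alloc.
 */
static uint16_t
make_rx_flow_info_handle(uint16_t flow_index)
{
	return (flow_index & HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_MAX_MASK) |
	       HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX;
}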
-/* Output (176 bytes) */
-struct hwrm_stat_ctx_query_output {
- uint16_t error_code;
+/* hwrm_cfa_flow_info_output (size:448b/56B) */
+struct hwrm_cfa_flow_info_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* flags is 8 b */
+ uint8_t flags;
+ /* profile is 8 b */
+ uint8_t profile;
+ /* src_fid is 16 b */
+ uint16_t src_fid;
+ /* dst_fid is 16 b */
+ uint16_t dst_fid;
+ /* l2_ctxt_id is 16 b */
+ uint16_t l2_ctxt_id;
+ /* em_info is 64 b */
+ uint64_t em_info;
+ /* tcam_info is 64 b */
+ uint64_t tcam_info;
+ /* vfp_tcam_info is 64 b */
+ uint64_t vfp_tcam_info;
+ /* ar_id is 16 b */
+ uint16_t ar_id;
+ /* flow_handle is 16 b */
+ uint16_t flow_handle;
+ /* tunnel_handle is 32 b */
+ uint32_t tunnel_handle;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_cfa_flow_flush *
+ ***********************/
+
+
+/* hwrm_cfa_flow_flush_input (size:192b/24B) */
+struct hwrm_cfa_flow_flush_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint16_t cmpl_ring;
/*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint64_t tx_ucast_pkts;
- /* Number of transmitted unicast packets */
- uint64_t tx_mcast_pkts;
- /* Number of transmitted multicast packets */
- uint64_t tx_bcast_pkts;
- /* Number of transmitted broadcast packets */
- uint64_t tx_err_pkts;
- /* Number of transmitted packets with error */
- uint64_t tx_drop_pkts;
- /* Number of dropped packets on transmit path */
- uint64_t tx_ucast_bytes;
- /* Number of transmitted bytes for unicast traffic */
- uint64_t tx_mcast_bytes;
- /* Number of transmitted bytes for multicast traffic */
- uint64_t tx_bcast_bytes;
- /* Number of transmitted bytes for broadcast traffic */
- uint64_t rx_ucast_pkts;
- /* Number of received unicast packets */
- uint64_t rx_mcast_pkts;
- /* Number of received multicast packets */
- uint64_t rx_bcast_pkts;
- /* Number of received broadcast packets */
- uint64_t rx_err_pkts;
- /* Number of received packets with error */
- uint64_t rx_drop_pkts;
- /* Number of dropped packets on received path */
- uint64_t rx_ucast_bytes;
- /* Number of received bytes for unicast traffic */
- uint64_t rx_mcast_bytes;
- /* Number of received bytes for multicast traffic */
- uint64_t rx_bcast_bytes;
- /* Number of received bytes for broadcast traffic */
- uint64_t rx_agg_pkts;
- /* Number of aggregated unicast packets */
- uint64_t rx_agg_bytes;
- /* Number of aggregated unicast bytes */
- uint64_t rx_agg_events;
- /* Number of aggregation events */
- uint64_t rx_agg_aborts;
- /* Number of aborted aggregations */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
+ uint64_t resp_addr;
+ uint32_t flags;
+ uint8_t unused_0[4];
} __attribute__((packed));
-/* hwrm_stat_ctx_clr_stats */
-/* Description: This command clears statistics of a context. */
-/* Input (24 bytes) */
-struct hwrm_stat_ctx_clr_stats_input {
- uint16_t req_type;
+/* hwrm_cfa_flow_flush_output (size:128b/16B) */
+struct hwrm_cfa_flow_flush_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_cfa_flow_stats *
+ ***********************/
+
+
+/* hwrm_cfa_flow_stats_input (size:320b/40B) */
+struct hwrm_cfa_flow_stats_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t stat_ctx_id;
- /* ID of the statistics context that is being queried. */
- uint32_t unused_0;
+ uint64_t resp_addr;
+ /* Number of valid flow handles in this request. */
+ uint16_t num_flows;
+ /* Flow handle. */
+ uint16_t flow_handle_0;
+ /* Flow handle. */
+ uint16_t flow_handle_1;
+ /* Flow handle. */
+ uint16_t flow_handle_2;
+ /* Flow handle. */
+ uint16_t flow_handle_3;
+ /* Flow handle. */
+ uint16_t flow_handle_4;
+ /* Flow handle. */
+ uint16_t flow_handle_5;
+ /* Flow handle. */
+ uint16_t flow_handle_6;
+ /* Flow handle. */
+ uint16_t flow_handle_7;
+ /* Flow handle. */
+ uint16_t flow_handle_8;
+ /* Flow handle. */
+ uint16_t flow_handle_9;
+ uint8_t unused_0[2];
} __attribute__((packed));
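This request batches statistics for up to ten flows at once: num_flows gives the count and flow_handle_0 through flow_handle_9 hold the handles, with unused slots left at zero. An illustrative sketch follows (hypothetical helper name; it relies on the ten handle fields being consecutive in this packed structure):

#include <string.h>

/* Hypothetical sketch: query statistics for up to 10 flow handles in one
 * command. The handle slots are consecutive uint16_t fields in the packed
 * request, so they can be filled with a single bounded copy.
 */
static void
fill_flow_stats_query(struct hwrm_cfa_flow_stats_input *req,
		      const uint16_t *handles, uint16_t count)
{
	memset(req, 0, sizeof(*req));
	if (count > 10)
		count = 10;
	req->num_flows = count;
	memcpy(&req->flow_handle_0, handles, count * sizeof(uint16_t));
}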
-/* Output (16 bytes) */
-struct hwrm_stat_ctx_clr_stats_output {
- uint16_t error_code;
+/* hwrm_cfa_flow_stats_output (size:1408b/176B) */
+struct hwrm_cfa_flow_stats_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* packet_0 is 64 b */
+ uint64_t packet_0;
+ /* packet_1 is 64 b */
+ uint64_t packet_1;
+ /* packet_2 is 64 b */
+ uint64_t packet_2;
+ /* packet_3 is 64 b */
+ uint64_t packet_3;
+ /* packet_4 is 64 b */
+ uint64_t packet_4;
+ /* packet_5 is 64 b */
+ uint64_t packet_5;
+ /* packet_6 is 64 b */
+ uint64_t packet_6;
+ /* packet_7 is 64 b */
+ uint64_t packet_7;
+ /* packet_8 is 64 b */
+ uint64_t packet_8;
+ /* packet_9 is 64 b */
+ uint64_t packet_9;
+ /* byte_0 is 64 b */
+ uint64_t byte_0;
+ /* byte_1 is 64 b */
+ uint64_t byte_1;
+ /* byte_2 is 64 b */
+ uint64_t byte_2;
+ /* byte_3 is 64 b */
+ uint64_t byte_3;
+ /* byte_4 is 64 b */
+ uint64_t byte_4;
+ /* byte_5 is 64 b */
+ uint64_t byte_5;
+ /* byte_6 is 64 b */
+ uint64_t byte_6;
+ /* byte_7 is 64 b */
+ uint64_t byte_7;
+ /* byte_8 is 64 b */
+ uint64_t byte_8;
+ /* byte_9 is 64 b */
+ uint64_t byte_9;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************
+ * hwrm_cfa_vf_pair_alloc *
+ **************************/
+
+
+/* hwrm_cfa_vf_pair_alloc_input (size:448b/56B) */
+struct hwrm_cfa_vf_pair_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t target_id;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
+ uint64_t resp_addr;
+ /* Logical VF number (range: 0 -> MAX_VFS -1). */
+ uint16_t vf_a_id;
+ /* Logical VF number (range: 0 -> MAX_VFS -1). */
+ uint16_t vf_b_id;
+ uint8_t unused_0[4];
+ /* VF Pair name (32 byte string). */
+ char pair_name[32];
} __attribute__((packed));
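pair_name is a fixed 32-byte string, so it has to be copied with a bounded copy and NUL padding rather than assigned. A short illustrative sketch (hypothetical helper name; identifiers otherwise from the definition above):

#include <string.h>

/* Hypothetical sketch: pair two logical VFs under a caller-chosen name. */
static void
fill_vf_pair_alloc(struct hwrm_cfa_vf_pair_alloc_input *req,
		   uint16_t vf_a, uint16_t vf_b, const char *name)
{
	memset(req, 0, sizeof(*req));	/* zeroing also NUL-pads pair_name */
	req->vf_a_id = vf_a;
	req->vf_b_id = vf_b;
	strncpy(req->pair_name, name, sizeof(req->pair_name) - 1);
}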
-/* hwrm_exec_fwd_resp */
-/*
- * Description: This command is used to send an encapsulated request to the
- * HWRM. This command instructs the HWRM to execute the request and forward the
- * response of the encapsulated request to the location specified in the
- * original request that is encapsulated. The target id of this command shall be
- * set to 0xFFFF (HWRM). The response location in this command shall be used to
- * acknowledge the receipt of the encapsulated request and forwarding of the
- * response.
- */
-/* Input (128 bytes) */
-struct hwrm_exec_fwd_resp_input {
- uint16_t req_type;
+/* hwrm_cfa_vf_pair_alloc_output (size:128b/16B) */
+struct hwrm_cfa_vf_pair_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t cmpl_ring;
+ uint8_t valid;
+} __attribute__((packed));
+
+/*************************
+ * hwrm_cfa_vf_pair_free *
+ *************************/
+
+
+/* hwrm_cfa_vf_pair_free_input (size:384b/48B) */
+struct hwrm_cfa_vf_pair_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t cmpl_ring;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint64_t resp_addr;
+ uint16_t seq_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint32_t encap_request[26];
+ uint16_t target_id;
/*
- * This is an encapsulated request. This request should be
- * executed by the HWRM and the response should be provided in
- * the response buffer inside the encapsulated request.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t encap_resp_target_id;
+ uint64_t resp_addr;
+ /* VF Pair name (32 byte string). */
+ char pair_name[32];
+} __attribute__((packed));
+
+/* hwrm_cfa_vf_pair_free_output (size:128b/16B) */
+struct hwrm_cfa_vf_pair_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This value indicates the target id of the response to the
- * encapsulated request. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF -
- * HWRM
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t unused_0[3];
+ uint8_t valid;
} __attribute__((packed));
-/* Output (16 bytes) */
-struct hwrm_exec_fwd_resp_output {
- uint16_t error_code;
+/*************************
+ * hwrm_cfa_vf_pair_info *
+ *************************/
+
+
+/* hwrm_cfa_vf_pair_info_input (size:448b/56B) */
+struct hwrm_cfa_vf_pair_info_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint16_t cmpl_ring;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+	 * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
+ uint64_t resp_addr;
+ uint32_t flags;
+ /* If this flag is set, lookup by name else lookup by index. */
+ #define HWRM_CFA_VF_PAIR_INFO_INPUT_FLAGS_LOOKUP_TYPE UINT32_C(0x1)
+ /* vf pair table index. */
+ uint16_t vf_pair_index;
+ uint8_t unused_0[2];
+ /* VF Pair name (32 byte string). */
+ char vf_pair_name[32];
} __attribute__((packed));
-/* hwrm_reject_fwd_resp */
-/*
- * Description: This command is used to send an encapsulated request to the
- * HWRM. This command instructs the HWRM to reject the request and forward the
- * error response of the encapsulated request to the location specified in the
- * original request that is encapsulated. The target id of this command shall be
- * set to 0xFFFF (HWRM). The response location in this command shall be used to
- * acknowledge the receipt of the encapsulated request and forwarding of the
- * response.
- */
-/* Input (128 bytes) */
-struct hwrm_reject_fwd_resp_input {
- uint16_t req_type;
+/* hwrm_cfa_vf_pair_info_output (size:512b/64B) */
+struct hwrm_cfa_vf_pair_info_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* vf pair table index. */
+ uint16_t next_vf_pair_index;
+ /* vf pair member a's vf_fid. */
+ uint16_t vf_a_fid;
+ /* vf pair member a's Linux logical VF number. */
+ uint16_t vf_a_index;
+ /* vf pair member b's vf_fid. */
+ uint16_t vf_b_fid;
+	/* vf pair member b's Linux logical VF number. */
+ uint16_t vf_b_index;
+ /* vf pair state. */
+ uint8_t pair_state;
+ /* Pair has been allocated */
+ #define HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_ALLOCATED UINT32_C(0x1)
+ /* Both pair members are active */
+ #define HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE UINT32_C(0x2)
+ #define HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_LAST \
+ HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE
+ uint8_t unused_0[5];
+ /* VF Pair name (32 byte string). */
+ char pair_name[32];
+ uint8_t unused_1[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_cfa_pair_alloc *
+ ***********************/
+
+
+/* hwrm_cfa_pair_alloc_input (size:576b/72B) */
+struct hwrm_cfa_pair_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+	 * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t encap_request[26];
+ uint64_t resp_addr;
+	/* Pair mode (0-vf2fn, 1-rep2fn, 2-rep2rep, 3-proxy, 4-pfpair, 5-rep2fn_mod, 6-rep2fn_modall). */
+ uint8_t pair_mode;
+ /* Pair between VF on local host with PF or VF on specified host. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_VF2FN UINT32_C(0x0)
+ /* Pair between REP on local host with PF or VF on specified host. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN UINT32_C(0x1)
+ /* Pair between REP on local host with REP on specified host. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2REP UINT32_C(0x2)
+ /* Pair for the proxy interface. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_PROXY UINT32_C(0x3)
+ /* Pair for the PF interface. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_PFPAIR UINT32_C(0x4)
+	/* Modify existing rep2fn pair and move pair to new PF. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN_MOD UINT32_C(0x5)
+	/* Modify existing rep2fn pairs paired with same PF and move pairs to new PF. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN_MODALL UINT32_C(0x6)
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_LAST \
+ HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN_MODALL
+ uint8_t unused_0;
+ /* Logical VF number (range: 0 -> MAX_VFS -1). */
+ uint16_t vf_a_id;
+ /* Logical Host (0xff-local host). */
+ uint8_t host_b_id;
+ /* Logical PF (0xff-PF for command channel). */
+ uint8_t pf_b_id;
+ /* Logical VF number (range: 0 -> MAX_VFS -1). */
+ uint16_t vf_b_id;
+ /* Loopback port (0xff-internal loopback), valid for mode-3. */
+ uint8_t port_id;
+	/* Priority used for encap of loopback packets, valid for mode-3. */
+ uint8_t pri;
+ /* New PF for rep2fn modify, valid for mode 5. */
+ uint16_t new_pf_fid;
+ uint32_t enables;
/*
- * This is an encapsulated request. This request should be
- * rejected by the HWRM and the error response should be
- * provided in the response buffer inside the encapsulated
- * request.
+ * This bit must be '1' for the q_ab field to be
+ * configured.
+ */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_AB_VALID UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the q_ba field to be
+ * configured.
+ */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_BA_VALID UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the fc_ab field to be
+ * configured.
*/
- uint16_t encap_resp_target_id;
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_AB_VALID UINT32_C(0x4)
/*
- * This value indicates the target id of the response to the
- * encapsulated request. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF -
- * HWRM
+ * This bit must be '1' for the fc_ba field to be
+ * configured.
+ */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_BA_VALID UINT32_C(0x8)
+ /* VF Pair name (32 byte string). */
+ char pair_name[32];
+ /*
+ * The q_ab value specifies the logical index of the TX/RX CoS
+ * queue to be assigned for traffic in the A to B direction of
+ * the interface pair. The default value is 0.
+ */
+ uint8_t q_ab;
+ /*
+ * The q_ba value specifies the logical index of the TX/RX CoS
+ * queue to be assigned for traffic in the B to A direction of
+ * the interface pair. The default value is 1.
+ */
+ uint8_t q_ba;
+ /*
+ * Specifies whether RX ring flow control is disabled (0) or enabled
+ * (1) in the A to B direction. The default value is 0, meaning that
+ * packets will be dropped when the B-side RX rings are full.
*/
- uint16_t unused_0[3];
+ uint8_t fc_ab;
+ /*
+ * Specifies whether RX ring flow control is disabled (0) or enabled
+ * (1) in the B to A direction. The default value is 1, meaning that
+ * the RX CoS queue will be flow controlled when the A-side RX rings
+ * are full.
+ */
+ uint8_t fc_ba;
+ uint8_t unused_1[4];
} __attribute__((packed));
-/* Output (16 bytes) */
-struct hwrm_reject_fwd_resp_output {
- uint16_t error_code;
+/* hwrm_cfa_pair_alloc_output (size:192b/24B) */
+struct hwrm_cfa_pair_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Only valid for modes 1 and 2. */
+ uint16_t rx_cfa_code_a;
+ /* Only valid for modes 1 and 2. */
+ uint16_t tx_cfa_action_a;
+ /* Only valid for mode 2. */
+ uint16_t rx_cfa_code_b;
+ /* Only valid for mode 2. */
+ uint16_t tx_cfa_action_b;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
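
To illustrate how the enables bits gate the optional q_ab/q_ba fields above, here is a minimal sketch that fills an hwrm_cfa_pair_alloc_input for a rep2fn pair. The hwrm_send_message() helper and the HWRM_CFA_PAIR_ALLOC opcode constant are assumed placeholders for whatever transport and request-type definitions the consuming driver uses, and endianness conversions (the HWRM interface is little-endian, so production code wraps multi-byte stores in rte_cpu_to_le_*()) are omitted for brevity.

#include <stdint.h>
#include <string.h>

/* Hypothetical transport helper: sends the request, waits for the 'valid'
 * byte of the response (see the polling sketch above) and returns 0 on
 * success.  Provided by the consuming driver, not by this header. */
int hwrm_send_message(void *req, size_t req_len, void *resp, size_t resp_len);

static int cfa_pair_alloc_rep2fn(uint16_t vf_id, const char *name,
				 struct hwrm_cfa_pair_alloc_output *resp)
{
	struct hwrm_cfa_pair_alloc_input req;

	memset(&req, 0, sizeof(req));
	req.req_type = HWRM_CFA_PAIR_ALLOC;	/* assumed opcode constant */
	req.target_id = 0xffff;			/* 0xFFFF targets the HWRM itself */
	req.pair_mode = HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN;
	req.vf_a_id = vf_id;
	/* q_ab/q_ba are only honoured when the matching enables bit is set. */
	req.enables = HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_AB_VALID |
		      HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_BA_VALID;
	req.q_ab = 0;
	req.q_ba = 1;
	strncpy(req.pair_name, name, sizeof(req.pair_name) - 1);

	return hwrm_send_message(&req, sizeof(req), resp, sizeof(*resp));
}
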
+
+/**********************
+ * hwrm_cfa_pair_free *
+ **********************/
+
+
+/* hwrm_cfa_pair_free_input (size:384b/48B) */
+struct hwrm_cfa_pair_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint16_t cmpl_ring;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t seq_id;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* VF Pair name (32 byte string). */
+ char pair_name[32];
} __attribute__((packed));
-/* hwrm_nvm_get_dir_entries */
-/* Input (24 bytes) */
-struct hwrm_nvm_get_dir_entries_input {
- uint16_t req_type;
- uint16_t cmpl_ring;
- uint16_t seq_id;
- uint16_t target_id;
- uint64_t resp_addr;
- uint64_t host_dest_addr;
+/* hwrm_cfa_pair_free_output (size:128b/16B) */
+struct hwrm_cfa_pair_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
} __attribute__((packed));
-/* Output (16 bytes) */
-struct hwrm_nvm_get_dir_entries_output {
- uint16_t error_code;
- uint16_t req_type;
- uint16_t seq_id;
- uint16_t resp_len;
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
-} __attribute__((packed));
+/**********************
+ * hwrm_cfa_pair_info *
+ **********************/
-/* hwrm_nvm_erase_dir_entry */
-/* Input (24 bytes) */
-struct hwrm_nvm_erase_dir_entry_input {
- uint16_t req_type;
- uint16_t cmpl_ring;
- uint16_t seq_id;
- uint16_t target_id;
- uint64_t resp_addr;
- uint16_t dir_idx;
- uint16_t unused_0[3];
-};
-
-/* Output (16 bytes) */
-struct hwrm_nvm_erase_dir_entry_output {
- uint16_t error_code;
- uint16_t req_type;
- uint16_t seq_id;
- uint16_t resp_len;
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
-};
-
-/* hwrm_nvm_get_dir_info */
-/* Input (16 bytes) */
-struct hwrm_nvm_get_dir_info_input {
- uint16_t req_type;
- uint16_t cmpl_ring;
- uint16_t seq_id;
- uint16_t target_id;
- uint64_t resp_addr;
+/* hwrm_cfa_pair_info_input (size:448b/56B) */
+struct hwrm_cfa_pair_info_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /* If this flag is set, lookup by name else lookup by index. */
+ #define HWRM_CFA_PAIR_INFO_INPUT_FLAGS_LOOKUP_TYPE UINT32_C(0x1)
+ /* If this flag is set, lookup by PF id and VF id. */
+ #define HWRM_CFA_PAIR_INFO_INPUT_FLAGS_LOOKUP_REPRE UINT32_C(0x2)
+ /* Pair table index. */
+ uint16_t pair_index;
+ /* Pair pf index. */
+ uint8_t pair_pfid;
+ /* Pair vf index. */
+ uint8_t pair_vfid;
+ /* Pair name (32 byte string). */
+ char pair_name[32];
} __attribute__((packed));
-/* Output (24 bytes) */
-struct hwrm_nvm_get_dir_info_output {
- uint16_t error_code;
+/* hwrm_cfa_pair_info_output (size:576b/72B) */
+struct hwrm_cfa_pair_info_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Pair table index. */
+ uint16_t next_pair_index;
+ /* Pair member a's fid. */
+ uint16_t a_fid;
+ /* Logical host number. */
+ uint8_t host_a_index;
+ /* Logical PF number. */
+ uint8_t pf_a_index;
+ /* Pair member a's Linux logical VF number. */
+ uint16_t vf_a_index;
+ /* Rx CFA code. */
+ uint16_t rx_cfa_code_a;
+ /* Tx CFA action. */
+ uint16_t tx_cfa_action_a;
+ /* Pair member b's fid. */
+ uint16_t b_fid;
+ /* Logical host number. */
+ uint8_t host_b_index;
+ /* Logical PF number. */
+ uint8_t pf_b_index;
+	/* Pair member b's Linux logical VF number. */
+ uint16_t vf_b_index;
+ /* Rx CFA code. */
+ uint16_t rx_cfa_code_b;
+ /* Tx CFA action. */
+ uint16_t tx_cfa_action_b;
+ /* Pair mode (0-vf2fn, 1-rep2fn, 2-rep2rep, 3-proxy, 4-pfpair). */
+ uint8_t pair_mode;
+ /* Pair between VF on local host with PF or VF on specified host. */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_VF2FN UINT32_C(0x0)
+ /* Pair between REP on local host with PF or VF on specified host. */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_REP2FN UINT32_C(0x1)
+ /* Pair between REP on local host with REP on specified host. */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_REP2REP UINT32_C(0x2)
+ /* Pair for the proxy interface. */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_PROXY UINT32_C(0x3)
+ /* Pair for the PF interface. */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_PFPAIR UINT32_C(0x4)
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_LAST \
+ HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_PFPAIR
+ /* Pair state. */
+ uint8_t pair_state;
+ /* Pair has been allocated */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_ALLOCATED UINT32_C(0x1)
+ /* Both pair members are active */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE UINT32_C(0x2)
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_LAST \
+ HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE
+ /* Pair name (32 byte string). */
+ char pair_name[32];
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_cfa_vfr_alloc *
+ **********************/
+
+
+/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */
+struct hwrm_cfa_vfr_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint16_t cmpl_ring;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint32_t entries;
- /* Number of directory entries in the directory. */
- uint32_t entry_length;
- /* Size of each directory entry, in bytes. */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t seq_id;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Logical VF number (range: 0 -> MAX_VFS -1). */
+ uint16_t vf_id;
+ /*
+	 * This field is reserved for future use.
+ * It shall be set to 0.
+ */
+ uint16_t reserved;
+ uint8_t unused_0[4];
+ /* VF Representor name (32 byte string). */
+ char vfr_name[32];
} __attribute__((packed));
-/* hwrm_nvm_write */
-/*
- * Note: Write to the allocated NVRAM of an item referenced by an existing
- * directory entry.
- */
-/* Input (48 bytes) */
-struct hwrm_nvm_write_input {
- uint16_t req_type;
+/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */
+struct hwrm_cfa_vfr_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Rx CFA code. */
+ uint16_t rx_cfa_code;
+ /* Tx CFA action. */
+ uint16_t tx_cfa_action;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
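
A representor allocation returns the rx_cfa_code/tx_cfa_action handles that the representor port later uses for steering. A short sketch under the same assumptions as above (hypothetical hwrm_send_message(), assumed HWRM_CFA_VFR_ALLOC opcode, endianness conversions omitted):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

int hwrm_send_message(void *req, size_t req_len, void *resp, size_t resp_len);	/* hypothetical, as above */

static int cfa_vfr_alloc(uint16_t vf_id, const char *vfr_name,
			 uint16_t *rx_cfa_code, uint16_t *tx_cfa_action)
{
	struct hwrm_cfa_vfr_alloc_input req;
	struct hwrm_cfa_vfr_alloc_output resp;
	int rc;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));
	req.req_type = HWRM_CFA_VFR_ALLOC;	/* assumed opcode constant */
	req.vf_id = vf_id;
	snprintf(req.vfr_name, sizeof(req.vfr_name), "%s", vfr_name);

	rc = hwrm_send_message(&req, sizeof(req), &resp, sizeof(resp));
	if (rc != 0 || resp.error_code != 0)
		return -1;

	/* The steering handles that the representor port will use. */
	*rx_cfa_code = resp.rx_cfa_code;
	*tx_cfa_action = resp.tx_cfa_action;
	return 0;
}
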
+
+/*********************
+ * hwrm_cfa_vfr_free *
+ *********************/
+
+
+/* hwrm_cfa_vfr_free_input (size:384b/48B) */
+struct hwrm_cfa_vfr_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+	 * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint64_t host_src_addr;
- /* 64-bit Host Source Address. This is where the source data is. */
- uint16_t dir_type;
+ uint64_t resp_addr;
+ /* VF Representor name (32 byte string). */
+ char vfr_name[32];
+} __attribute__((packed));
+
+/* hwrm_cfa_vfr_free_output (size:128b/16B) */
+struct hwrm_cfa_vfr_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * The Directory Entry Type (valid values are defined in the
- * bnxnvm_directory_type enum defined in the file
- * bnxnvm_defs.h).
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t dir_ordinal;
+ uint8_t valid;
+} __attribute__((packed));
+
+/******************************
+ * hwrm_tunnel_dst_port_query *
+ ******************************/
+
+
+/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_query_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Directory ordinal. The 0-based instance of the combined
- * Directory Entry Type and Extension.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t dir_ext;
+ uint16_t cmpl_ring;
/*
- * The Directory Entry Extension flags (see BNX_DIR_EXT_* in the
- * file bnxnvm_defs.h).
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t dir_attr;
+ uint16_t seq_id;
/*
- * Directory Entry Attribute flags (see BNX_DIR_ATTR_* in the
- * file bnxnvm_defs.h).
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint32_t dir_data_length;
+ uint16_t target_id;
/*
- * Length of data to write, in bytes. May be less than or equal
- * to the allocated size for the directory entry. The data
- * length stored in the directory entry will be updated to
- * reflect this value once the write is complete.
+	 * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t option;
- /* Option. */
- uint16_t flags;
+ uint64_t resp_addr;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+	/* Enhanced Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_IPGRE_V1 \
+ UINT32_C(0xa)
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_IPGRE_V1
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_query_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/*
- * When this bit is '1', the original active image will not be
- * removed. TBD: what purpose is this?
+ * This field represents the identifier of L4 destination port
+ * used for the given tunnel type. This field is valid for
+ * specific tunnel types that use layer 4 (e.g. UDP)
+ * transports for tunneling.
+ */
+ uint16_t tunnel_dst_port_id;
+ /*
+ * This field represents the value of L4 destination port
+ * identified by tunnel_dst_port_id. This field is valid for
+ * specific tunnel types that use layer 4 (e.g. UDP)
+ * transports for tunneling.
+ * This field is in network byte order.
+ *
+ * A value of 0 means that the destination port is not
+ * configured.
*/
- #define HWRM_NVM_WRITE_INPUT_FLAGS_KEEP_ORIG_ACTIVE_IMG UINT32_C(0x1)
- uint32_t dir_item_length;
+ uint16_t tunnel_dst_port_val;
+ uint8_t unused_0[3];
/*
- * The requested length of the allocated NVM for the item, in
- * bytes. This value may be greater than or equal to the
- * specified data length (dir_data_length). If this value is
- * less than the specified data length, it will be ignored. The
- * response will contain the actual allocated item length, which
- * may be greater than the requested item length. The purpose
- * for allocating more than the required number of bytes for an
- * item's data is to pre-allocate extra storage (padding) to
- * accommodate the potential future growth of an item (e.g.
- * upgraded firmware with a size increase, log growth, expanded
- * configuration data).
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint32_t unused_0;
+ uint8_t valid;
} __attribute__((packed));
-/* Output (16 bytes) */
-struct hwrm_nvm_write_output {
- uint16_t error_code;
+/******************************
+ * hwrm_tunnel_dst_port_alloc *
+ ******************************/
+
+
+/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint16_t cmpl_ring;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint32_t dir_item_length;
+ uint16_t seq_id;
/*
- * Length of the allocated NVM for the item, in bytes. The value
- * may be greater than or equal to the specified data length or
- * the requested item length. The actual item length used when
- * creating a new directory entry will be a multiple of an NVM
- * block size.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint16_t dir_idx;
- /* The directory index of the created or modified item. */
- uint8_t unused_0;
- uint8_t valid;
+ uint16_t target_id;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+	 * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
+ uint64_t resp_addr;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+	/* Enhanced Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_IPGRE_V1 \
+ UINT32_C(0xa)
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_IPGRE_V1
+ uint8_t unused_0;
+ /*
+ * This field represents the value of L4 destination port used
+ * for the given tunnel type. This field is valid for
+ * specific tunnel types that use layer 4 (e.g. UDP)
+ * transports for tunneling.
+ *
+ * This field is in network byte order.
+ *
+ * A value of 0 shall fail the command.
+ */
+ uint16_t tunnel_dst_port_val;
+ uint8_t unused_1[4];
} __attribute__((packed));
-/* hwrm_nvm_read */
-/*
- * Note: Read the contents of an NVRAM item as referenced (indexed) by an
- * existing directory entry.
- */
-/* Input (40 bytes) */
-struct hwrm_nvm_read_input {
- uint16_t req_type;
+/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * Identifier of a tunnel L4 destination port value. Only applies to tunnel
+	 * types that have L4 destination port parameters.
*/
- uint16_t cmpl_ring;
+ uint16_t tunnel_dst_port_id;
+ uint8_t unused_0[5];
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint8_t valid;
+} __attribute__((packed));
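
The alloc request above carries tunnel_dst_port_val in network byte order and rejects a value of 0, so the UDP port is converted with rte_cpu_to_be_16() before sending. A minimal sketch, again assuming the hypothetical hwrm_send_message() transport and an assumed HWRM_TUNNEL_DST_PORT_ALLOC opcode constant:

#include <stdint.h>
#include <string.h>
#include <rte_byteorder.h>	/* rte_cpu_to_be_16() */

int hwrm_send_message(void *req, size_t req_len, void *resp, size_t resp_len);	/* hypothetical, as above */

static int tunnel_dst_port_alloc_vxlan(uint16_t udp_port, uint16_t *dst_port_id)
{
	struct hwrm_tunnel_dst_port_alloc_input req;
	struct hwrm_tunnel_dst_port_alloc_output resp;
	int rc;

	if (udp_port == 0)
		return -1;	/* the firmware would fail the command anyway */

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));
	req.req_type = HWRM_TUNNEL_DST_PORT_ALLOC;	/* assumed opcode constant */
	req.tunnel_type = HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
	/* The port is carried in network byte order, e.g. 4789 for VXLAN. */
	req.tunnel_dst_port_val = rte_cpu_to_be_16(udp_port);

	rc = hwrm_send_message(&req, sizeof(req), &resp, sizeof(resp));
	if (rc != 0)
		return rc;

	*dst_port_id = resp.tunnel_dst_port_id;
	return 0;
}
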
+
+/*****************************
+ * hwrm_tunnel_dst_port_free *
+ *****************************/
+
+
+/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint64_t resp_addr;
+ uint16_t cmpl_ring;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint64_t host_dest_addr;
+ uint16_t seq_id;
/*
- * 64-bit Host Destination Address. This is the host address
- * where the data will be written to.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint16_t dir_idx;
- /* The 0-based index of the directory entry. */
- uint8_t unused_0;
- uint8_t unused_1;
- uint32_t offset;
- /* The NVRAM byte-offset to read from. */
- uint32_t len;
- /* The length of the data to be read, in bytes. */
- uint32_t unused_2;
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+	/* Enhanced Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_IPGRE_V1 \
+ UINT32_C(0xa)
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_IPGRE_V1
+ uint8_t unused_0;
+ /*
+ * Identifier of a tunnel L4 destination port value. Only applies to tunnel
+	 * types that have L4 destination port parameters.
+ */
+ uint16_t tunnel_dst_port_id;
+ uint8_t unused_1[4];
} __attribute__((packed));
-/* Output (16 bytes) */
-struct hwrm_nvm_read_output {
- uint16_t error_code;
+/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_1[7];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
+/* ctx_hw_stats (size:1280b/160B) */
+struct ctx_hw_stats {
+ /* Number of received unicast packets */
+ uint64_t rx_ucast_pkts;
+ /* Number of received multicast packets */
+ uint64_t rx_mcast_pkts;
+ /* Number of received broadcast packets */
+ uint64_t rx_bcast_pkts;
+ /* Number of discarded packets on received path */
+ uint64_t rx_discard_pkts;
+ /* Number of dropped packets on received path */
+ uint64_t rx_drop_pkts;
+ /* Number of received bytes for unicast traffic */
+ uint64_t rx_ucast_bytes;
+ /* Number of received bytes for multicast traffic */
+ uint64_t rx_mcast_bytes;
+ /* Number of received bytes for broadcast traffic */
+ uint64_t rx_bcast_bytes;
+ /* Number of transmitted unicast packets */
+ uint64_t tx_ucast_pkts;
+ /* Number of transmitted multicast packets */
+ uint64_t tx_mcast_pkts;
+ /* Number of transmitted broadcast packets */
+ uint64_t tx_bcast_pkts;
+ /* Number of discarded packets on transmit path */
+ uint64_t tx_discard_pkts;
+ /* Number of dropped packets on transmit path */
+ uint64_t tx_drop_pkts;
+ /* Number of transmitted bytes for unicast traffic */
+ uint64_t tx_ucast_bytes;
+ /* Number of transmitted bytes for multicast traffic */
+ uint64_t tx_mcast_bytes;
+ /* Number of transmitted bytes for broadcast traffic */
+ uint64_t tx_bcast_bytes;
+ /* Number of TPA packets */
+ uint64_t tpa_pkts;
+ /* Number of TPA bytes */
+ uint64_t tpa_bytes;
+ /* Number of TPA events */
+ uint64_t tpa_events;
+ /* Number of TPA aborts */
+ uint64_t tpa_aborts;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_stat_ctx_alloc *
+ ***********************/
+
+
+/* hwrm_stat_ctx_alloc_input (size:256b/32B) */
+struct hwrm_stat_ctx_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t cmpl_ring;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
-} __attribute__((packed));
-
-/* Hardware Resource Manager Specification */
-/* Description: This structure is used to specify port description. */
-/*
- * Note: The Hardware Resource Manager (HWRM) manages various hardware resources
- * inside the chip. The HWRM is implemented in firmware, and runs on embedded
- * processors inside the chip. This firmware service is vital part of the chip.
- * The chip can not be used by a driver or HWRM client without the HWRM.
- */
-/* Input (16 bytes) */
-struct input {
- uint16_t req_type;
+ uint16_t seq_id;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint16_t cmpl_ring;
+ uint16_t target_id;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+	 * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint64_t resp_addr;
+	/* This is the address of the statistics block. */
+ uint64_t stats_dma_addr;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The statistic block update period in ms.
+ * e.g. 250ms, 500ms, 750ms, 1000ms.
+ * If update_period_ms is 0, then the stats update
+	 * shall never be done and the DMA address shall not be used.
+ * In this case, the stat block can only be read by
+ * hwrm_stat_ctx_query command.
*/
- uint64_t resp_addr;
+ uint32_t update_period_ms;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * This field is used to specify statistics context specific
+ * configuration flags.
+ */
+ uint8_t stat_ctx_flags;
+ /*
+ * When this bit is set to '1', the statistics context shall be
+ * allocated for RoCE traffic only. In this case, traffic other
+ * than offloaded RoCE traffic shall not be included in this
+ * statistic context.
+ * When this bit is set to '0', the statistics context shall be
+ * used for the network traffic other than offloaded RoCE traffic.
*/
+ #define HWRM_STAT_CTX_ALLOC_INPUT_STAT_CTX_FLAGS_ROCE UINT32_C(0x1)
+ uint8_t unused_0[3];
} __attribute__((packed));
-/* Output (8 bytes) */
-struct output {
- uint16_t error_code;
+/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
+struct hwrm_stat_ctx_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* This is the statistics context ID value. */
+ uint32_t stat_ctx_id;
+ uint8_t unused_0[3];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
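
hwrm_stat_ctx_alloc ties a statistics context to a host buffer: firmware DMAs a ctx_hw_stats block to stats_dma_addr every update_period_ms, or never when the period is 0, in which case the block can only be read through hwrm_stat_ctx_query. A sketch of the allocation, assuming the IO address of a ctx_hw_stats-sized buffer has already been obtained (for example from rte_memzone_reserve()) and reusing the hypothetical hwrm_send_message() helper:

#include <stdint.h>
#include <string.h>

int hwrm_send_message(void *req, size_t req_len, void *resp, size_t resp_len);	/* hypothetical, as above */

static int stat_ctx_alloc(uint64_t stats_iova, uint32_t period_ms,
			  uint32_t *ctx_id)
{
	struct hwrm_stat_ctx_alloc_input req;
	struct hwrm_stat_ctx_alloc_output resp;
	int rc;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));
	req.req_type = HWRM_STAT_CTX_ALLOC;	/* assumed opcode constant */
	/* IO address of a ctx_hw_stats block (160 bytes) owned by the host. */
	req.stats_dma_addr = stats_iova;
	/* 0 disables periodic DMA; the block is then query-only. */
	req.update_period_ms = period_ms;

	rc = hwrm_send_message(&req, sizeof(req), &resp, sizeof(resp));
	if (rc != 0)
		return rc;

	*ctx_id = resp.stat_ctx_id;
	return 0;
}
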
+
+/**********************
+ * hwrm_stat_ctx_free *
+ **********************/
+
+
+/* hwrm_stat_ctx_free_input (size:192b/24B) */
+struct hwrm_stat_ctx_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+	/* ID of the statistics context that is being freed. */
+ uint32_t stat_ctx_id;
+ uint8_t unused_0[4];
} __attribute__((packed));
-/* Short Command Structure (16 bytes) */
-struct hwrm_short_input {
- uint16_t req_type;
+/* hwrm_stat_ctx_free_output (size:128b/16B) */
+struct hwrm_stat_ctx_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* This is the statistics context ID value. */
+ uint32_t stat_ctx_id;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_stat_ctx_query *
+ ***********************/
+
+
+/* hwrm_stat_ctx_query_input (size:192b/24B) */
+struct hwrm_stat_ctx_query_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This field indicates the type of request in the request
- * buffer. The format for the rest of the command (request) is
- * determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t signature;
+ uint16_t cmpl_ring;
/*
- * This field indicates a signature that is used to identify
- * short form of the command listed here. This field shall be
- * set to 17185 (0x4321).
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- /* Signature indicating this is a short form of HWRM command */
- #define HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD UINT32_C(0x4321)
- uint16_t unused_0;
- /* Reserved for future use. */
- uint16_t size;
- /* This value indicates the length of the request. */
- uint64_t req_addr;
- /*
- * This is the host address where the request was written. This
- * area must be 16B aligned.
- */
-} __attribute__((packed));
-
-#define HWRM_GET_HWRM_ERROR_CODE(arg) \
- { \
- typeof(arg) x = (arg); \
- ((x) == 0xf ? "HWRM_ERROR" : \
- ((x) == 0xffff ? "CMD_NOT_SUPPORTED" : \
- ((x) == 0xfffe ? "UNKNOWN_ERR" : \
- ((x) == 0x4 ? "RESOURCE_ALLOC_ERROR" : \
- ((x) == 0x5 ? "INVALID_FLAGS" : \
- ((x) == 0x6 ? "INVALID_ENABLES" : \
- ((x) == 0x0 ? "SUCCESS" : \
- ((x) == 0x1 ? "FAIL" : \
- ((x) == 0x2 ? "INVALID_PARAMS" : \
- ((x) == 0x3 ? "RESOURCE_ACCESS_DENIED" : \
- "Unknown error_code")))))))))) \
- }
-
-/* Return Codes (8 bytes) */
-struct ret_codes {
- uint16_t error_code;
- /* These are numbers assigned to return/error codes. */
- /* Request was successfully executed by the HWRM. */
- #define HWRM_ERR_CODE_SUCCESS (UINT32_C(0x0))
- /* THe HWRM failed to execute the request. */
- #define HWRM_ERR_CODE_FAIL (UINT32_C(0x1))
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* ID of the statistics context that is being queried. */
+ uint32_t stat_ctx_id;
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_stat_ctx_query_output (size:1408b/176B) */
+struct hwrm_stat_ctx_query_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Number of transmitted unicast packets */
+ uint64_t tx_ucast_pkts;
+ /* Number of transmitted multicast packets */
+ uint64_t tx_mcast_pkts;
+ /* Number of transmitted broadcast packets */
+ uint64_t tx_bcast_pkts;
+ /* Number of transmitted packets with error */
+ uint64_t tx_err_pkts;
+ /* Number of dropped packets on transmit path */
+ uint64_t tx_drop_pkts;
+ /* Number of transmitted bytes for unicast traffic */
+ uint64_t tx_ucast_bytes;
+ /* Number of transmitted bytes for multicast traffic */
+ uint64_t tx_mcast_bytes;
+ /* Number of transmitted bytes for broadcast traffic */
+ uint64_t tx_bcast_bytes;
+ /* Number of received unicast packets */
+ uint64_t rx_ucast_pkts;
+ /* Number of received multicast packets */
+ uint64_t rx_mcast_pkts;
+ /* Number of received broadcast packets */
+ uint64_t rx_bcast_pkts;
+ /* Number of received packets with error */
+ uint64_t rx_err_pkts;
+ /* Number of dropped packets on received path */
+ uint64_t rx_drop_pkts;
+ /* Number of received bytes for unicast traffic */
+ uint64_t rx_ucast_bytes;
+ /* Number of received bytes for multicast traffic */
+ uint64_t rx_mcast_bytes;
+ /* Number of received bytes for broadcast traffic */
+ uint64_t rx_bcast_bytes;
+ /* Number of aggregated unicast packets */
+ uint64_t rx_agg_pkts;
+ /* Number of aggregated unicast bytes */
+ uint64_t rx_agg_bytes;
+ /* Number of aggregation events */
+ uint64_t rx_agg_events;
+ /* Number of aborted aggregations */
+ uint64_t rx_agg_aborts;
+ uint8_t unused_0[7];
/*
- * The request contains invalid argument(s) or
- * input parameters.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- #define HWRM_ERR_CODE_INVALID_PARAMS (UINT32_C(0x2))
+ uint8_t valid;
+} __attribute__((packed));
+
+/***************************
+ * hwrm_stat_ctx_clr_stats *
+ ***************************/
+
+
+/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */
+struct hwrm_stat_ctx_clr_stats_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * The requester is not allowed to access the
- * requested resource. This error code shall be
- * provided in a response to a request to query
- * or modify an existing resource that is not
- * accessible by the requester.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED (UINT32_C(0x3))
+ uint16_t cmpl_ring;
/*
- * The HWRM is unable to allocate the requested
- * resource. This code only applies to requests
- * for HWRM resource allocations.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR (UINT32_C(0x4))
- /* Invalid combination of flags is specified in the request. */
- #define HWRM_ERR_CODE_INVALID_FLAGS (UINT32_C(0x5))
+ uint16_t seq_id;
/*
- * Invalid combination of enables fields is
- * specified in the request.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- #define HWRM_ERR_CODE_INVALID_ENABLES (UINT32_C(0x6))
+ uint16_t target_id;
/*
- * Generic HWRM execution error that represents
- * an internal error.
+	 * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- #define HWRM_ERR_CODE_HWRM_ERROR (UINT32_C(0xf))
- /* Unknown error */
- #define HWRM_ERR_CODE_UNKNOWN_ERR (UINT32_C(0xfffe))
- /* Unsupported or invalid command */
- #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED (UINT32_C(0xffff))
- uint16_t unused_0[3];
+ uint64_t resp_addr;
+	/* ID of the statistics context that is being cleared. */
+ uint32_t stat_ctx_id;
+ uint8_t unused_0[4];
} __attribute__((packed));
-/* Output (16 bytes) */
-struct hwrm_err_output {
- uint16_t error_code;
+/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */
+struct hwrm_stat_ctx_clr_stats_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
+/********************
+ * hwrm_pcie_qstats *
+ ********************/
+
+
+/* hwrm_pcie_qstats_input (size:256b/32B) */
+struct hwrm_pcie_qstats_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint32_t opaque_0;
- /* debug info for this error response. */
- uint16_t opaque_1;
- /* debug info for this error response. */
- uint8_t cmd_err;
+ uint16_t cmpl_ring;
/*
- * In the case of an error response, command specific error code
- * is returned in this field.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint8_t valid;
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * The size of PCIe statistics block in bytes.
+ * Firmware will DMA the PCIe statistics to
+ * the host with this field size in the response.
+ */
+ uint16_t pcie_stat_size;
+ uint8_t unused_0[6];
+ /*
+ * This is the host address where
+ * PCIe statistics will be stored
+ */
+ uint64_t pcie_stat_host_addr;
+} __attribute__((packed));
+
+/* hwrm_pcie_qstats_output (size:128b/16B) */
+struct hwrm_pcie_qstats_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* The size of PCIe statistics block in bytes. */
+ uint16_t pcie_stat_size;
+ uint8_t unused_0[5];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* Port Tx Statistics Formats (408 bytes) */
+/* Port Tx Statistics Formats */
+/* tx_port_stats (size:3264b/408B) */
struct tx_port_stats {
- uint64_t tx_64b_frames;
/* Total Number of 64 Bytes frames transmitted */
- uint64_t tx_65b_127b_frames;
+ uint64_t tx_64b_frames;
/* Total Number of 65-127 Bytes frames transmitted */
- uint64_t tx_128b_255b_frames;
+ uint64_t tx_65b_127b_frames;
/* Total Number of 128-255 Bytes frames transmitted */
- uint64_t tx_256b_511b_frames;
+ uint64_t tx_128b_255b_frames;
/* Total Number of 256-511 Bytes frames transmitted */
- uint64_t tx_512b_1023b_frames;
+ uint64_t tx_256b_511b_frames;
/* Total Number of 512-1023 Bytes frames transmitted */
- uint64_t tx_1024b_1518_frames;
+ uint64_t tx_512b_1023b_frames;
/* Total Number of 1024-1518 Bytes frames transmitted */
- uint64_t tx_good_vlan_frames;
+ uint64_t tx_1024b_1518_frames;
/*
- * Total Number of each good VLAN (exludes FCS errors) frame
- * transmitted which is 1519 to 1522 bytes in length inclusive
- * (excluding framing bits but including FCS bytes).
+ * Total Number of each good VLAN (excludes FCS errors)
+ * frame transmitted which is 1519 to 1522 bytes in length
+ * inclusive (excluding framing bits but including FCS bytes).
*/
- uint64_t tx_1519b_2047_frames;
+ uint64_t tx_good_vlan_frames;
/* Total Number of 1519-2047 Bytes frames transmitted */
- uint64_t tx_2048b_4095b_frames;
+ uint64_t tx_1519b_2047_frames;
/* Total Number of 2048-4095 Bytes frames transmitted */
- uint64_t tx_4096b_9216b_frames;
+ uint64_t tx_2048b_4095b_frames;
/* Total Number of 4096-9216 Bytes frames transmitted */
- uint64_t tx_9217b_16383b_frames;
+ uint64_t tx_4096b_9216b_frames;
/* Total Number of 9217-16383 Bytes frames transmitted */
- uint64_t tx_good_frames;
+ uint64_t tx_9217b_16383b_frames;
/* Total Number of good frames transmitted */
- uint64_t tx_total_frames;
+ uint64_t tx_good_frames;
/* Total Number of frames transmitted */
- uint64_t tx_ucast_frames;
+ uint64_t tx_total_frames;
/* Total number of unicast frames transmitted */
- uint64_t tx_mcast_frames;
+ uint64_t tx_ucast_frames;
/* Total number of multicast frames transmitted */
- uint64_t tx_bcast_frames;
+ uint64_t tx_mcast_frames;
/* Total number of broadcast frames transmitted */
- uint64_t tx_pause_frames;
+ uint64_t tx_bcast_frames;
/* Total number of PAUSE control frames transmitted */
- uint64_t tx_pfc_frames;
- /* Total number of PFC/per-priority PAUSE control frames transmitted */
- uint64_t tx_jabber_frames;
+ uint64_t tx_pause_frames;
+ /*
+ * Total number of PFC/per-priority PAUSE
+ * control frames transmitted
+ */
+ uint64_t tx_pfc_frames;
/* Total number of jabber frames transmitted */
- uint64_t tx_fcs_err_frames;
+ uint64_t tx_jabber_frames;
/* Total number of frames transmitted with FCS error */
- uint64_t tx_control_frames;
+ uint64_t tx_fcs_err_frames;
/* Total number of control frames transmitted */
- uint64_t tx_oversz_frames;
+ uint64_t tx_control_frames;
/* Total number of over-sized frames transmitted */
- uint64_t tx_single_dfrl_frames;
+ uint64_t tx_oversz_frames;
/* Total number of frames with single deferral */
- uint64_t tx_multi_dfrl_frames;
+ uint64_t tx_single_dfrl_frames;
/* Total number of frames with multiple deferrals */
- uint64_t tx_single_coll_frames;
+ uint64_t tx_multi_dfrl_frames;
/* Total number of frames with single collision */
- uint64_t tx_multi_coll_frames;
+ uint64_t tx_single_coll_frames;
/* Total number of frames with multiple collisions */
- uint64_t tx_late_coll_frames;
+ uint64_t tx_multi_coll_frames;
/* Total number of frames with late collisions */
- uint64_t tx_excessive_coll_frames;
+ uint64_t tx_late_coll_frames;
/* Total number of frames with excessive collisions */
- uint64_t tx_frag_frames;
+ uint64_t tx_excessive_coll_frames;
/* Total number of fragmented frames transmitted */
- uint64_t tx_err;
+ uint64_t tx_frag_frames;
/* Total number of transmit errors */
- uint64_t tx_tagged_frames;
+ uint64_t tx_err;
/* Total number of single VLAN tagged frames transmitted */
- uint64_t tx_dbl_tagged_frames;
+ uint64_t tx_tagged_frames;
/* Total number of double VLAN tagged frames transmitted */
- uint64_t tx_runt_frames;
+ uint64_t tx_dbl_tagged_frames;
/* Total number of runt frames transmitted */
- uint64_t tx_fifo_underruns;
+ uint64_t tx_runt_frames;
/* Total number of TX FIFO under runs */
- uint64_t tx_pfc_ena_frames_pri0;
+ uint64_t tx_fifo_underruns;
/*
- * Total number of PFC frames with PFC enabled bit for Pri 0
- * transmitted
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 0 transmitted
*/
- uint64_t tx_pfc_ena_frames_pri1;
+ uint64_t tx_pfc_ena_frames_pri0;
/*
- * Total number of PFC frames with PFC enabled bit for Pri 1
- * transmitted
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 1 transmitted
*/
- uint64_t tx_pfc_ena_frames_pri2;
+ uint64_t tx_pfc_ena_frames_pri1;
/*
- * Total number of PFC frames with PFC enabled bit for Pri 2
- * transmitted
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 2 transmitted
*/
- uint64_t tx_pfc_ena_frames_pri3;
+ uint64_t tx_pfc_ena_frames_pri2;
/*
- * Total number of PFC frames with PFC enabled bit for Pri 3
- * transmitted
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 3 transmitted
*/
- uint64_t tx_pfc_ena_frames_pri4;
+ uint64_t tx_pfc_ena_frames_pri3;
/*
- * Total number of PFC frames with PFC enabled bit for Pri 4
- * transmitted
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 4 transmitted
*/
- uint64_t tx_pfc_ena_frames_pri5;
+ uint64_t tx_pfc_ena_frames_pri4;
/*
- * Total number of PFC frames with PFC enabled bit for Pri 5
- * transmitted
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 5 transmitted
*/
- uint64_t tx_pfc_ena_frames_pri6;
+ uint64_t tx_pfc_ena_frames_pri5;
/*
- * Total number of PFC frames with PFC enabled bit for Pri 6
- * transmitted
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 6 transmitted
*/
- uint64_t tx_pfc_ena_frames_pri7;
+ uint64_t tx_pfc_ena_frames_pri6;
/*
- * Total number of PFC frames with PFC enabled bit for Pri 7
- * transmitted
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 7 transmitted
*/
- uint64_t tx_eee_lpi_events;
+ uint64_t tx_pfc_ena_frames_pri7;
/* Total number of EEE LPI Events on TX */
- uint64_t tx_eee_lpi_duration;
+ uint64_t tx_eee_lpi_events;
/* EEE LPI Duration Counter on TX */
- uint64_t tx_llfc_logical_msgs;
+ uint64_t tx_eee_lpi_duration;
/*
- * Total number of Link Level Flow Control (LLFC) messages
+ * Total number of Link Level Flow Control (LLFC) messages
* transmitted
*/
- uint64_t tx_hcfc_msgs;
+ uint64_t tx_llfc_logical_msgs;
/* Total number of HCFC messages transmitted */
- uint64_t tx_total_collisions;
+ uint64_t tx_hcfc_msgs;
/* Total number of TX collisions */
- uint64_t tx_bytes;
+ uint64_t tx_total_collisions;
/* Total number of transmitted bytes */
- uint64_t tx_xthol_frames;
+ uint64_t tx_bytes;
/* Total number of end-to-end HOL frames */
- uint64_t tx_stat_discard;
+ uint64_t tx_xthol_frames;
/* Total Tx Drops per Port reported by STATS block */
- uint64_t tx_stat_error;
+ uint64_t tx_stat_discard;
/* Total Tx Error Drops per Port reported by STATS block */
+ uint64_t tx_stat_error;
} __attribute__((packed));
-/* Port Rx Statistics Formats (528 bytes) */
+/* Port Rx Statistics Formats */
+/* rx_port_stats (size:4224b/528B) */
struct rx_port_stats {
- uint64_t rx_64b_frames;
/* Total Number of 64 Bytes frames received */
- uint64_t rx_65b_127b_frames;
+ uint64_t rx_64b_frames;
/* Total Number of 65-127 Bytes frames received */
- uint64_t rx_128b_255b_frames;
+ uint64_t rx_65b_127b_frames;
/* Total Number of 128-255 Bytes frames received */
- uint64_t rx_256b_511b_frames;
+ uint64_t rx_128b_255b_frames;
/* Total Number of 256-511 Bytes frames received */
- uint64_t rx_512b_1023b_frames;
+ uint64_t rx_256b_511b_frames;
/* Total Number of 512-1023 Bytes frames received */
- uint64_t rx_1024b_1518_frames;
+ uint64_t rx_512b_1023b_frames;
/* Total Number of 1024-1518 Bytes frames received */
- uint64_t rx_good_vlan_frames;
+ uint64_t rx_1024b_1518_frames;
/*
- * Total Number of each good VLAN (exludes FCS errors) frame
- * received which is 1519 to 1522 bytes in length inclusive
- * (excluding framing bits but including FCS bytes).
+ * Total Number of each good VLAN (excludes FCS errors)
+ * frame received which is 1519 to 1522 bytes in length
+ * inclusive (excluding framing bits but including FCS bytes).
*/
- uint64_t rx_1519b_2047b_frames;
+ uint64_t rx_good_vlan_frames;
/* Total Number of 1519-2047 Bytes frames received */
- uint64_t rx_2048b_4095b_frames;
+ uint64_t rx_1519b_2047b_frames;
/* Total Number of 2048-4095 Bytes frames received */
- uint64_t rx_4096b_9216b_frames;
+ uint64_t rx_2048b_4095b_frames;
/* Total Number of 4096-9216 Bytes frames received */
- uint64_t rx_9217b_16383b_frames;
+ uint64_t rx_4096b_9216b_frames;
/* Total Number of 9217-16383 Bytes frames received */
- uint64_t rx_total_frames;
+ uint64_t rx_9217b_16383b_frames;
/* Total number of frames received */
- uint64_t rx_ucast_frames;
+ uint64_t rx_total_frames;
/* Total number of unicast frames received */
- uint64_t rx_mcast_frames;
+ uint64_t rx_ucast_frames;
/* Total number of multicast frames received */
- uint64_t rx_bcast_frames;
+ uint64_t rx_mcast_frames;
/* Total number of broadcast frames received */
- uint64_t rx_fcs_err_frames;
+ uint64_t rx_bcast_frames;
/* Total number of received frames with FCS error */
- uint64_t rx_ctrl_frames;
+ uint64_t rx_fcs_err_frames;
/* Total number of control frames received */
- uint64_t rx_pause_frames;
+ uint64_t rx_ctrl_frames;
/* Total number of PAUSE frames received */
- uint64_t rx_pfc_frames;
+ uint64_t rx_pause_frames;
/* Total number of PFC frames received */
- uint64_t rx_unsupported_opcode_frames;
- /* Total number of frames received with an unsupported opcode */
- uint64_t rx_unsupported_da_pausepfc_frames;
+ uint64_t rx_pfc_frames;
/*
- * Total number of frames received with an unsupported DA for
- * pause and PFC
+ * Total number of frames received with an unsupported
+ * opcode
*/
- uint64_t rx_wrong_sa_frames;
+ uint64_t rx_unsupported_opcode_frames;
+ /*
+ * Total number of frames received with an unsupported
+ * DA for pause and PFC
+ */
+ uint64_t rx_unsupported_da_pausepfc_frames;
/* Total number of frames received with an unsupported SA */
- uint64_t rx_align_err_frames;
+ uint64_t rx_wrong_sa_frames;
/* Total number of received packets with alignment error */
- uint64_t rx_oor_len_frames;
+ uint64_t rx_align_err_frames;
/* Total number of received frames with out-of-range length */
- uint64_t rx_code_err_frames;
+ uint64_t rx_oor_len_frames;
/* Total number of received frames with error termination */
- uint64_t rx_false_carrier_frames;
+ uint64_t rx_code_err_frames;
/*
* Total number of received frames with a false carrier is
- * detected during idle, as defined by RX_ER samples active and
- * RXD is 0xE. The event is reported along with the statistics
- * generated on the next received frame. Only one false carrier
- * condition can be detected and logged between frames. Carrier
- * event, valid for 10M/100M speed modes only.
- */
- uint64_t rx_ovrsz_frames;
+ * detected during idle, as defined by RX_ER samples active
+ * and RXD is 0xE. The event is reported along with the
+ * statistics generated on the next received frame. Only
+ * one false carrier condition can be detected and logged
+ * between frames.
+ *
+ * Carrier event, valid for 10M/100M speed modes only.
+ */
+ uint64_t rx_false_carrier_frames;
/* Total number of over-sized frames received */
- uint64_t rx_jbr_frames;
+ uint64_t rx_ovrsz_frames;
/* Total number of jabber packets received */
- uint64_t rx_mtu_err_frames;
+ uint64_t rx_jbr_frames;
/* Total number of received frames with MTU error */
- uint64_t rx_match_crc_frames;
+ uint64_t rx_mtu_err_frames;
/* Total number of received frames with CRC match */
- uint64_t rx_promiscuous_frames;
+ uint64_t rx_match_crc_frames;
/* Total number of frames received promiscuously */
- uint64_t rx_tagged_frames;
- /* Total number of received frames with one or two VLAN tags */
- uint64_t rx_double_tagged_frames;
+ uint64_t rx_promiscuous_frames;
+ /*
+ * Total number of received frames with one or two VLAN
+ * tags
+ */
+ uint64_t rx_tagged_frames;
/* Total number of received frames with two VLAN tags */
- uint64_t rx_trunc_frames;
+ uint64_t rx_double_tagged_frames;
/* Total number of truncated frames received */
- uint64_t rx_good_frames;
- /* Total number of good frames (without errors) received */
- uint64_t rx_pfc_xon2xoff_frames_pri0;
+ uint64_t rx_trunc_frames;
+ /* Total number of good frames (without errors) received */
+ uint64_t rx_good_frames;
/*
- * Total number of received PFC frames with transition from XON
- * to XOFF on Pri 0
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 0
*/
- uint64_t rx_pfc_xon2xoff_frames_pri1;
+ uint64_t rx_pfc_xon2xoff_frames_pri0;
/*
- * Total number of received PFC frames with transition from XON
- * to XOFF on Pri 1
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 1
*/
- uint64_t rx_pfc_xon2xoff_frames_pri2;
+ uint64_t rx_pfc_xon2xoff_frames_pri1;
/*
- * Total number of received PFC frames with transition from XON
- * to XOFF on Pri 2
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 2
*/
- uint64_t rx_pfc_xon2xoff_frames_pri3;
+ uint64_t rx_pfc_xon2xoff_frames_pri2;
/*
- * Total number of received PFC frames with transition from XON
- * to XOFF on Pri 3
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 3
*/
- uint64_t rx_pfc_xon2xoff_frames_pri4;
+ uint64_t rx_pfc_xon2xoff_frames_pri3;
/*
- * Total number of received PFC frames with transition from XON
- * to XOFF on Pri 4
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 4
*/
- uint64_t rx_pfc_xon2xoff_frames_pri5;
+ uint64_t rx_pfc_xon2xoff_frames_pri4;
/*
- * Total number of received PFC frames with transition from XON
- * to XOFF on Pri 5
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 5
*/
- uint64_t rx_pfc_xon2xoff_frames_pri6;
+ uint64_t rx_pfc_xon2xoff_frames_pri5;
/*
- * Total number of received PFC frames with transition from XON
- * to XOFF on Pri 6
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 6
*/
- uint64_t rx_pfc_xon2xoff_frames_pri7;
+ uint64_t rx_pfc_xon2xoff_frames_pri6;
/*
- * Total number of received PFC frames with transition from XON
- * to XOFF on Pri 7
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 7
*/
- uint64_t rx_pfc_ena_frames_pri0;
+ uint64_t rx_pfc_xon2xoff_frames_pri7;
/*
- * Total number of received PFC frames with PFC enabled bit for
- * Pri 0
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 0
*/
- uint64_t rx_pfc_ena_frames_pri1;
+ uint64_t rx_pfc_ena_frames_pri0;
/*
- * Total number of received PFC frames with PFC enabled bit for
- * Pri 1
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 1
*/
- uint64_t rx_pfc_ena_frames_pri2;
+ uint64_t rx_pfc_ena_frames_pri1;
/*
- * Total number of received PFC frames with PFC enabled bit for
- * Pri 2
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 2
*/
- uint64_t rx_pfc_ena_frames_pri3;
+ uint64_t rx_pfc_ena_frames_pri2;
/*
- * Total number of received PFC frames with PFC enabled bit for
- * Pri 3
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 3
*/
- uint64_t rx_pfc_ena_frames_pri4;
+ uint64_t rx_pfc_ena_frames_pri3;
/*
- * Total number of received PFC frames with PFC enabled bit for
- * Pri 4
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 4
*/
- uint64_t rx_pfc_ena_frames_pri5;
+ uint64_t rx_pfc_ena_frames_pri4;
/*
- * Total number of received PFC frames with PFC enabled bit for
- * Pri 5
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 5
*/
- uint64_t rx_pfc_ena_frames_pri6;
+ uint64_t rx_pfc_ena_frames_pri5;
/*
- * Total number of received PFC frames with PFC enabled bit for
- * Pri 6
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 6
*/
- uint64_t rx_pfc_ena_frames_pri7;
+ uint64_t rx_pfc_ena_frames_pri6;
/*
- * Total number of received PFC frames with PFC enabled bit for
- * Pri 7
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 7
*/
- uint64_t rx_sch_crc_err_frames;
+ uint64_t rx_pfc_ena_frames_pri7;
/* Total Number of frames received with SCH CRC error */
- uint64_t rx_undrsz_frames;
+ uint64_t rx_sch_crc_err_frames;
/* Total Number of under-sized frames received */
- uint64_t rx_frag_frames;
+ uint64_t rx_undrsz_frames;
/* Total Number of fragmented frames received */
- uint64_t rx_eee_lpi_events;
+ uint64_t rx_frag_frames;
/* Total number of RX EEE LPI Events */
- uint64_t rx_eee_lpi_duration;
+ uint64_t rx_eee_lpi_events;
/* EEE LPI Duration Counter on RX */
- uint64_t rx_llfc_physical_msgs;
+ uint64_t rx_eee_lpi_duration;
/*
- * Total number of physical type Link Level Flow Control (LLFC)
- * messages received
+ * Total number of physical type Link Level Flow Control
+ * (LLFC) messages received
*/
- uint64_t rx_llfc_logical_msgs;
+ uint64_t rx_llfc_physical_msgs;
/*
- * Total number of logical type Link Level Flow Control (LLFC)
- * messages received
+ * Total number of logical type Link Level Flow Control
+ * (LLFC) messages received
*/
- uint64_t rx_llfc_msgs_with_crc_err;
+ uint64_t rx_llfc_logical_msgs;
/*
- * Total number of logical type Link Level Flow Control (LLFC)
- * messages received with CRC error
+ * Total number of logical type Link Level Flow Control
+ * (LLFC) messages received with CRC error
*/
- uint64_t rx_hcfc_msgs;
+ uint64_t rx_llfc_msgs_with_crc_err;
/* Total number of HCFC messages received */
- uint64_t rx_hcfc_msgs_with_crc_err;
+ uint64_t rx_hcfc_msgs;
/* Total number of HCFC messages received with CRC error */
- uint64_t rx_bytes;
+ uint64_t rx_hcfc_msgs_with_crc_err;
/* Total number of received bytes */
- uint64_t rx_runt_bytes;
+ uint64_t rx_bytes;
/* Total number of bytes received in runt frames */
- uint64_t rx_runt_frames;
+ uint64_t rx_runt_bytes;
/* Total number of runt frames received */
- uint64_t rx_stat_discard;
+ uint64_t rx_runt_frames;
/* Total Rx Discards per Port reported by STATS block */
- uint64_t rx_stat_err;
- /* Total Rx Error Drops per Port reported by STATS block */
+ uint64_t rx_stat_discard;
+ /* Total Rx Error Drops per Port reported by STATS block */
+ uint64_t rx_stat_err;
} __attribute__((packed));
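
The size annotations in the structure comments above (tx_port_stats at 408 bytes, rx_port_stats at 528 bytes) are what the port-statistics DMA buffers are sized against, so a compile-time check is a cheap guard against packing regressions. A minimal sketch, assuming C11 and that this header is reachable as hsi_struct_def_dpdk.h (the include name is an assumption):

#include <assert.h>
#include "hsi_struct_def_dpdk.h"	/* assumed name of this header */

/* Fail the build if the packed layouts drift from the documented sizes. */
static_assert(sizeof(struct tx_port_stats) == 408,
	      "tx_port_stats must be 408 bytes (3264 bits)");
static_assert(sizeof(struct rx_port_stats) == 528,
	      "rx_port_stats must be 528 bytes (4224 bits)");
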
-/* Periodic Statistics Context DMA to host (160 bytes) */
-/*
- * per-context HW statistics -- chip view
- */
+/* Port Rx Statistics extended Formats */
+/* rx_port_stats_ext (size:320b/40B) */
+struct rx_port_stats_ext {
+ /* Number of times link state changed to down */
+ uint64_t link_down_events;
+ /* Number of times the idle rings with pause bit are found */
+ uint64_t continuous_pause_events;
+ /* Number of times the active rings pause bit resumed back */
+ uint64_t resume_pause_events;
+ /* Number of times the ROCE cos queue PFC is disabled to avoid pause flood/burst */
+ uint64_t continuous_roce_pause_events;
+ /* Number of times the ROCE cos queue PFC is re-enabled */
+ uint64_t resume_roce_pause_events;
+} __attribute__((packed));
+
+/* PCIe Statistics Formats */
+/* pcie_ctx_hw_stats (size:768b/96B) */
+struct pcie_ctx_hw_stats {
+ /* Number of physical layer receiver errors */
+ uint64_t pcie_pl_signal_integrity;
+ /* Number of DLLP CRC errors detected by Data Link Layer */
+ uint64_t pcie_dl_signal_integrity;
+ /*
+ * Number of TLP LCRC and sequence number errors detected
+ * by Data Link Layer
+ */
+ uint64_t pcie_tl_signal_integrity;
+ /* Number of times LTSSM entered Recovery state */
+ uint64_t pcie_link_integrity;
+ /* Number of TLP bytes that have been transmitted */
+ uint64_t pcie_tx_traffic_rate;
+ /* Number of TLP bytes that have been received */
+ uint64_t pcie_rx_traffic_rate;
+ /* Number of DLLP bytes that have been transmitted */
+ uint64_t pcie_tx_dllp_statistics;
+ /* Number of DLLP bytes that have been received */
+ uint64_t pcie_rx_dllp_statistics;
+ /*
+ * Number of times spent in each phase of gen3
+ * equalization
+ */
+ uint64_t pcie_equalization_time;
+ /* Records the last 16 transitions of the LTSSM */
+ uint32_t pcie_ltssm_histogram[4];
+ /*
+ * Record the last 8 reasons on why LTSSM transitioned
+ * to Recovery
+ */
+ uint64_t pcie_recovery_histogram;
+} __attribute__((packed));
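
Taken together, hwrm_pcie_qstats_input and pcie_ctx_hw_stats describe a simple DMA contract: the driver advertises a buffer size in pcie_stat_size and its IOVA in pcie_stat_host_addr, and firmware writes at most that many bytes of pcie_ctx_hw_stats into the buffer. A hedged sketch of filling the request follows; fill_pcie_qstats_req() is a hypothetical helper, the statistics buffer is expected to come from rte_malloc() so its IOVA can be resolved, and the common header fields (req_type, seq_id, resp_addr) are assumed to be filled by the driver's normal HWRM send path.

#include <string.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include "hsi_struct_def_dpdk.h"	/* assumed name of this header */

/*
 * Hypothetical helper: prepare a PCIe statistics query.  'stats' must be
 * DMA-able memory owned by the caller for the lifetime of the command.
 */
static int
fill_pcie_qstats_req(struct hwrm_pcie_qstats_input *req,
		     struct pcie_ctx_hw_stats *stats)
{
	rte_iova_t iova = rte_malloc_virt2iova(stats);

	if (iova == RTE_BAD_IOVA)
		return -1;

	memset(stats, 0, sizeof(*stats));
	memset(req, 0, sizeof(*req));
	/* Firmware DMAs at most pcie_stat_size bytes into the buffer. */
	req->pcie_stat_size = sizeof(*stats);
	req->pcie_stat_host_addr = iova;
	return 0;
}
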
+
+/**********************
+ * hwrm_exec_fwd_resp *
+ **********************/
+
+
+/* hwrm_exec_fwd_resp_input (size:1024b/128B) */
+struct hwrm_exec_fwd_resp_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This is an encapsulated request. This request should
+ * be executed by the HWRM and the response should be
+ * provided in the response buffer inside the encapsulated
+ * request.
+ */
+ uint32_t encap_request[26];
+ /*
+ * This value indicates the target id of the response to
+ * the encapsulated request.
+ * 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
+ */
+ uint16_t encap_resp_target_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_exec_fwd_resp_output (size:128b/16B) */
+struct hwrm_exec_fwd_resp_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
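
hwrm_exec_fwd_resp is how a PF asks firmware to execute a command that a VF originally issued: the raw VF request is copied into encap_request (at most 26 32-bit words) and encap_resp_target_id names the function that should receive the response. A sketch under those assumptions; fill_exec_fwd_req() is a hypothetical helper and the common HWRM header fields are left to the caller's send path.

#include <stdint.h>
#include <string.h>
#include "hsi_struct_def_dpdk.h"	/* assumed name of this header */

/*
 * Hypothetical helper: encapsulate a VF-originated HWRM request so the
 * PF can ask firmware to execute it on the VF's behalf.
 */
static int
fill_exec_fwd_req(struct hwrm_exec_fwd_resp_input *req,
		  const void *vf_req, size_t vf_req_len, uint16_t vf_fid)
{
	/* encap_request holds at most 26 32-bit words (104 bytes). */
	if (vf_req_len > sizeof(req->encap_request))
		return -1;

	memset(req->encap_request, 0, sizeof(req->encap_request));
	memcpy(req->encap_request, vf_req, vf_req_len);
	/* The response to the encapsulated request goes back to the VF. */
	req->encap_resp_target_id = vf_fid;
	return 0;
}
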
+
+/************************
+ * hwrm_reject_fwd_resp *
+ ************************/
+
+
+/* hwrm_reject_fwd_resp_input (size:1024b/128B) */
+struct hwrm_reject_fwd_resp_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This is an encapsulated request. This request should
+ * be rejected by the HWRM and the error response should be
+ * provided in the response buffer inside the encapsulated
+ * request.
+ */
+ uint32_t encap_request[26];
+ /*
+ * This value indicates the target id of the response to
+ * the encapsulated request.
+ * 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
+ */
+ uint16_t encap_resp_target_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_reject_fwd_resp_output (size:128b/16B) */
+struct hwrm_reject_fwd_resp_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*****************
+ * hwrm_fwd_resp *
+ *****************/
+
+
+/* hwrm_fwd_resp_input (size:1024b/128B) */
+struct hwrm_fwd_resp_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This value indicates the target id of the encapsulated
+ * response.
+ * 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
+ */
+ uint16_t encap_resp_target_id;
+ /*
+ * This value indicates the completion ring the encapsulated
+ * response will be optionally completed on. If the value is
+ * -1, then no CR completion shall be generated for the
+ * encapsulated response. Any other value must be a
+ * valid CR ring_id value. If a valid encap_resp_cmpl_ring
+ * is provided, then a CR completion shall be generated for
+ * the encapsulated response.
+ */
+ uint16_t encap_resp_cmpl_ring;
+ /* This field indicates the length of encapsulated response. */
+ uint16_t encap_resp_len;
+ uint8_t unused_0;
+ uint8_t unused_1;
+ /*
+ * This is the host address where the encapsulated response
+ * will be written.
+ * This area must be 16B aligned and must be cleared to zero
+ * before the original request is made.
+ */
+ uint64_t encap_resp_addr;
+ /* This is an encapsulated response. */
+ uint32_t encap_resp[24];
+} __attribute__((packed));
+
+/* hwrm_fwd_resp_output (size:128b/16B) */
+struct hwrm_fwd_resp_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
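
The encap_resp_addr rules above (16-byte aligned, cleared to zero before the original request is made) are the easiest part of hwrm_fwd_resp to get wrong. A small allocation sketch, assuming rte_zmalloc() supplies the zeroed, aligned buffer; alloc_encap_resp_area() is a hypothetical helper name.

#include <stdint.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include "hsi_struct_def_dpdk.h"	/* assumed name of this header */

/*
 * Hypothetical helper: allocate the buffer that will receive an
 * encapsulated response.  Per the encap_resp_addr description, the area
 * must be 16B aligned and cleared to zero before the original request
 * is made; rte_zmalloc() provides both properties.
 */
static int
alloc_encap_resp_area(struct hwrm_fwd_resp_input *req, uint16_t len)
{
	void *buf = rte_zmalloc("encap_resp", len, 16);

	if (buf == NULL)
		return -1;

	req->encap_resp_addr = rte_malloc_virt2iova(buf);
	req->encap_resp_len = len;
	/* -1 means no completion-ring completion for the response. */
	req->encap_resp_cmpl_ring = (uint16_t)-1;
	return 0;
}
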
+
+/*****************************
+ * hwrm_fwd_async_event_cmpl *
+ *****************************/
+
+
+/* hwrm_fwd_async_event_cmpl_input (size:320b/40B) */
+struct hwrm_fwd_async_event_cmpl_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This value indicates the target id of the encapsulated
+ * asynchronous event.
+ * 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - Broadcast to all children VFs (only applicable when
+ * a PF is the requester)
+ */
+ uint16_t encap_async_event_target_id;
+ uint8_t unused_0[6];
+ /* This is an encapsulated asynchronous event completion. */
+ uint32_t encap_async_event_cmpl[4];
+} __attribute__((packed));
+
+/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */
+struct hwrm_fwd_async_event_cmpl_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************
+ * hwrm_nvm_raw_write_blk *
+ **************************/
+
+
+/* hwrm_nvm_raw_write_blk_input (size:256b/32B) */
+struct hwrm_nvm_raw_write_blk_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * 64-bit Host Source Address.
+ * This is the location of the source data to be written.
+ */
+ uint64_t host_src_addr;
+ /*
+ * 32-bit Destination Address.
+ * This is the NVRAM byte-offset where the source data will be written to.
+ */
+ uint32_t dest_addr;
+ /* Length of data to be written, in bytes. */
+ uint32_t len;
+} __attribute__((packed));
+
+/* hwrm_nvm_raw_write_blk_output (size:128b/16B) */
+struct hwrm_nvm_raw_write_blk_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*****************
+ * hwrm_nvm_read *
+ *****************/
+
+
+/* hwrm_nvm_read_input (size:320b/40B) */
+struct hwrm_nvm_read_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * 64-bit Host Destination Address.
+ * This is the host address where the data will be written to.
+ */
+ uint64_t host_dest_addr;
+ /* The 0-based index of the directory entry. */
+ uint16_t dir_idx;
+ uint8_t unused_0[2];
+ /* The NVRAM byte-offset to read from. */
+ uint32_t offset;
+ /* The length of the data to be read, in bytes. */
+ uint32_t len;
+ uint8_t unused_1[4];
+} __attribute__((packed));
+
+/* hwrm_nvm_read_output (size:128b/16B) */
+struct hwrm_nvm_read_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
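
hwrm_nvm_read follows the same DMA pattern: the driver supplies a buffer via host_dest_addr and firmware writes `len` bytes from the given directory entry, starting at `offset`, into it. A hedged sketch; fill_nvm_read_req() is hypothetical and the buffer is taken from rte_malloc() so its IOVA can be derived.

#include <stdint.h>
#include <string.h>
#include <rte_malloc.h>
#include "hsi_struct_def_dpdk.h"	/* assumed name of this header */

/*
 * Hypothetical helper: read 'len' bytes from directory entry 'dir_idx'
 * starting at 'offset'.  Firmware DMAs the data to host_dest_addr, so
 * the buffer must be DMA-able (rte_malloc here).  Returns the buffer
 * the caller should read once the command completes, or NULL.
 */
static void *
fill_nvm_read_req(struct hwrm_nvm_read_input *req,
		  uint16_t dir_idx, uint32_t offset, uint32_t len)
{
	void *buf = rte_malloc("nvm_read", len, 0);

	if (buf == NULL)
		return NULL;

	memset(req, 0, sizeof(*req));
	req->dir_idx = dir_idx;
	req->offset = offset;
	req->len = len;
	req->host_dest_addr = rte_malloc_virt2iova(buf);
	return buf;
}
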
+
+/*********************
+ * hwrm_nvm_raw_dump *
+ *********************/
+
+
+/* hwrm_nvm_raw_dump_input (size:256b/32B) */
+struct hwrm_nvm_raw_dump_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * 64-bit Host Destination Address.
+ * This is the host address where the data will be written to.
+ */
+ uint64_t host_dest_addr;
+ /* 32-bit NVRAM byte-offset to read from. */
+ uint32_t offset;
+ /* Total length of NVRAM contents to be read, in bytes. */
+ uint32_t len;
+} __attribute__((packed));
+
+/* hwrm_nvm_raw_dump_output (size:128b/16B) */
+struct hwrm_nvm_raw_dump_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/****************************
+ * hwrm_nvm_get_dir_entries *
+ ****************************/
+
+
+/* hwrm_nvm_get_dir_entries_input (size:192b/24B) */
+struct hwrm_nvm_get_dir_entries_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * 64-bit Host Destination Address.
+ * This is the host address where the directory will be written.
+ */
+ uint64_t host_dest_addr;
+} __attribute__((packed));
+
+/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */
+struct hwrm_nvm_get_dir_entries_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*************************
+ * hwrm_nvm_get_dir_info *
+ *************************/
+
+
+/* hwrm_nvm_get_dir_info_input (size:128b/16B) */
+struct hwrm_nvm_get_dir_info_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} __attribute__((packed));
+
+/* hwrm_nvm_get_dir_info_output (size:192b/24B) */
+struct hwrm_nvm_get_dir_info_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Number of directory entries in the directory. */
+ uint32_t entries;
+ /* Size of each directory entry, in bytes. */
+ uint32_t entry_length;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
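
Reading the NVM directory is a two-step sequence: hwrm_nvm_get_dir_info reports the entry count and per-entry size, and hwrm_nvm_get_dir_entries then DMAs the listing into a buffer of at least entries * entry_length bytes. A minimal sketch of that sizing step (the helper name is assumed):

#include <stdint.h>
#include "hsi_struct_def_dpdk.h"	/* assumed name of this header */

/* Bytes needed for the host buffer passed to hwrm_nvm_get_dir_entries. */
static uint64_t
nvm_dir_buf_size(const struct hwrm_nvm_get_dir_info_output *info)
{
	return (uint64_t)info->entries * info->entry_length;
}
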
+
+/******************
+ * hwrm_nvm_write *
+ ******************/
+
+
+/* hwrm_nvm_write_input (size:384b/48B) */
+struct hwrm_nvm_write_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * 64-bit Host Source Address.
+ * This is where the source data is.
+ */
+ uint64_t host_src_addr;
+ /* The Directory Entry Type (valid values are defined in the bnxnvm_directory_type enum defined in the file bnxnvm_defs.h). */
+ uint16_t dir_type;
+ /*
+ * Directory ordinal.
+ * The 0-based instance of the combined Directory Entry Type and Extension.
+ */
+ uint16_t dir_ordinal;
+ /* The Directory Entry Extension flags (see BNX_DIR_EXT_* in the file bnxnvm_defs.h). */
+ uint16_t dir_ext;
+ /* Directory Entry Attribute flags (see BNX_DIR_ATTR_* in the file bnxnvm_defs.h). */
+ uint16_t dir_attr;
+ /*
+ * Length of data to write, in bytes. May be less than or equal to the allocated size for the directory entry.
+ * The data length stored in the directory entry will be updated to reflect this value once the write is complete.
+ */
+ uint32_t dir_data_length;
+ /* Option. */
+ uint16_t option;
+ uint16_t flags;
+ /*
+ * When this bit is '1', the original active image
+ * will not be removed. TBD: what purpose is this?
+ */
+ #define HWRM_NVM_WRITE_INPUT_FLAGS_KEEP_ORIG_ACTIVE_IMG \
+ UINT32_C(0x1)
+ /*
+ * The requested length of the allocated NVM for the item, in bytes. This value may be greater than or equal to the specified data length (dir_data_length).
+ * If this value is less than the specified data length, it will be ignored.
+ * The response will contain the actual allocated item length, which may be greater than the requested item length.
+ * The purpose for allocating more than the required number of bytes for an item's data is to pre-allocate extra storage (padding) to accommodate
+ * the potential future growth of an item (e.g. upgraded firmware with a size increase, log growth, expanded configuration data).
+ */
+ uint32_t dir_item_length;
+ uint32_t unused_0;
+} __attribute__((packed));
+
+/* hwrm_nvm_write_output (size:128b/16B) */
+struct hwrm_nvm_write_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * Length of the allocated NVM for the item, in bytes. The value may be greater than or equal to the specified data length or the requested item length.
+ * The actual item length used when creating a new directory entry will be a multiple of an NVM block size.
+ */
+ uint32_t dir_item_length;
+ /* The directory index of the created or modified item. */
+ uint16_t dir_idx;
+ uint8_t unused_0;
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_nvm_write_cmd_err (size:64b/8B) */
+struct hwrm_nvm_write_cmd_err {
+ /*
+ * command specific error codes that goes to
+ * the cmd_err field in Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_NVM_WRITE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ /* Unable to complete operation due to fragmentation */
+ #define HWRM_NVM_WRITE_CMD_ERR_CODE_FRAG_ERR UINT32_C(0x1)
+ /* nvm is completely full. */
+ #define HWRM_NVM_WRITE_CMD_ERR_CODE_NO_SPACE UINT32_C(0x2)
+ #define HWRM_NVM_WRITE_CMD_ERR_CODE_LAST \
+ HWRM_NVM_WRITE_CMD_ERR_CODE_NO_SPACE
+ uint8_t unused_0[7];
+} __attribute__((packed));
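
The codes in hwrm_nvm_write_cmd_err are returned through the cmd_err field of the common HWRM error response, so a driver typically translates them into errno-style values before propagating the failure. A sketch of that mapping; the helper name and the particular errno values chosen are assumptions.

#include <errno.h>
#include <stdint.h>
#include "hsi_struct_def_dpdk.h"	/* assumed name of this header */

/*
 * Hypothetical helper: map the command-specific error code reported for
 * hwrm_nvm_write to an errno-style value the rest of the driver can use.
 */
static int
nvm_write_cmd_err_to_errno(uint8_t code)
{
	switch (code) {
	case HWRM_NVM_WRITE_CMD_ERR_CODE_FRAG_ERR:
		/* Unable to complete the operation due to fragmentation. */
		return -ENOMEM;
	case HWRM_NVM_WRITE_CMD_ERR_CODE_NO_SPACE:
		/* NVM is completely full. */
		return -ENOSPC;
	case HWRM_NVM_WRITE_CMD_ERR_CODE_UNKNOWN:
	default:
		return -EIO;
	}
}
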
+
+/*******************
+ * hwrm_nvm_modify *
+ *******************/
+
+
+/* hwrm_nvm_modify_input (size:320b/40B) */
+struct hwrm_nvm_modify_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * 64-bit Host Source Address.
+ * This is where the modified data is.
+ */
+ uint64_t host_src_addr;
+ /* 16-bit directory entry index. */
+ uint16_t dir_idx;
+ uint8_t unused_0[2];
+ /* 32-bit NVRAM byte-offset to modify content from. */
+ uint32_t offset;
+ /*
+ * Length of data to be modified, in bytes. The length shall
+ * be non-zero.
+ */
+ uint32_t len;
+ uint8_t unused_1[4];
+} __attribute__((packed));
+
+/* hwrm_nvm_modify_output (size:128b/16B) */
+struct hwrm_nvm_modify_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***************************
+ * hwrm_nvm_find_dir_entry *
+ ***************************/
+
+
+/* hwrm_nvm_find_dir_entry_input (size:256b/32B) */
+struct hwrm_nvm_find_dir_entry_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the dir_idx_valid field to be
+ * configured.
+ */
+ #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_ENABLES_DIR_IDX_VALID \
+ UINT32_C(0x1)
+ /* Directory Entry Index */
+ uint16_t dir_idx;
+ /* Directory Entry (Image) Type */
+ uint16_t dir_type;
+ /*
+ * Directory ordinal.
+ * The instance of this Directory Type
+ */
+ uint16_t dir_ordinal;
+ /* The Directory Entry Extension flags. */
+ uint16_t dir_ext;
+ /* This value indicates the search option using dir_ordinal. */
+ uint8_t opt_ordinal;
+ /* This value indicates the search option using dir_ordinal. */
+ #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_MASK UINT32_C(0x3)
+ #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_SFT 0
+ /* Equal to specified ordinal value. */
+ #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_EQ UINT32_C(0x0)
+ /* Greater than or equal to specified ordinal value */
+ #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_GE UINT32_C(0x1)
+ /* Greater than specified ordinal value */
+ #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_GT UINT32_C(0x2)
+ #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_LAST \
+ HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_GT
+ uint8_t unused_0[3];
+} __attribute__((packed));
+
+/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */
+struct hwrm_nvm_find_dir_entry_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Allocated NVRAM for this directory entry, in bytes. */
+ uint32_t dir_item_length;
+ /* Size of the stored data for this directory entry, in bytes. */
+ uint32_t dir_data_length;
+ /*
+ * Firmware version.
+ * Only valid if the directory entry is for embedded firmware stored in APE_BIN Format.
+ */
+ uint32_t fw_ver;
+ /* Directory ordinal. */
+ uint16_t dir_ordinal;
+ /* Directory Entry Index */
+ uint16_t dir_idx;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
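
hwrm_nvm_find_dir_entry supports lookup either by directory index (when the DIR_IDX_VALID enable is set) or by type/ordinal with one of the opt_ordinal comparison modes. A hedged sketch of the second form, searching for the lowest-numbered instance of a type; leaving `enables` at zero to mean a type/ordinal search is an assumption based on the DIR_IDX_VALID description above.

#include <stdint.h>
#include <string.h>
#include "hsi_struct_def_dpdk.h"	/* assumed name of this header */

/*
 * Hypothetical helper: look up the first directory entry of 'dir_type'.
 * Searching with dir_ordinal = 0 and the "greater than or equal" option
 * returns the lowest-numbered instance of that type, if any.
 */
static void
fill_find_first_entry_req(struct hwrm_nvm_find_dir_entry_input *req,
			  uint16_t dir_type)
{
	memset(req, 0, sizeof(*req));
	req->enables = 0;	/* search by type/ordinal, not by dir_idx */
	req->dir_type = dir_type;
	req->dir_ordinal = 0;
	req->dir_ext = 0;
	req->opt_ordinal = HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_GE;
}
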
+
+/****************************
+ * hwrm_nvm_erase_dir_entry *
+ ****************************/
+
+
+/* hwrm_nvm_erase_dir_entry_input (size:192b/24B) */
+struct hwrm_nvm_erase_dir_entry_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Directory Entry Index */
+ uint16_t dir_idx;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_nvm_erase_dir_entry_output (size:128b/16B) */
+struct hwrm_nvm_erase_dir_entry_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*************************
+ * hwrm_nvm_get_dev_info *
+ *************************/
+
+
+/* hwrm_nvm_get_dev_info_input (size:128b/16B) */
+struct hwrm_nvm_get_dev_info_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} __attribute__((packed));
+
+/* hwrm_nvm_get_dev_info_output (size:256b/32B) */
+struct hwrm_nvm_get_dev_info_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Manufacturer ID. */
+ uint16_t manufacturer_id;
+ /* Device ID. */
+ uint16_t device_id;
+ /* Sector size of the NVRAM device. */
+ uint32_t sector_size;
+ /* Total size, in bytes, of the NVRAM device. */
+ uint32_t nvram_size;
+ uint32_t reserved_size;
+ /* Available size that can be used, in bytes. This is the NVRAM size minus the used size and the reserved size. */
+ uint32_t available_size;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
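
Per the comments above, `available_size` is the NVRAM size minus both the used and the reserved space, so the space currently consumed by directory entries can be derived directly from a response. A small sketch, assuming the response has already been validated; the helper name is illustrative.

/* Derive the NVRAM space currently in use from a get_dev_info response. */
static uint32_t
hwrm_nvm_used_bytes(const struct hwrm_nvm_get_dev_info_output *resp)
{
	/* available = nvram - used - reserved  =>  used = nvram - reserved - available */
	return resp->nvram_size - resp->reserved_size - resp->available_size;
}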
+
+/**************************
+ * hwrm_nvm_mod_dir_entry *
+ **************************/
+
+
+/* hwrm_nvm_mod_dir_entry_input (size:256b/32B) */
+struct hwrm_nvm_mod_dir_entry_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the checksum field to be
+ * configured.
+ */
+ #define HWRM_NVM_MOD_DIR_ENTRY_INPUT_ENABLES_CHECKSUM UINT32_C(0x1)
+ /* Directory Entry Index */
+ uint16_t dir_idx;
+ /*
+ * Directory ordinal.
+ * The (0-based) instance of this Directory Type.
+ */
+ uint16_t dir_ordinal;
+ /* The Directory Entry Extension flags (see BNX_DIR_EXT_* for extension flag definitions). */
+ uint16_t dir_ext;
+ /* Directory Entry Attribute flags (see BNX_DIR_ATTR_* for attribute flag definitions). */
+ uint16_t dir_attr;
+ /*
+ * If valid, then this field updates the checksum
+ * value of the content in the directory entry.
+ */
+ uint32_t checksum;
+} __attribute__((packed));
+
+/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */
+struct hwrm_nvm_mod_dir_entry_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************
+ * hwrm_nvm_verify_update *
+ **************************/
+
+
+/* hwrm_nvm_verify_update_input (size:192b/24B) */
+struct hwrm_nvm_verify_update_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Directory Entry Type, to be verified. */
+ uint16_t dir_type;
+ /*
+ * Directory ordinal.
+ * The instance of the Directory Type to be verified.
+ */
+ uint16_t dir_ordinal;
+ /*
+ * The Directory Entry Extension flags.
+ * The "UPDATE" extension flag must be set in this value.
+ * A corresponding directory entry with the same type and ordinal values but *without*
+ * the "UPDATE" extension flag must also exist. The other flags of the extension must
+ * be identical between the active and update entries.
+ */
+ uint16_t dir_ext;
+ uint8_t unused_0[2];
+} __attribute__((packed));
+
+/* hwrm_nvm_verify_update_output (size:128b/16B) */
+struct hwrm_nvm_verify_update_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
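
The verify_update request names the staged copy by type and ordinal, and its `dir_ext` field must carry the "UPDATE" extension bit while an otherwise identical active entry (same type and ordinal, flag clear) already exists. A fill-in sketch follows; the UPDATE bit is passed in by the caller because the BNX_DIR_EXT_* definitions live outside this header, and the helper name is illustrative.

/* Prepare a verify_update request for the staged copy of a directory entry. */
static void
hwrm_nvm_fill_verify_update(struct hwrm_nvm_verify_update_input *req,
			    uint16_t type, uint16_t ordinal,
			    uint16_t update_ext_flag)
{
	req->dir_type = type;		/* directory entry type to verify */
	req->dir_ordinal = ordinal;	/* instance of that type */
	req->dir_ext = update_ext_flag;	/* must include the "UPDATE" extension bit */
}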
+
+/***************************
+ * hwrm_nvm_install_update *
+ ***************************/
+
+
+/* hwrm_nvm_install_update_input (size:192b/24B) */
+struct hwrm_nvm_install_update_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Installation type. If a value from 3 through 0xffff is used,
+ * only packaged items with that type value will be installed and
+ * conditional installation directives for those packaged items
+ * will be overridden (i.e. 'create' or 'replace' will be treated
+ * as 'install').
+ */
+ uint32_t install_type;
+ /*
+ * Perform a normal package installation. Conditional installation
+ * directives (e.g. 'create' and 'replace') of packaged items
+ * will be followed.
+ */
+ #define HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_NORMAL UINT32_C(0x0)
+ /*
+ * Install all packaged items regardless of installation directive
+ * (i.e. treat all packaged items as though they have an installation
+ * directive of 'install').
+ */
+ #define HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_ALL \
+ UINT32_C(0xffffffff)
+ #define HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_LAST \
+ HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_ALL
+ uint16_t flags;
+ /* If set to 1, then securely erase all unused locations in persistent storage. */
+ #define HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_ERASE_UNUSED_SPACE \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, then unspecified images (images not in the package file) will be safely deleted.
+ * When combined with erase_unused_space then unspecified images will be securely erased.
+ */
+ #define HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_REMOVE_UNUSED_PKG \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, FW will defragment the NVM if defragmentation is required for the update.
+ * Allow additional time for this command to complete if this bit is set to 1.
+ */
+ #define HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_ALLOWED_TO_DEFRAG \
+ UINT32_C(0x4)
+ uint8_t unused_0[2];
+} __attribute__((packed));
+
+/* hwrm_nvm_install_update_output (size:192b/24B) */
+struct hwrm_nvm_install_update_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * Bit-mask of successfully installed items.
+ * Bit 0 corresponds to the first packaged item, bit 1 to the second item, etc.
+ * A value of 0 indicates that no items were successfully installed.
+ */
+ uint64_t installed_items;
+ /* result is 8 b */
+ uint8_t result;
+ /* There was no problem with the package installation. */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_SUCCESS UINT32_C(0x0)
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_LAST \
+ HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_SUCCESS
+ /* problem_item is 8 b */
+ uint8_t problem_item;
+ /* There was no problem with any packaged items. */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_NONE \
+ UINT32_C(0x0)
+ /* There was a problem with the NVM package itself. */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_PACKAGE \
+ UINT32_C(0xff)
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_LAST \
+ HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_PACKAGE
+ /* reset_required is 8 b */
+ uint8_t reset_required;
+ /*
+ * No reset is required for installed/updated firmware or
+ * microcode to take effect.
+ */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_NONE \
+ UINT32_C(0x0)
+ /*
+ * A PCIe reset (e.g. system reboot) is
+ * required for newly installed/updated firmware or
+ * microcode to take effect.
+ */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_PCI \
+ UINT32_C(0x1)
+ /*
+ * A controller power reset (e.g. system power-cycle) is
+ * required for newly installed/updated firmware or
+ * microcode to take effect. Some newly installed/updated
+ * firmware or microcode may still take effect upon the
+ * next PCIe reset.
+ */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_POWER \
+ UINT32_C(0x2)
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_LAST \
+ HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_POWER
+ uint8_t unused_0[4];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */
+struct hwrm_nvm_install_update_cmd_err {
+ /*
+ * command specific error codes that go to
+ * the cmd_err field in Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ /* Unable to complete operation due to fragmentation */
+ #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR UINT32_C(0x1)
+ /* nvm is completely full. */
+ #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE UINT32_C(0x2)
+ #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST \
+ HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE
+ uint8_t unused_0[7];
+} __attribute__((packed));
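
`installed_items` is a plain bitmask over the packaged items, and `reset_required` tells the caller what kind of reset the update needs. A decoding sketch using only the constants defined above; the reporting function itself is illustrative.

#include <inttypes.h>
#include <stdio.h>

/* Report which packaged items were installed and whether a reset is needed. */
static void
hwrm_nvm_report_install(const struct hwrm_nvm_install_update_output *resp)
{
	unsigned int i;

	for (i = 0; i < 64; i++)	/* bit 0 = first packaged item, bit 1 = second, ... */
		if (resp->installed_items & (UINT64_C(1) << i))
			printf("packaged item %u installed\n", i);

	if (resp->reset_required == HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_PCI)
		printf("PCIe reset required before the update takes effect\n");
	else if (resp->reset_required == HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_POWER)
		printf("power-cycle required before the update takes effect\n");
}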
+
+/******************
+ * hwrm_nvm_flush *
+ ******************/
+
+
+/* hwrm_nvm_flush_input (size:128b/16B) */
+struct hwrm_nvm_flush_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} __attribute__((packed));
+
+/* hwrm_nvm_flush_output (size:128b/16B) */
+struct hwrm_nvm_flush_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_nvm_flush_cmd_err (size:64b/8B) */
+struct hwrm_nvm_flush_cmd_err {
+ /*
+ * command specific error codes that go to
+ * the cmd_err field in Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_NVM_FLUSH_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ /* flush could not be performed */
+ #define HWRM_NVM_FLUSH_CMD_ERR_CODE_FAIL UINT32_C(0x1)
+ #define HWRM_NVM_FLUSH_CMD_ERR_CODE_LAST \
+ HWRM_NVM_FLUSH_CMD_ERR_CODE_FAIL
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/*************************
+ * hwrm_nvm_get_variable *
+ *************************/
+
+
+/* hwrm_nvm_get_variable_input (size:320b/40B) */
+struct hwrm_nvm_get_variable_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where
+ * nvm variable will be stored
+ */
+ uint64_t dest_data_addr;
+ /* size of data in bits */
+ uint16_t data_len;
+ /* nvm cfg option number */
+ uint16_t option_num;
+ /* reserved. */
+ #define HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_RSVD_0 UINT32_C(0x0)
+ /* reserved. */
+ #define HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF \
+ UINT32_C(0xffff)
+ #define HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_LAST \
+ HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF
+ /*
+ * Number of dimensions for this nvm configuration variable.
+ * This value indicates how many of the indexN values to use.
+ * A value of 0 means that none of the indexN values are valid.
+ * A value of 1 requires that index0 is valid, a value of 2
+ * requires that index0 and index1 are valid, and so forth
+ */
+ uint16_t dimensions;
+ /* index for the 1st dimensions */
+ uint16_t index_0;
+ /* index for the 2nd dimensions */
+ uint16_t index_1;
+ /* index for the 3rd dimensions */
+ uint16_t index_2;
+ /* index for the 4th dimensions */
+ uint16_t index_3;
+ uint8_t flags;
+ /*
+ * When this bit is set to 1, the factory default value will be returned;
+ * when set to 0, the operational value is returned.
+ */
+ #define HWRM_NVM_GET_VARIABLE_INPUT_FLAGS_FACTORY_DFLT \
+ UINT32_C(0x1)
+ uint8_t unused_0;
+} __attribute__((packed));
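
Two details of this request are easy to miss: `data_len` is expressed in bits, not bytes, and a variable is addressed by its option number plus up to four indices gated by `dimensions`. A fill-in sketch for a one-dimensional variable; the buffer address and option number are placeholders supplied by the caller, and the helper name is illustrative.

/* Prepare a get_variable request for a one-dimensional NVM configuration variable. */
static void
hwrm_nvm_fill_get_variable(struct hwrm_nvm_get_variable_input *req,
			   uint64_t dest_buf_pa, uint16_t option_num,
			   uint16_t index0, uint16_t value_bytes)
{
	req->dest_data_addr = dest_buf_pa;	/* firmware writes the value here */
	req->option_num = option_num;		/* NVM configuration option to read */
	req->data_len = value_bytes * 8;	/* this field is sized in bits */
	req->dimensions = 1;			/* only index_0 is meaningful */
	req->index_0 = index0;
}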
+
+/* hwrm_nvm_get_variable_output (size:128b/16B) */
+struct hwrm_nvm_get_variable_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* size of data of the actual variable retrieved in bits */
+ uint16_t data_len;
+ /*
+ * option_num is the option number for the data retrieved. It is possible in the
+ * future that the option number returned would be different than requested. This
+ * condition could occur if an option is deprecated and a new option id is defined
+ * with similar characteristics, but has a slightly different definition. This
+ * also makes it convenient for the caller to identify the variable result with
+ * the option id from the response.
+ */
+ uint16_t option_num;
+ /* reserved. */
+ #define HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_RSVD_0 UINT32_C(0x0)
+ /* reserved. */
+ #define HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_RSVD_FFFF \
+ UINT32_C(0xffff)
+ #define HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_LAST \
+ HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_RSVD_FFFF
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */
+struct hwrm_nvm_get_variable_cmd_err {
+ /*
+ * command specific error codes that go to
+ * the cmd_err field in Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ /* variable does not exist */
+ #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST UINT32_C(0x1)
+ /* configuration is corrupted and the variable cannot be saved */
+ #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR UINT32_C(0x2)
+ /* length specified is too small */
+ #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT UINT32_C(0x3)
+ #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_LAST \
+ HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/*************************
+ * hwrm_nvm_set_variable *
+ *************************/
+
+
+/* hwrm_nvm_set_variable_input (size:320b/40B) */
+struct hwrm_nvm_set_variable_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where
+ * nvm variable will be copied from
+ */
+ uint64_t src_data_addr;
+ /* size of data in bits */
+ uint16_t data_len;
+ /* nvm cfg option number */
+ uint16_t option_num;
+ /* reserved. */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_RSVD_0 UINT32_C(0x0)
+ /* reserved. */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF \
+ UINT32_C(0xffff)
+ #define HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_LAST \
+ HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF
+ /*
+ * Number of dimensions for this nvm configuration variable.
+ * This value indicates how many of the indexN values to use.
+ * A value of 0 means that none of the indexN values are valid.
+ * A value of 1 requires that index0 is valid, a value of 2
+ * requires that index0 and index1 are valid, and so forth
+ */
+ uint16_t dimensions;
+ /* index for the 1st dimensions */
+ uint16_t index_0;
+ /* index for the 2nd dimensions */
+ uint16_t index_1;
+ /* index for the 3rd dimensions */
+ uint16_t index_2;
+ /* index for the 4th dimensions */
+ uint16_t index_3;
+ uint8_t flags;
+ /* When this bit is 1, flush internal cache after this write operation (see hwrm_nvm_flush command.) */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_FORCE_FLUSH \
+ UINT32_C(0x1)
+ /* encryption method */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_MASK \
+ UINT32_C(0xe)
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_SFT 1
+ /* No encryption. */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_NONE \
+ (UINT32_C(0x0) << 1)
+ /* one-way encryption. */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_HMAC_SHA1 \
+ (UINT32_C(0x1) << 1)
+ /* symmetric AES256 encryption. */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_AES256 \
+ (UINT32_C(0x2) << 1)
+ /* SHA1 digest appended to plaintext contents, for authentication */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH \
+ (UINT32_C(0x3) << 1)
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_LAST \
+ HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH
+ uint8_t unused_0;
+} __attribute__((packed));
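
The flags byte packs two things: bit 0 forces a cache flush after the write, and bits 1..3 select the encryption mode through the usual mask/shift convention of this header (the mode values are already shifted by the _SFT amount). A small composition sketch using only the constants defined above; the helper name is illustrative.

/* Build set_variable flags: AES256-protected payload plus a forced cache flush. */
static uint8_t
hwrm_nvm_set_variable_flags(void)
{
	uint8_t flags = 0;

	flags |= HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_FORCE_FLUSH;		/* bit 0 */
	flags |= HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_AES256;	/* bits 1..3 */
	return flags;
}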
+
+/* hwrm_nvm_set_variable_output (size:128b/16B) */
+struct hwrm_nvm_set_variable_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */
+struct hwrm_nvm_set_variable_cmd_err {
+ /*
+ * command specific error codes that go to
+ * the cmd_err field in Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ /* variable does not exist */
+ #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST UINT32_C(0x1)
+ /* configuration is corrupted and the variable cannot be saved */
+ #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR UINT32_C(0x2)
+ #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_LAST \
+ HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/****************************
+ * hwrm_nvm_validate_option *
+ ****************************/
+
+
+/* hwrm_nvm_validate_option_input (size:320b/40B) */
+struct hwrm_nvm_validate_option_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where
+ * nvm variable will be copied from
+ */
+ uint64_t src_data_addr;
+ /* size of data in bits */
+ uint16_t data_len;
+ /* nvm cfg option number */
+ uint16_t option_num;
+ /* reserved. */
+ #define HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_RSVD_0 \
+ UINT32_C(0x0)
+ /* reserved. */
+ #define HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_RSVD_FFFF \
+ UINT32_C(0xffff)
+ #define HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_LAST \
+ HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_RSVD_FFFF
+ /*
+ * Number of dimensions for this nvm configuration variable.
+ * This value indicates how many of the indexN values to use.
+ * A value of 0 means that none of the indexN values are valid.
+ * A value of 1 requires that index0 is valid, a value of 2
+ * requires that index0 and index1 are valid, and so forth
+ */
+ uint16_t dimensions;
+ /* index for the 1st dimensions */
+ uint16_t index_0;
+ /* index for the 2nd dimensions */
+ uint16_t index_1;
+ /* index for the 3rd dimensions */
+ uint16_t index_2;
+ /* index for the 4th dimensions */
+ uint16_t index_3;
+ uint8_t unused_0[2];
+} __attribute__((packed));
+
+/* hwrm_nvm_validate_option_output (size:128b/16B) */
+struct hwrm_nvm_validate_option_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t result;
+ /* indicates that the value provided for the option does not match the saved data. */
+ #define HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_NOT_MATCH UINT32_C(0x0)
+ /* indicates that the value provided for the option matches the saved data. */
+ #define HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_MATCH UINT32_C(0x1)
+ #define HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_LAST \
+ HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_MATCH
+ uint8_t unused_0[6];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_nvm_validate_option_cmd_err (size:64b/8B) */
+struct hwrm_nvm_validate_option_cmd_err {
+ /*
+ * command specific error codes that go to
+ * the cmd_err field in Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_NVM_VALIDATE_OPTION_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ #define HWRM_NVM_VALIDATE_OPTION_CMD_ERR_CODE_LAST \
+ HWRM_NVM_VALIDATE_OPTION_CMD_ERR_CODE_UNKNOWN
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/*****************************
+ * hwrm_nvm_factory_defaults *
+ *****************************/
+
+
+/* hwrm_nvm_factory_defaults_input (size:192b/24B) */
+struct hwrm_nvm_factory_defaults_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* mode is 8 b */
+ uint8_t mode;
+ /* Restore factory default settings. */
+ #define HWRM_NVM_FACTORY_DEFAULTS_INPUT_MODE_RESTORE UINT32_C(0x0)
+ /* Create factory default settings. */
+ #define HWRM_NVM_FACTORY_DEFAULTS_INPUT_MODE_CREATE UINT32_C(0x1)
+ #define HWRM_NVM_FACTORY_DEFAULTS_INPUT_MODE_LAST \
+ HWRM_NVM_FACTORY_DEFAULTS_INPUT_MODE_CREATE
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/* hwrm_nvm_factory_defaults_output (size:128b/16B) */
+struct hwrm_nvm_factory_defaults_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t result;
+ /* factory defaults created successfully. */
+ #define HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_CREATE_OK \
+ UINT32_C(0x0)
+ /* factory defaults restored successfully. */
+ #define HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_RESTORE_OK \
+ UINT32_C(0x1)
+ /* factory defaults already created. */
+ #define HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_CREATE_ALREADY \
+ UINT32_C(0x2)
+ #define HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_LAST \
+ HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_CREATE_ALREADY
+ uint8_t unused_0[6];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_nvm_factory_defaults_cmd_err (size:64b/8B) */
+struct hwrm_nvm_factory_defaults_cmd_err {
+ /*
+ * command specific error codes that go to
+ * the cmd_err field in Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_UNKNOWN \
+ UINT32_C(0x0)
+ /* valid configuration not present to create defaults */
+ #define HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_VALID_CFG \
+ UINT32_C(0x1)
+ /* No saved configuration present to restore, restore failed */
+ #define HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_SAVED_CFG \
+ UINT32_C(0x2)
+ #define HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_LAST \
+ HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_SAVED_CFG
+ uint8_t unused_0[7];
+} __attribute__((packed));
-struct ctx_hw_stats64 {
- uint64_t rx_ucast_pkts;
- uint64_t rx_mcast_pkts;
- uint64_t rx_bcast_pkts;
- uint64_t rx_drop_pkts;
- uint64_t rx_discard_pkts;
- uint64_t rx_ucast_bytes;
- uint64_t rx_mcast_bytes;
- uint64_t rx_bcast_bytes;
-
- uint64_t tx_ucast_pkts;
- uint64_t tx_mcast_pkts;
- uint64_t tx_bcast_pkts;
- uint64_t tx_drop_pkts;
- uint64_t tx_discard_pkts;
- uint64_t tx_ucast_bytes;
- uint64_t tx_mcast_bytes;
- uint64_t tx_bcast_bytes;
-
- uint64_t tpa_pkts;
- uint64_t tpa_bytes;
- uint64_t tpa_events;
- uint64_t tpa_aborts;
-} __attribute__((packed));
-
-#endif /* _HSI_STRUCT_DEF_DPDK_ */
+#endif /* _HSI_STRUCT_DEF_DPDK_H_ */
diff --git a/drivers/net/bnxt/rte_pmd_bnxt.c b/drivers/net/bnxt/rte_pmd_bnxt.c
index cae95f8f..c298de83 100644
--- a/drivers/net/bnxt/rte_pmd_bnxt.c
+++ b/drivers/net/bnxt/rte_pmd_bnxt.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Broadcom
+ * All rights reserved.
*/
#include <inttypes.h>
diff --git a/drivers/net/bnxt/rte_pmd_bnxt.h b/drivers/net/bnxt/rte_pmd_bnxt.h
index cd7227ac..68fbe34d 100644
--- a/drivers/net/bnxt/rte_pmd_bnxt.h
+++ b/drivers/net/bnxt/rte_pmd_bnxt.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Broadcom
+ * All rights reserved.
*/
#ifndef _PMD_BNXT_H_
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index 4a6633ed..acad16a1 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -27,6 +27,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_pmd.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_args.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_8023ad.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_alb.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_flow.c
#
# Export include files
diff --git a/drivers/net/bonding/meson.build b/drivers/net/bonding/meson.build
index b90abc6d..602d2880 100644
--- a/drivers/net/bonding/meson.build
+++ b/drivers/net/bonding/meson.build
@@ -2,7 +2,8 @@
# Copyright(c) 2017 Intel Corporation
name = 'bond' #, james bond :-)
-sources = files('rte_eth_bond_api.c', 'rte_eth_bond_pmd.c',
+version = 2
+sources = files('rte_eth_bond_api.c', 'rte_eth_bond_pmd.c', 'rte_eth_bond_flow.c',
'rte_eth_bond_args.c', 'rte_eth_bond_8023ad.c', 'rte_eth_bond_alb.c')
deps += 'sched' # needed for rte_bitmap.h
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c
index c452318f..f8cea4b6 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.c
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c
@@ -16,9 +16,12 @@
static void bond_mode_8023ad_ext_periodic_cb(void *arg);
#ifdef RTE_LIBRTE_BOND_DEBUG_8023AD
-#define MODE4_DEBUG(fmt, ...) RTE_LOG(DEBUG, PMD, "%6u [Port %u: %s] " fmt, \
- bond_dbg_get_time_diff_ms(), slave_id, \
- __func__, ##__VA_ARGS__)
+
+#define MODE4_DEBUG(fmt, ...) \
+ rte_log(RTE_LOG_DEBUG, bond_logtype, \
+ "%6u [Port %u: %s] " fmt, \
+ bond_dbg_get_time_diff_ms(), slave_id, \
+ __func__, ##__VA_ARGS__)
static uint64_t start_time;
@@ -77,44 +80,46 @@ bond_print_lacp(struct lacpdu *l)
if (p_len && p_state[p_len-1] == ' ')
p_state[p_len-1] = '\0';
- RTE_LOG(DEBUG, PMD, "LACP: {\n"\
- " subtype= %02X\n"\
- " ver_num=%02X\n"\
- " actor={ tlv=%02X, len=%02X\n"\
- " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"\
- " state={ %s }\n"\
- " }\n"\
- " partner={ tlv=%02X, len=%02X\n"\
- " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"\
- " state={ %s }\n"\
- " }\n"\
- " collector={info=%02X, length=%02X, max_delay=%04X\n, " \
- "type_term=%02X, terminator_length = %02X}\n",\
- l->subtype,\
- l->version_number,\
- l->actor.tlv_type_info,\
- l->actor.info_length,\
- l->actor.port_params.system_priority,\
- a_address,\
- l->actor.port_params.key,\
- l->actor.port_params.port_priority,\
- l->actor.port_params.port_number,\
- a_state,\
- l->partner.tlv_type_info,\
- l->partner.info_length,\
- l->partner.port_params.system_priority,\
- p_address,\
- l->partner.port_params.key,\
- l->partner.port_params.port_priority,\
- l->partner.port_params.port_number,\
- p_state,\
- l->tlv_type_collector_info,\
- l->collector_info_length,\
- l->collector_max_delay,\
- l->tlv_type_terminator,\
- l->terminator_length);
+ RTE_BOND_LOG(DEBUG,
+ "LACP: {\n"
+ " subtype= %02X\n"
+ " ver_num=%02X\n"
+ " actor={ tlv=%02X, len=%02X\n"
+ " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"
+ " state={ %s }\n"
+ " }\n"
+ " partner={ tlv=%02X, len=%02X\n"
+ " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"
+ " state={ %s }\n"
+ " }\n"
+ " collector={info=%02X, length=%02X, max_delay=%04X\n, "
+ "type_term=%02X, terminator_length = %02X }",
+ l->subtype,
+ l->version_number,
+ l->actor.tlv_type_info,
+ l->actor.info_length,
+ l->actor.port_params.system_priority,
+ a_address,
+ l->actor.port_params.key,
+ l->actor.port_params.port_priority,
+ l->actor.port_params.port_number,
+ a_state,
+ l->partner.tlv_type_info,
+ l->partner.info_length,
+ l->partner.port_params.system_priority,
+ p_address,
+ l->partner.port_params.key,
+ l->partner.port_params.port_priority,
+ l->partner.port_params.port_number,
+ p_state,
+ l->tlv_type_collector_info,
+ l->collector_info_length,
+ l->collector_max_delay,
+ l->tlv_type_terminator,
+ l->terminator_length);
}
+
#define BOND_PRINT_LACP(lacpdu) bond_print_lacp(lacpdu)
#else
#define BOND_PRINT_LACP(lacpdu) do { } while (0)
@@ -200,31 +205,34 @@ show_warnings(uint16_t slave_id)
rte_get_tsc_hz() / 1000);
if (warnings & WRN_RX_QUEUE_FULL) {
- RTE_LOG(DEBUG, PMD,
- "Slave %u: failed to enqueue LACP packet into RX ring.\n"
- "Receive and transmit functions must be invoked on bonded\n"
- "interface at least 10 times per second or LACP will not\n"
- "work correctly\n", slave_id);
+ RTE_BOND_LOG(DEBUG,
+ "Slave %u: failed to enqueue LACP packet into RX ring.\n"
+ "Receive and transmit functions must be invoked on bonded"
+ "interface at least 10 times per second or LACP will notwork correctly",
+ slave_id);
}
if (warnings & WRN_TX_QUEUE_FULL) {
- RTE_LOG(DEBUG, PMD,
- "Slave %u: failed to enqueue LACP packet into TX ring.\n"
- "Receive and transmit functions must be invoked on bonded\n"
- "interface at least 10 times per second or LACP will not\n"
- "work correctly\n", slave_id);
+ RTE_BOND_LOG(DEBUG,
+ "Slave %u: failed to enqueue LACP packet into TX ring.\n"
+ "Receive and transmit functions must be invoked on bonded"
+ "interface at least 10 times per second or LACP will not work correctly",
+ slave_id);
}
if (warnings & WRN_RX_MARKER_TO_FAST)
- RTE_LOG(INFO, PMD, "Slave %u: marker to early - ignoring.\n", slave_id);
+ RTE_BOND_LOG(INFO, "Slave %u: marker to early - ignoring.",
+ slave_id);
if (warnings & WRN_UNKNOWN_SLOW_TYPE) {
- RTE_LOG(INFO, PMD,
- "Slave %u: ignoring unknown slow protocol frame type", slave_id);
+ RTE_BOND_LOG(INFO,
+ "Slave %u: ignoring unknown slow protocol frame type",
+ slave_id);
}
if (warnings & WRN_UNKNOWN_MARKER_TYPE)
- RTE_LOG(INFO, PMD, "Slave %u: ignoring unknown marker type", slave_id);
+ RTE_BOND_LOG(INFO, "Slave %u: ignoring unknown marker type",
+ slave_id);
if (warnings & WRN_NOT_LACP_CAPABLE)
MODE4_DEBUG("Port %u is not LACP capable!\n", slave_id);
@@ -507,8 +515,8 @@ mux_machine(struct bond_dev_private *internals, uint16_t slave_id)
ACTOR_STATE_SET(port, DISTRIBUTING);
SM_FLAG_SET(port, NTT);
MODE4_DEBUG("COLLECTING -> DISTRIBUTING\n");
- RTE_LOG(INFO, PMD,
- "Bond %u: slave id %u distributing started.\n",
+ RTE_BOND_LOG(INFO,
+ "Bond %u: slave id %u distributing started.",
internals->port_id, slave_id);
}
} else {
@@ -518,8 +526,8 @@ mux_machine(struct bond_dev_private *internals, uint16_t slave_id)
ACTOR_STATE_CLR(port, DISTRIBUTING);
SM_FLAG_SET(port, NTT);
MODE4_DEBUG("DISTRIBUTING -> COLLECTING\n");
- RTE_LOG(INFO, PMD,
- "Bond %u: slave id %u distributing stopped.\n",
+ RTE_BOND_LOG(INFO,
+ "Bond %u: slave id %u distributing stopped.",
internals->port_id, slave_id);
}
}
@@ -557,7 +565,7 @@ tx_machine(struct bond_dev_private *internals, uint16_t slave_id)
lacp_pkt = rte_pktmbuf_alloc(port->mbuf_pool);
if (lacp_pkt == NULL) {
- RTE_LOG(ERR, PMD, "Failed to allocate LACP packet from pool\n");
+ RTE_BOND_LOG(ERR, "Failed to allocate LACP packet from pool");
return;
}
@@ -1337,7 +1345,7 @@ bond_8023ad_setup_validate(uint16_t port_id,
conf->tx_period_ms == 0 ||
conf->rx_marker_period_ms == 0 ||
conf->update_timeout_ms == 0) {
- RTE_LOG(ERR, PMD, "given mode 4 configuration is invalid\n");
+ RTE_BOND_LOG(ERR, "given mode 4 configuration is invalid");
return -EINVAL;
}
}
diff --git a/drivers/net/bonding/rte_eth_bond_alb.c b/drivers/net/bonding/rte_eth_bond_alb.c
index 3f9945b3..c3891c7e 100644
--- a/drivers/net/bonding/rte_eth_bond_alb.c
+++ b/drivers/net/bonding/rte_eth_bond_alb.c
@@ -60,8 +60,8 @@ bond_mode_alb_enable(struct rte_eth_dev *bond_dev)
0, data_size, socket_id);
if (internals->mode6.mempool == NULL) {
- RTE_LOG(ERR, PMD, "%s: Failed to initialize ALB mempool.\n",
- bond_dev->device->name);
+ RTE_BOND_LOG(ERR, "%s: Failed to initialize ALB mempool.\n",
+ bond_dev->device->name);
goto mempool_alloc_error;
}
}
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index f854b737..d558df8b 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -194,7 +194,8 @@ slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id)
uint16_t first;
bonded_eth_dev = &rte_eth_devices[bonded_port_id];
- if (bonded_eth_dev->data->dev_conf.rxmode.hw_vlan_filter == 0)
+ if ((bonded_eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER) == 0)
return 0;
internals = bonded_eth_dev->data->dev_private;
@@ -211,9 +212,12 @@ slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id)
for (i = 0, mask = 1;
i < RTE_BITMAP_SLAB_BIT_SIZE;
i ++, mask <<= 1) {
- if (unlikely(slab & mask))
+ if (unlikely(slab & mask)) {
+ uint16_t vlan_id = pos + i;
+
res = rte_eth_dev_vlan_filter(slave_port_id,
- (uint16_t)pos, 1);
+ vlan_id, 1);
+ }
}
found = rte_bitmap_scan(internals->vlan_filter_bmp,
&pos, &slab);
@@ -223,6 +227,49 @@ slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id)
}
static int
+slave_rte_flow_prepare(uint16_t slave_id, struct bond_dev_private *internals)
+{
+ struct rte_flow *flow;
+ struct rte_flow_error ferror;
+ uint16_t slave_port_id = internals->slaves[slave_id].port_id;
+
+ if (internals->flow_isolated_valid != 0) {
+ rte_eth_dev_stop(slave_port_id);
+ if (rte_flow_isolate(slave_port_id, internals->flow_isolated,
+ &ferror)) {
+ RTE_BOND_LOG(ERR, "rte_flow_isolate failed for slave"
+ " %d: %s", slave_id, ferror.message ?
+ ferror.message : "(no stated reason)");
+ return -1;
+ }
+ }
+ TAILQ_FOREACH(flow, &internals->flow_list, next) {
+ flow->flows[slave_id] = rte_flow_create(slave_port_id,
+ &flow->fd->attr,
+ flow->fd->items,
+ flow->fd->actions,
+ &ferror);
+ if (flow->flows[slave_id] == NULL) {
+ RTE_BOND_LOG(ERR, "Cannot create flow for slave"
+ " %d: %s", slave_id,
+ ferror.message ? ferror.message :
+ "(no stated reason)");
+ /* Destroy successful bond flows from the slave */
+ TAILQ_FOREACH(flow, &internals->flow_list, next) {
+ if (flow->flows[slave_id] != NULL) {
+ rte_flow_destroy(slave_port_id,
+ flow->flows[slave_id],
+ &ferror);
+ flow->flows[slave_id] = NULL;
+ }
+ }
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int
__eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
{
struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
@@ -284,6 +331,8 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
/* Take the first dev's offload capabilities */
internals->rx_offload_capa = dev_info.rx_offload_capa;
internals->tx_offload_capa = dev_info.tx_offload_capa;
+ internals->rx_queue_offload_capa = dev_info.rx_queue_offload_capa;
+ internals->tx_queue_offload_capa = dev_info.tx_queue_offload_capa;
internals->flow_type_rss_offloads = dev_info.flow_type_rss_offloads;
/* Inherit first slave's max rx packet size */
@@ -292,6 +341,8 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
} else {
internals->rx_offload_capa &= dev_info.rx_offload_capa;
internals->tx_offload_capa &= dev_info.tx_offload_capa;
+ internals->rx_queue_offload_capa &= dev_info.rx_queue_offload_capa;
+ internals->tx_queue_offload_capa &= dev_info.tx_queue_offload_capa;
internals->flow_type_rss_offloads &= dev_info.flow_type_rss_offloads;
if (link_properties_valid(bonded_eth_dev,
@@ -316,6 +367,12 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf &=
internals->flow_type_rss_offloads;
+ if (slave_rte_flow_prepare(internals->slave_count, internals) != 0) {
+ RTE_BOND_LOG(ERR, "Failed to prepare new slave flows: port=%d",
+ slave_port_id);
+ return -1;
+ }
+
internals->slave_count++;
if (bonded_eth_dev->data->dev_started) {
@@ -393,6 +450,8 @@ __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id,
struct rte_eth_dev *bonded_eth_dev;
struct bond_dev_private *internals;
struct rte_eth_dev *slave_eth_dev;
+ struct rte_flow_error flow_error;
+ struct rte_flow *flow;
int i, slave_idx;
bonded_eth_dev = &rte_eth_devices[bonded_port_id];
@@ -432,6 +491,18 @@ __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id,
rte_eth_dev_default_mac_addr_set(slave_port_id,
&(internals->slaves[slave_idx].persisted_mac_addr));
+ /*
+ * Remove bond device flows from slave device.
+ * Note: don't restore flow isolate mode.
+ */
+ TAILQ_FOREACH(flow, &internals->flow_list, next) {
+ if (flow->flows[slave_idx] != NULL) {
+ rte_flow_destroy(slave_port_id, flow->flows[slave_idx],
+ &flow_error);
+ flow->flows[slave_idx] = NULL;
+ }
+ }
+
slave_eth_dev = &rte_eth_devices[slave_port_id];
slave_remove(internals, slave_eth_dev);
slave_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDED_SLAVE);
@@ -458,6 +529,8 @@ __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id,
if (internals->slave_count == 0) {
internals->rx_offload_capa = 0;
internals->tx_offload_capa = 0;
+ internals->rx_queue_offload_capa = 0;
+ internals->tx_queue_offload_capa = 0;
internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
internals->reta_size = 0;
internals->candidate_max_rx_pktlen = 0;
diff --git a/drivers/net/bonding/rte_eth_bond_args.c b/drivers/net/bonding/rte_eth_bond_args.c
index 27d3101b..b60fde6a 100644
--- a/drivers/net/bonding/rte_eth_bond_args.c
+++ b/drivers/net/bonding/rte_eth_bond_args.c
@@ -32,7 +32,7 @@ find_port_id_by_pci_addr(const struct rte_pci_addr *pci_addr)
struct rte_pci_addr *eth_pci_addr;
unsigned i;
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) {
pci_dev = RTE_ETH_DEV_TO_PCI(&rte_eth_devices[i]);
eth_pci_addr = &pci_dev->addr;
@@ -50,7 +50,7 @@ find_port_id_by_dev_name(const char *name)
{
unsigned i;
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) {
if (rte_eth_devices[i].data == NULL)
continue;
@@ -92,7 +92,7 @@ parse_port_id(const char *port_str)
if (pci_bus->parse(port_str, &dev_addr) == 0) {
dev = pci_bus->find_device(NULL, bond_pci_addr_cmp, &dev_addr);
if (dev == NULL) {
- RTE_LOG(ERR, PMD, "unable to find PCI device\n");
+ RTE_BOND_LOG(ERR, "unable to find PCI device");
return -1;
}
port_id = find_port_id_by_pci_addr(&dev_addr);
@@ -134,7 +134,8 @@ bond_ethdev_parse_slave_port_kvarg(const char *key,
if (strcmp(key, PMD_BOND_SLAVE_PORT_KVARG) == 0) {
int port_id = parse_port_id(value);
if (port_id < 0) {
- RTE_BOND_LOG(ERR, "Invalid slave port value (%s) specified", value);
+ RTE_BOND_LOG(ERR, "Invalid slave port value (%s) specified",
+ value);
return -1;
} else
slave_ports->slaves[slave_ports->slave_count++] =
@@ -244,7 +245,7 @@ bond_ethdev_parse_primary_slave_port_id_kvarg(const char *key __rte_unused,
if (primary_slave_port_id < 0)
return -1;
- *(uint8_t *)extra_args = (uint8_t)primary_slave_port_id;
+ *(uint16_t *)extra_args = (uint16_t)primary_slave_port_id;
return 0;
}
diff --git a/drivers/net/bonding/rte_eth_bond_flow.c b/drivers/net/bonding/rte_eth_bond_flow.c
new file mode 100644
index 00000000..31e4bcae
--- /dev/null
+++ b/drivers/net/bonding/rte_eth_bond_flow.c
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#include <sys/queue.h>
+
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+#include <rte_flow.h>
+
+#include "rte_eth_bond_private.h"
+
+static struct rte_flow *
+bond_flow_alloc(int numa_node, const struct rte_flow_attr *attr,
+ const struct rte_flow_item *items,
+ const struct rte_flow_action *actions)
+{
+ struct rte_flow *flow;
+ size_t fdsz;
+
+ fdsz = rte_flow_copy(NULL, 0, attr, items, actions);
+ flow = rte_zmalloc_socket(NULL, sizeof(struct rte_flow) + fdsz,
+ RTE_CACHE_LINE_SIZE, numa_node);
+ if (unlikely(flow == NULL)) {
+ RTE_BOND_LOG(ERR, "Could not allocate new flow");
+ return NULL;
+ }
+ flow->fd = (void *)((uintptr_t)flow + sizeof(*flow));
+ if (unlikely(rte_flow_copy(flow->fd, fdsz, attr, items, actions) !=
+ fdsz)) {
+ RTE_BOND_LOG(ERR, "Failed to copy flow description");
+ rte_free(flow);
+ return NULL;
+ }
+ return flow;
+}
+
+static void
+bond_flow_release(struct rte_flow **flow)
+{
+ rte_free(*flow);
+ *flow = NULL;
+}
+
+static int
+bond_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ const struct rte_flow_item patterns[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *err)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+ int i;
+ int ret;
+
+ for (i = 0; i < internals->slave_count; i++) {
+ ret = rte_flow_validate(internals->slaves[i].port_id, attr,
+ patterns, actions, err);
+ if (ret) {
+ RTE_BOND_LOG(ERR, "Operation rte_flow_validate failed"
+ " for slave %d with error %d", i, ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static struct rte_flow *
+bond_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ const struct rte_flow_item patterns[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *err)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+ struct rte_flow *flow;
+ int i;
+
+ flow = bond_flow_alloc(dev->data->numa_node, attr, patterns, actions);
+ if (unlikely(flow == NULL)) {
+ rte_flow_error_set(err, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(ENOMEM));
+ return NULL;
+ }
+ for (i = 0; i < internals->slave_count; i++) {
+ flow->flows[i] = rte_flow_create(internals->slaves[i].port_id,
+ attr, patterns, actions, err);
+ if (unlikely(flow->flows[i] == NULL)) {
+ RTE_BOND_LOG(ERR, "Failed to create flow on slave %d",
+ i);
+ goto err;
+ }
+ }
+ TAILQ_INSERT_TAIL(&internals->flow_list, flow, next);
+ return flow;
+err:
+ /* Destroy all slaves flows. */
+ for (i = 0; i < internals->slave_count; i++) {
+ if (flow->flows[i] != NULL)
+ rte_flow_destroy(internals->slaves[i].port_id,
+ flow->flows[i], err);
+ }
+ bond_flow_release(&flow);
+ return NULL;
+}
+
+static int
+bond_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *err)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < internals->slave_count; i++) {
+ int lret;
+
+ if (unlikely(flow->flows[i] == NULL))
+ continue;
+ lret = rte_flow_destroy(internals->slaves[i].port_id,
+ flow->flows[i], err);
+ if (unlikely(lret != 0)) {
+ RTE_BOND_LOG(ERR, "Failed to destroy flow on slave %d:"
+ " %d", i, lret);
+ ret = lret;
+ }
+ }
+ TAILQ_REMOVE(&internals->flow_list, flow, next);
+ bond_flow_release(&flow);
+ return ret;
+}
+
+static int
+bond_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *err)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+ struct rte_flow *flow;
+ void *tmp;
+ int ret = 0;
+ int lret;
+
+ /* Destroy all bond flows from its slaves instead of flushing them to
+ * keep the LACP flow or any other external flows.
+ */
+ TAILQ_FOREACH_SAFE(flow, &internals->flow_list, next, tmp) {
+ lret = bond_flow_destroy(dev, flow, err);
+ if (unlikely(lret != 0))
+ ret = lret;
+ }
+ if (unlikely(ret != 0))
+ RTE_BOND_LOG(ERR, "Failed to flush flow in all slaves");
+ return ret;
+}
+
+static int
+bond_flow_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
+ const struct rte_flow_action *action,
+ struct rte_flow_query_count *count,
+ struct rte_flow_error *err)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+ struct rte_flow_query_count slave_count;
+ int i;
+ int ret;
+
+ count->bytes = 0;
+ count->hits = 0;
+ rte_memcpy(&slave_count, count, sizeof(slave_count));
+ for (i = 0; i < internals->slave_count; i++) {
+ ret = rte_flow_query(internals->slaves[i].port_id,
+ flow->flows[i], action,
+ &slave_count, err);
+ if (unlikely(ret != 0)) {
+ RTE_BOND_LOG(ERR, "Failed to query flow on"
+ " slave %d: %d", i, ret);
+ return ret;
+ }
+ count->bytes += slave_count.bytes;
+ count->hits += slave_count.hits;
+ slave_count.bytes = 0;
+ slave_count.hits = 0;
+ }
+ return 0;
+}
+
+static int
+bond_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+ const struct rte_flow_action *action, void *arg,
+ struct rte_flow_error *err)
+{
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ return bond_flow_query_count(dev, flow, action, arg, err);
+ default:
+ return rte_flow_error_set(err, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, arg,
+ rte_strerror(ENOTSUP));
+ }
+}
+
+static int
+bond_flow_isolate(struct rte_eth_dev *dev, int set,
+ struct rte_flow_error *err)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+ int i;
+ int ret;
+
+ for (i = 0; i < internals->slave_count; i++) {
+ ret = rte_flow_isolate(internals->slaves[i].port_id, set, err);
+ if (unlikely(ret != 0)) {
+ RTE_BOND_LOG(ERR, "Operation rte_flow_isolate failed"
+ " for slave %d with error %d", i, ret);
+ internals->flow_isolated_valid = 0;
+ return ret;
+ }
+ }
+ internals->flow_isolated = set;
+ internals->flow_isolated_valid = 1;
+ return 0;
+}
+
+const struct rte_flow_ops bond_flow_ops = {
+ .validate = bond_flow_validate,
+ .create = bond_flow_create,
+ .destroy = bond_flow_destroy,
+ .flush = bond_flow_flush,
+ .query = bond_flow_query,
+ .isolate = bond_flow_isolate,
+};
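With the ops table above in place, the bonded port can be driven through the generic rte_flow API; every call fans out to each slave as implemented earlier in this file. A minimal usage sketch (illustrative only, not part of the patch; the port id, queue index and match pattern are placeholder assumptions):

#include <rte_flow.h>

/* Steer IPv4 traffic received on bonded port "port_id" to Rx queue 0.
 * Hypothetical example; error handling kept minimal. */
static struct rte_flow *
bond_example_ipv4_to_queue0(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;
	struct rte_flow *flow = NULL;

	/* These end up in bond_flow_validate()/bond_flow_create(),
	 * which replay the request on every slave port. */
	if (rte_flow_validate(port_id, &attr, pattern, actions, &error) == 0)
		flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
	return flow;
}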
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index c34c3251..02d94b1b 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -17,6 +17,7 @@
#include <rte_bus_vdev.h>
#include <rte_alarm.h>
#include <rte_cycles.h>
+#include <rte_string_fns.h>
#include "rte_eth_bond.h"
#include "rte_eth_bond_private.h"
@@ -570,34 +571,21 @@ update_client_stats(uint32_t addr, uint16_t port, uint32_t *TXorRXindicator)
}
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
-#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \
- RTE_LOG(DEBUG, PMD, \
- "%s " \
- "port:%d " \
- "SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
- "SrcIP:%s " \
- "DstMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
- "DstIP:%s " \
- "%s " \
- "%d\n", \
- info, \
- port, \
- eth_h->s_addr.addr_bytes[0], \
- eth_h->s_addr.addr_bytes[1], \
- eth_h->s_addr.addr_bytes[2], \
- eth_h->s_addr.addr_bytes[3], \
- eth_h->s_addr.addr_bytes[4], \
- eth_h->s_addr.addr_bytes[5], \
- src_ip, \
- eth_h->d_addr.addr_bytes[0], \
- eth_h->d_addr.addr_bytes[1], \
- eth_h->d_addr.addr_bytes[2], \
- eth_h->d_addr.addr_bytes[3], \
- eth_h->d_addr.addr_bytes[4], \
- eth_h->d_addr.addr_bytes[5], \
- dst_ip, \
- arp_op, \
- ++burstnumber)
+#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \
+ rte_log(RTE_LOG_DEBUG, bond_logtype, \
+ "%s port:%d SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X SrcIP:%s " \
+ "DstMAC:%02X:%02X:%02X:%02X:%02X:%02X DstIP:%s %s %d\n", \
+ info, \
+ port, \
+ eth_h->s_addr.addr_bytes[0], eth_h->s_addr.addr_bytes[1], \
+ eth_h->s_addr.addr_bytes[2], eth_h->s_addr.addr_bytes[3], \
+ eth_h->s_addr.addr_bytes[4], eth_h->s_addr.addr_bytes[5], \
+ src_ip, \
+ eth_h->d_addr.addr_bytes[0], eth_h->d_addr.addr_bytes[1], \
+ eth_h->d_addr.addr_bytes[2], eth_h->d_addr.addr_bytes[3], \
+ eth_h->d_addr.addr_bytes[4], eth_h->d_addr.addr_bytes[5], \
+ dst_ip, \
+ arp_op, ++burstnumber)
#endif
static void
@@ -617,7 +605,7 @@ mode6_debug(const char __attribute__((unused)) *info, struct ether_hdr *eth_h,
uint16_t offset = get_vlan_offset(eth_h, &ether_type);
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
- snprintf(buf, 16, "%s", info);
+ strlcpy(buf, info, 16);
#endif
if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
@@ -1138,7 +1126,8 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
/* Allocate new packet to send ARP update on current slave */
upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
if (upd_pkt == NULL) {
- RTE_LOG(ERR, PMD, "Failed to allocate ARP packet from pool\n");
+ RTE_BOND_LOG(ERR,
+ "Failed to allocate ARP packet from pool");
continue;
}
pkt_size = sizeof(struct ether_hdr) + sizeof(struct arp_hdr)
@@ -1560,12 +1549,12 @@ mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr)
struct ether_addr *mac_addr;
if (eth_dev == NULL) {
- RTE_LOG(ERR, PMD, "%s: NULL pointer eth_dev specified\n", __func__);
+ RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
return -1;
}
if (dst_mac_addr == NULL) {
- RTE_LOG(ERR, PMD, "%s: NULL pointer MAC specified\n", __func__);
+ RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
return -1;
}
@@ -1686,9 +1675,9 @@ bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
if (internals->mode4.dedicated_queues.enabled == 0) {
eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
- RTE_LOG(WARNING, PMD,
+ RTE_BOND_LOG(WARNING,
"Using mode 4, it is necessary to do TX burst "
- "and RX burst at least every 100ms.\n");
+ "and RX burst at least every 100ms.");
} else {
/* Use flow director's optimization */
eth_dev->rx_pkt_burst =
@@ -1818,8 +1807,13 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
bonded_eth_dev->data->dev_conf.rxmode.mq_mode;
}
- slave_eth_dev->data->dev_conf.rxmode.hw_vlan_filter =
- bonded_eth_dev->data->dev_conf.rxmode.hw_vlan_filter;
+ if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER)
+ slave_eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_VLAN_FILTER;
+ else
+ slave_eth_dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_VLAN_FILTER;
nb_rx_queues = bonded_eth_dev->data->nb_rx_queues;
nb_tx_queues = bonded_eth_dev->data->nb_tx_queues;
@@ -1831,12 +1825,20 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
}
}
+ errval = rte_eth_dev_set_mtu(slave_eth_dev->data->port_id,
+ bonded_eth_dev->data->mtu);
+ if (errval != 0 && errval != -ENOTSUP) {
+ RTE_BOND_LOG(ERR, "rte_eth_dev_set_mtu: port %u, err (%d)",
+ slave_eth_dev->data->port_id, errval);
+ return errval;
+ }
+
/* Configure device */
errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
nb_rx_queues, nb_tx_queues,
&(slave_eth_dev->data->dev_conf));
if (errval != 0) {
- RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u , err (%d)",
+ RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u, err (%d)",
slave_eth_dev->data->port_id, errval);
return errval;
}
@@ -1918,10 +1920,10 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
&internals->reta_conf[0],
internals->slaves[i].reta_size);
if (errval != 0) {
- RTE_LOG(WARNING, PMD,
- "rte_eth_dev_rss_reta_update on slave port %d fails (err %d)."
- " RSS Configuration for bonding may be inconsistent.\n",
- slave_eth_dev->data->port_id, errval);
+ RTE_BOND_LOG(WARNING,
+ "rte_eth_dev_rss_reta_update on slave port %d fails (err %d)."
+ " RSS Configuration for bonding may be inconsistent.",
+ slave_eth_dev->data->port_id, errval);
}
break;
}
@@ -1950,10 +1952,19 @@ slave_remove(struct bond_dev_private *internals,
slave_eth_dev->data->port_id)
break;
- if (i < (internals->slave_count - 1))
+ if (i < (internals->slave_count - 1)) {
+ struct rte_flow *flow;
+
memmove(&internals->slaves[i], &internals->slaves[i + 1],
sizeof(internals->slaves[0]) *
(internals->slave_count - i - 1));
+ TAILQ_FOREACH(flow, &internals->flow_list, next) {
+ memmove(&flow->flows[i], &flow->flows[i + 1],
+ sizeof(flow->flows[0]) *
+ (internals->slave_count - i - 1));
+ flow->flows[internals->slave_count - 1] = NULL;
+ }
+ }
internals->slave_count--;
@@ -2026,7 +2037,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
if (internals->slave_count == 0) {
RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
- return -1;
+ goto out_err;
}
if (internals->user_defined_mac == 0) {
@@ -2037,18 +2048,18 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
new_mac_addr = &internals->slaves[i].persisted_mac_addr;
if (new_mac_addr == NULL)
- return -1;
+ goto out_err;
if (mac_address_set(eth_dev, new_mac_addr) != 0) {
RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
eth_dev->data->port_id);
- return -1;
+ goto out_err;
}
}
/* Update all slave devices MACs*/
if (mac_address_slaves_update(eth_dev) != 0)
- return -1;
+ goto out_err;
/* If bonded device is configure in promiscuous mode then re-apply config */
if (internals->promiscuous_en)
@@ -2073,7 +2084,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
"bonded port (%d) failed to reconfigure slave device (%d)",
eth_dev->data->port_id,
internals->slaves[i].port_id);
- return -1;
+ goto out_err;
}
/* We will need to poll for link status if any slave doesn't
* support interrupts
@@ -2081,6 +2092,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
if (internals->slaves[i].link_status_poll_enabled)
internals->link_status_polling_enabled = 1;
}
+
/* start polling if needed */
if (internals->link_status_polling_enabled) {
rte_eal_alarm_set(
@@ -2100,6 +2112,10 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
bond_tlb_enable(internals);
return 0;
+
+out_err:
+ eth_dev->data->dev_started = 0;
+ return -1;
}
static void
@@ -2172,20 +2188,22 @@ bond_ethdev_close(struct rte_eth_dev *dev)
struct bond_dev_private *internals = dev->data->dev_private;
uint8_t bond_port_id = internals->port_id;
int skipped = 0;
+ struct rte_flow_error ferror;
- RTE_LOG(INFO, EAL, "Closing bonded device %s\n", dev->device->name);
+ RTE_BOND_LOG(INFO, "Closing bonded device %s", dev->device->name);
while (internals->slave_count != skipped) {
uint16_t port_id = internals->slaves[skipped].port_id;
rte_eth_dev_stop(port_id);
if (rte_eth_bond_slave_remove(bond_port_id, port_id) != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to remove port %d from bonded device "
- "%s\n", port_id, dev->device->name);
+ RTE_BOND_LOG(ERR,
+ "Failed to remove port %d from bonded device %s",
+ port_id, dev->device->name);
skipped++;
}
}
+ bond_flow_ops.flush(dev, &ferror);
bond_ethdev_free_queues(dev);
rte_bitmap_reset(internals->vlan_filter_bmp);
}
@@ -2244,6 +2262,8 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->rx_offload_capa = internals->rx_offload_capa;
dev_info->tx_offload_capa = internals->tx_offload_capa;
+ dev_info->rx_queue_offload_capa = internals->rx_queue_offload_capa;
+ dev_info->tx_queue_offload_capa = internals->tx_queue_offload_capa;
dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
dev_info->reta_size = internals->reta_size;
@@ -2269,9 +2289,9 @@ bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
res = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
if (res == ENOTSUP)
- RTE_LOG(WARNING, PMD,
- "Setting VLAN filter on slave port %u not supported.\n",
- port_id);
+ RTE_BOND_LOG(WARNING,
+ "Setting VLAN filter on slave port %u not supported.",
+ port_id);
}
rte_spinlock_unlock(&internals->lock);
@@ -2633,14 +2653,21 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
if (!valid_slave)
return rc;
+ /* Serialize parallel invocations of the LSC callback, whether they
+ * come from a real link event in the slave PMDs or from the bonding
+ * PMD itself.
+ */
+ rte_spinlock_lock(&internals->lsc_lock);
+
/* Search for port in active port list */
active_pos = find_slave_by_id(internals->active_slaves,
internals->active_slave_count, port_id);
rte_eth_link_get_nowait(port_id, &link);
if (link.link_status) {
- if (active_pos < internals->active_slave_count)
+ if (active_pos < internals->active_slave_count) {
+ rte_spinlock_unlock(&internals->lsc_lock);
return rc;
+ }
/* if no active slave ports then set this port to be primary port */
if (internals->active_slave_count < 1) {
@@ -2659,8 +2686,10 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
internals->primary_port == port_id)
bond_ethdev_primary_set(internals, port_id);
} else {
- if (active_pos == internals->active_slave_count)
+ if (active_pos == internals->active_slave_count) {
+ rte_spinlock_unlock(&internals->lsc_lock);
return rc;
+ }
/* Remove from active slave list */
deactivate_slave(bonded_eth_dev, port_id);
@@ -2713,6 +2742,9 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
NULL);
}
}
+
+ rte_spinlock_unlock(&internals->lsc_lock);
+
return 0;
}
@@ -2851,11 +2883,26 @@ bond_ethdev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return 0;
}
-static void
+static int
bond_ethdev_mac_address_set(struct rte_eth_dev *dev, struct ether_addr *addr)
{
- if (mac_address_set(dev, addr))
+ if (mac_address_set(dev, addr)) {
RTE_BOND_LOG(ERR, "Failed to update MAC address");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+bond_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
+ enum rte_filter_type type, enum rte_filter_op op, void *arg)
+{
+ if (type == RTE_ETH_FILTER_GENERIC && op == RTE_ETH_FILTER_GET) {
+ *(const void **)arg = &bond_flow_ops;
+ return 0;
+ }
+ return -ENOTSUP;
}
const struct eth_dev_ops default_dev_ops = {
@@ -2879,7 +2926,8 @@ const struct eth_dev_ops default_dev_ops = {
.rss_hash_update = bond_ethdev_rss_hash_update,
.rss_hash_conf_get = bond_ethdev_rss_hash_conf_get,
.mtu_set = bond_ethdev_mtu_set,
- .mac_addr_set = bond_ethdev_mac_address_set
+ .mac_addr_set = bond_ethdev_mac_address_set,
+ .filter_ctrl = bond_filter_ctrl
};
static int
@@ -2917,6 +2965,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC;
rte_spinlock_init(&internals->lock);
+ rte_spinlock_init(&internals->lsc_lock);
internals->port_id = eth_dev->data->port_id;
internals->mode = BONDING_MODE_INVALID;
@@ -2936,6 +2985,8 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
internals->active_slave_count = 0;
internals->rx_offload_capa = 0;
internals->tx_offload_capa = 0;
+ internals->rx_queue_offload_capa = 0;
+ internals->tx_queue_offload_capa = 0;
internals->candidate_max_rx_pktlen = 0;
internals->max_rx_pktlen = 0;
@@ -2945,10 +2996,13 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
memset(internals->slaves, 0, sizeof(internals->slaves));
+ TAILQ_INIT(&internals->flow_list);
+ internals->flow_isolated_valid = 0;
+
/* Set mode 4 default configuration */
bond_mode_8023ad_setup(eth_dev, NULL);
if (bond_ethdev_mode_set(eth_dev, mode)) {
- RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode too %d",
+ RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode to %d",
eth_dev->data->port_id, mode);
goto err;
}
@@ -2959,7 +3013,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
RTE_CACHE_LINE_SIZE);
if (internals->vlan_filter_bmpmem == NULL) {
RTE_BOND_LOG(ERR,
- "Failed to allocate vlan bitmap for bonded device %u\n",
+ "Failed to allocate vlan bitmap for bonded device %u",
eth_dev->data->port_id);
goto err;
}
@@ -2968,7 +3022,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
internals->vlan_filter_bmpmem, vlan_filter_bmp_size);
if (internals->vlan_filter_bmp == NULL) {
RTE_BOND_LOG(ERR,
- "Failed to init vlan bitmap for bonded device %u\n",
+ "Failed to init vlan bitmap for bonded device %u",
eth_dev->data->port_id);
rte_free(internals->vlan_filter_bmpmem);
goto err;
@@ -2994,12 +3048,26 @@ bond_probe(struct rte_vdev_device *dev)
uint8_t bonding_mode, socket_id/*, agg_mode*/;
int arg_count, port_id;
uint8_t agg_mode;
+ struct rte_eth_dev *eth_dev;
if (!dev)
return -EINVAL;
name = rte_vdev_device_name(dev);
- RTE_LOG(INFO, EAL, "Initializing pmd_bond for %s\n", name);
+ RTE_BOND_LOG(INFO, "Initializing pmd_bond for %s", name);
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(rte_vdev_device_args(dev)) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ RTE_BOND_LOG(ERR, "Failed to probe %s", name);
+ return -1;
+ }
+ /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->dev_ops = &default_dev_ops;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),
pmd_bond_init_valid_arguments);
@@ -3011,13 +3079,13 @@ bond_probe(struct rte_vdev_device *dev)
if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
&bond_ethdev_parse_slave_mode_kvarg,
&bonding_mode) != 0) {
- RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n",
+ RTE_BOND_LOG(ERR, "Invalid mode for bonded device %s",
name);
goto parse_error;
}
} else {
- RTE_LOG(ERR, EAL, "Mode must be specified only once for bonded "
- "device %s\n", name);
+ RTE_BOND_LOG(ERR, "Mode must be specified only once for bonded "
+ "device %s", name);
goto parse_error;
}
@@ -3027,13 +3095,13 @@ bond_probe(struct rte_vdev_device *dev)
if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
&bond_ethdev_parse_socket_id_kvarg, &socket_id)
!= 0) {
- RTE_LOG(ERR, EAL, "Invalid socket Id specified for "
- "bonded device %s\n", name);
+ RTE_BOND_LOG(ERR, "Invalid socket Id specified for "
+ "bonded device %s", name);
goto parse_error;
}
} else if (arg_count > 1) {
- RTE_LOG(ERR, EAL, "Socket Id can be specified only once for "
- "bonded device %s\n", name);
+ RTE_BOND_LOG(ERR, "Socket Id can be specified only once for "
+ "bonded device %s", name);
goto parse_error;
} else {
socket_id = rte_socket_id();
@@ -3044,8 +3112,8 @@ bond_probe(struct rte_vdev_device *dev)
/* Create link bonding eth device */
port_id = bond_alloc(dev, bonding_mode);
if (port_id < 0) {
- RTE_LOG(ERR, EAL, "Failed to create socket %s in mode %u on "
- "socket %u.\n", name, bonding_mode, socket_id);
+ RTE_BOND_LOG(ERR, "Failed to create socket %s in mode %u on "
+ "socket %u.", name, bonding_mode, socket_id);
goto parse_error;
}
internals = rte_eth_devices[port_id].data->dev_private;
@@ -3057,8 +3125,8 @@ bond_probe(struct rte_vdev_device *dev)
PMD_BOND_AGG_MODE_KVARG,
&bond_ethdev_parse_slave_agg_mode_kvarg,
&agg_mode) != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to parse agg selection mode for bonded device %s\n",
+ RTE_BOND_LOG(ERR,
+ "Failed to parse agg selection mode for bonded device %s",
name);
goto parse_error;
}
@@ -3070,8 +3138,9 @@ bond_probe(struct rte_vdev_device *dev)
rte_eth_bond_8023ad_agg_selection_set(port_id, AGG_STABLE);
}
- RTE_LOG(INFO, EAL, "Create bonded device %s on port %d in mode %u on "
- "socket %u.\n", name, port_id, bonding_mode, socket_id);
+ rte_eth_dev_probing_finish(&rte_eth_devices[port_id]);
+ RTE_BOND_LOG(INFO, "Create bonded device %s on port %d in mode %u on "
+ "socket %u.", name, port_id, bonding_mode, socket_id);
return 0;
parse_error:
@@ -3091,7 +3160,7 @@ bond_remove(struct rte_vdev_device *dev)
return -EINVAL;
name = rte_vdev_device_name(dev);
- RTE_LOG(INFO, EAL, "Uninitializing pmd_bond for %s\n", name);
+ RTE_BOND_LOG(INFO, "Uninitializing pmd_bond for %s", name);
/* now free all data allocation - for eth_dev structure,
* dummy pci driver and internal (private) data
@@ -3118,6 +3187,10 @@ bond_remove(struct rte_vdev_device *dev)
eth_dev->tx_pkt_burst = NULL;
internals = eth_dev->data->dev_private;
+ /* Try to release the mempool used in mode 6. If the bonded device
+ * is not in mode 6, freeing a NULL pointer is not a problem.
+ */
+ rte_mempool_free(internals->mode6.mempool);
rte_bitmap_free(internals->vlan_filter_bmp);
rte_free(internals->vlan_filter_bmpmem);
rte_free(eth_dev->data->dev_private);
@@ -3178,23 +3251,23 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
struct ether_addr bond_mac;
if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
- &bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
- RTE_LOG(INFO, EAL, "Invalid mac address for bonded device %s\n",
- name);
+ &bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
+ RTE_BOND_LOG(INFO, "Invalid mac address for bonded device %s",
+ name);
return -1;
}
/* Set MAC address */
if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to set mac address on bonded device %s\n",
- name);
+ RTE_BOND_LOG(ERR,
+ "Failed to set mac address on bonded device %s",
+ name);
return -1;
}
} else if (arg_count > 1) {
- RTE_LOG(ERR, EAL,
- "MAC address can be specified only once for bonded device %s\n",
- name);
+ RTE_BOND_LOG(ERR,
+ "MAC address can be specified only once for bonded device %s",
+ name);
return -1;
}
@@ -3204,40 +3277,40 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
uint8_t xmit_policy;
if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
- &bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
- 0) {
- RTE_LOG(INFO, EAL,
- "Invalid xmit policy specified for bonded device %s\n",
- name);
+ &bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
+ 0) {
+ RTE_BOND_LOG(INFO,
+ "Invalid xmit policy specified for bonded device %s",
+ name);
return -1;
}
/* Set balance mode transmit policy*/
if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to set balance xmit policy on bonded device %s\n",
- name);
+ RTE_BOND_LOG(ERR,
+ "Failed to set balance xmit policy on bonded device %s",
+ name);
return -1;
}
} else if (arg_count > 1) {
- RTE_LOG(ERR, EAL,
- "Transmit policy can be specified only once for bonded device"
- " %s\n", name);
+ RTE_BOND_LOG(ERR,
+ "Transmit policy can be specified only once for bonded device %s",
+ name);
return -1;
}
if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
if (rte_kvargs_process(kvlist,
- PMD_BOND_AGG_MODE_KVARG,
- &bond_ethdev_parse_slave_agg_mode_kvarg,
- &agg_mode) != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to parse agg selection mode for bonded device %s\n",
- name);
+ PMD_BOND_AGG_MODE_KVARG,
+ &bond_ethdev_parse_slave_agg_mode_kvarg,
+ &agg_mode) != 0) {
+ RTE_BOND_LOG(ERR,
+ "Failed to parse agg selection mode for bonded device %s",
+ name);
}
if (internals->mode == BONDING_MODE_8023AD)
- rte_eth_bond_8023ad_agg_selection_set(port_id,
- agg_mode);
+ rte_eth_bond_8023ad_agg_selection_set(port_id,
+ agg_mode);
}
/* Parse/add slave ports to bonded device */
@@ -3248,23 +3321,23 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
memset(&slave_ports, 0, sizeof(slave_ports));
if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
- &bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to parse slave ports for bonded device %s\n",
- name);
+ &bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
+ RTE_BOND_LOG(ERR,
+ "Failed to parse slave ports for bonded device %s",
+ name);
return -1;
}
for (i = 0; i < slave_ports.slave_count; i++) {
if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to add port %d as slave to bonded device %s\n",
- slave_ports.slaves[i], name);
+ RTE_BOND_LOG(ERR,
+ "Failed to add port %d as slave to bonded device %s",
+ slave_ports.slaves[i], name);
}
}
} else {
- RTE_LOG(INFO, EAL, "No slaves specified for bonded device %s\n", name);
+ RTE_BOND_LOG(INFO, "No slaves specified for bonded device %s", name);
return -1;
}
@@ -3274,27 +3347,27 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
uint16_t primary_slave_port_id;
if (rte_kvargs_process(kvlist,
- PMD_BOND_PRIMARY_SLAVE_KVARG,
- &bond_ethdev_parse_primary_slave_port_id_kvarg,
- &primary_slave_port_id) < 0) {
- RTE_LOG(INFO, EAL,
- "Invalid primary slave port id specified for bonded device"
- " %s\n", name);
+ PMD_BOND_PRIMARY_SLAVE_KVARG,
+ &bond_ethdev_parse_primary_slave_port_id_kvarg,
+ &primary_slave_port_id) < 0) {
+ RTE_BOND_LOG(INFO,
+ "Invalid primary slave port id specified for bonded device %s",
+ name);
return -1;
}
/* Set balance mode transmit policy*/
if (rte_eth_bond_primary_set(port_id, primary_slave_port_id)
- != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to set primary slave port %d on bonded device %s\n",
- primary_slave_port_id, name);
+ != 0) {
+ RTE_BOND_LOG(ERR,
+ "Failed to set primary slave port %d on bonded device %s",
+ primary_slave_port_id, name);
return -1;
}
} else if (arg_count > 1) {
- RTE_LOG(INFO, EAL,
- "Primary slave can be specified only once for bonded device"
- " %s\n", name);
+ RTE_BOND_LOG(INFO,
+ "Primary slave can be specified only once for bonded device %s",
+ name);
return -1;
}
@@ -3304,26 +3377,26 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
uint32_t lsc_poll_interval_ms;
if (rte_kvargs_process(kvlist,
- PMD_BOND_LSC_POLL_PERIOD_KVARG,
- &bond_ethdev_parse_time_ms_kvarg,
- &lsc_poll_interval_ms) < 0) {
- RTE_LOG(INFO, EAL,
- "Invalid lsc polling interval value specified for bonded"
- " device %s\n", name);
+ PMD_BOND_LSC_POLL_PERIOD_KVARG,
+ &bond_ethdev_parse_time_ms_kvarg,
+ &lsc_poll_interval_ms) < 0) {
+ RTE_BOND_LOG(INFO,
+ "Invalid lsc polling interval value specified for bonded"
+ " device %s", name);
return -1;
}
if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
- != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to set lsc monitor polling interval (%u ms) on"
- " bonded device %s\n", lsc_poll_interval_ms, name);
+ != 0) {
+ RTE_BOND_LOG(ERR,
+ "Failed to set lsc monitor polling interval (%u ms) on bonded device %s",
+ lsc_poll_interval_ms, name);
return -1;
}
} else if (arg_count > 1) {
- RTE_LOG(INFO, EAL,
- "LSC polling interval can be specified only once for bonded"
- " device %s\n", name);
+ RTE_BOND_LOG(INFO,
+ "LSC polling interval can be specified only once for bonded"
+ " device %s", name);
return -1;
}
@@ -3333,27 +3406,27 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
uint32_t link_up_delay_ms;
if (rte_kvargs_process(kvlist,
- PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
- &bond_ethdev_parse_time_ms_kvarg,
- &link_up_delay_ms) < 0) {
- RTE_LOG(INFO, EAL,
- "Invalid link up propagation delay value specified for"
- " bonded device %s\n", name);
+ PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
+ &bond_ethdev_parse_time_ms_kvarg,
+ &link_up_delay_ms) < 0) {
+ RTE_BOND_LOG(INFO,
+ "Invalid link up propagation delay value specified for"
+ " bonded device %s", name);
return -1;
}
/* Set balance mode transmit policy*/
if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
- != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to set link up propagation delay (%u ms) on bonded"
- " device %s\n", link_up_delay_ms, name);
+ != 0) {
+ RTE_BOND_LOG(ERR,
+ "Failed to set link up propagation delay (%u ms) on bonded"
+ " device %s", link_up_delay_ms, name);
return -1;
}
} else if (arg_count > 1) {
- RTE_LOG(INFO, EAL,
- "Link up propagation delay can be specified only once for"
- " bonded device %s\n", name);
+ RTE_BOND_LOG(INFO,
+ "Link up propagation delay can be specified only once for"
+ " bonded device %s", name);
return -1;
}
@@ -3363,27 +3436,27 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
uint32_t link_down_delay_ms;
if (rte_kvargs_process(kvlist,
- PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
- &bond_ethdev_parse_time_ms_kvarg,
- &link_down_delay_ms) < 0) {
- RTE_LOG(INFO, EAL,
- "Invalid link down propagation delay value specified for"
- " bonded device %s\n", name);
+ PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
+ &bond_ethdev_parse_time_ms_kvarg,
+ &link_down_delay_ms) < 0) {
+ RTE_BOND_LOG(INFO,
+ "Invalid link down propagation delay value specified for"
+ " bonded device %s", name);
return -1;
}
/* Set balance mode transmit policy*/
if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
- != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to set link down propagation delay (%u ms) on"
- " bonded device %s\n", link_down_delay_ms, name);
+ != 0) {
+ RTE_BOND_LOG(ERR,
+ "Failed to set link down propagation delay (%u ms) on bonded device %s",
+ link_down_delay_ms, name);
return -1;
}
} else if (arg_count > 1) {
- RTE_LOG(INFO, EAL,
- "Link down propagation delay can be specified only once for"
- " bonded device %s\n", name);
+ RTE_BOND_LOG(INFO,
+ "Link down propagation delay can be specified only once for bonded device %s",
+ name);
return -1;
}
@@ -3409,3 +3482,14 @@ RTE_PMD_REGISTER_PARAM_STRING(net_bonding,
"lsc_poll_period_ms=<int> "
"up_delay=<int> "
"down_delay=<int>");
+
+int bond_logtype;
+
+RTE_INIT(bond_init_log);
+static void
+bond_init_log(void)
+{
+ bond_logtype = rte_log_register("pmd.net.bon");
+ if (bond_logtype >= 0)
+ rte_log_set_level(bond_logtype, RTE_LOG_NOTICE);
+}
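The logging changes above move the bonding PMD onto a dynamic log type. A minimal sketch of how an application could raise its verbosity at runtime, assuming the type name registered above (illustrative only, not part of the patch):

#include <rte_log.h>

/* Re-registering an existing name returns the same log type id,
 * so this works whether or not the PMD has registered it yet. */
static void
bond_example_enable_debug_logs(void)
{
	int logtype = rte_log_register("pmd.net.bon");

	if (logtype >= 0)
		rte_log_set_level(logtype, RTE_LOG_DEBUG);
}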
diff --git a/drivers/net/bonding/rte_eth_bond_private.h b/drivers/net/bonding/rte_eth_bond_private.h
index 92e15f8c..65445b86 100644
--- a/drivers/net/bonding/rte_eth_bond_private.h
+++ b/drivers/net/bonding/rte_eth_bond_private.h
@@ -5,9 +5,12 @@
#ifndef _RTE_ETH_BOND_PRIVATE_H_
#define _RTE_ETH_BOND_PRIVATE_H_
+#include <sys/queue.h>
+
#include <rte_ethdev_driver.h>
#include <rte_spinlock.h>
#include <rte_bitmap.h>
+#include <rte_flow_driver.h>
#include "rte_eth_bond.h"
#include "rte_eth_bond_8023ad_private.h"
@@ -28,8 +31,11 @@
#define PMD_BOND_XMIT_POLICY_LAYER23_KVARG ("l23")
#define PMD_BOND_XMIT_POLICY_LAYER34_KVARG ("l34")
+extern int bond_logtype;
+
#define RTE_BOND_LOG(lvl, msg, ...) \
- RTE_LOG(lvl, PMD, "%s(%d) - " msg "\n", __func__, __LINE__, ##__VA_ARGS__)
+ rte_log(RTE_LOG_ ## lvl, bond_logtype, \
+ "%s(%d) - " msg "\n", __func__, __LINE__, ##__VA_ARGS__)
#define BONDING_MODE_INVALID 0xFF
@@ -37,6 +43,8 @@ extern const char *pmd_bond_init_valid_arguments[];
extern struct rte_vdev_driver pmd_bond_drv;
+extern const struct rte_flow_ops bond_flow_ops;
+
/** Port Queue Mapping Structure */
struct bond_rx_queue {
uint16_t queue_id;
@@ -80,6 +88,14 @@ struct bond_slave_details {
uint16_t reta_size;
};
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) next;
+ /* Slaves flows */
+ struct rte_flow *flows[RTE_MAX_ETHPORTS];
+ /* Flow description for synchronization */
+ struct rte_flow_desc *fd;
+};
+
typedef void (*burst_xmit_hash_t)(struct rte_mbuf **buf, uint16_t nb_pkts,
uint8_t slave_count, uint16_t *slaves);
@@ -89,6 +105,7 @@ struct bond_dev_private {
uint8_t mode; /**< Link Bonding Mode */
rte_spinlock_t lock;
+ rte_spinlock_t lsc_lock;
uint16_t primary_port; /**< Primary Slave Port */
uint16_t current_primary_port; /**< Primary Slave Port */
@@ -128,8 +145,17 @@ struct bond_dev_private {
/**< TLB active slaves send order */
struct mode_alb_private mode6;
- uint32_t rx_offload_capa; /** Rx offload capability */
- uint32_t tx_offload_capa; /** Tx offload capability */
+ uint64_t rx_offload_capa; /** Rx offload capability */
+ uint64_t tx_offload_capa; /** Tx offload capability */
+ uint64_t rx_queue_offload_capa; /** per queue Rx offload capability */
+ uint64_t tx_queue_offload_capa; /** per queue Tx offload capability */
+
+ /**< List of the configured flows */
+ TAILQ_HEAD(sub_flows, rte_flow) flow_list;
+
+ /**< Flow isolation state */
+ int flow_isolated;
+ int flow_isolated_valid;
/** Bit mask of RSS offloads, the bit offset also means flow type */
uint64_t flow_type_rss_offloads;
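The bond_flow_ops table declared above is reached by the rte_flow library through the generic filter-control path that bond_filter_ctrl() now answers. A condensed sketch of that lookup (illustrative only, not part of the patch):

#include <rte_ethdev.h>
#include <rte_flow_driver.h>

/* How the ethdev/rte_flow layer obtains a PMD's flow ops in this DPDK
 * generation: a RTE_ETH_FILTER_GENERIC / RTE_ETH_FILTER_GET query,
 * which the bonding PMD answers in bond_filter_ctrl(). */
static const struct rte_flow_ops *
bond_example_get_flow_ops(uint16_t port_id)
{
	const struct rte_flow_ops *ops = NULL;

	if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
				    RTE_ETH_FILTER_GET, &ops) != 0)
		return NULL;
	return ops;
}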
diff --git a/drivers/net/bonding/rte_pmd_bond_version.map b/drivers/net/bonding/rte_pmd_bond_version.map
index ec3374b0..03ddb44e 100644
--- a/drivers/net/bonding/rte_pmd_bond_version.map
+++ b/drivers/net/bonding/rte_pmd_bond_version.map
@@ -1,6 +1,7 @@
DPDK_2.0 {
global:
+ rte_eth_bond_8023ad_slave_info;
rte_eth_bond_active_slaves_get;
rte_eth_bond_create;
rte_eth_bond_link_monitoring_set;
diff --git a/drivers/net/cxgbe/Makefile b/drivers/net/cxgbe/Makefile
index 65df1425..79fdb6f0 100644
--- a/drivers/net/cxgbe/Makefile
+++ b/drivers/net/cxgbe/Makefile
@@ -1,33 +1,6 @@
-# BSD LICENSE
-#
-# Copyright(c) 2014-2015 Chelsio Communications.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Chelsio Communications nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2014-2018 Chelsio Communications.
+# All rights reserved.
include $(RTE_SDK)/mk/rte.vars.mk
@@ -45,12 +18,6 @@ EXPORT_MAP := rte_pmd_cxgbe_version.map
LIBABIVER := 1
-ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
-#
-# CFLAGS for icc
-#
-CFLAGS_BASE_DRIVER = -wd188
-else
#
# CFLAGS for gcc/clang
#
@@ -59,9 +26,7 @@ ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1)
CFLAGS += -Wno-deprecated
endif
endif
-CFLAGS_BASE_DRIVER =
-endif
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
LDLIBS += -lrte_bus_pci
@@ -80,8 +45,11 @@ VPATH += $(SRCDIR)/base
# all source are stored in SRCS-y
#
SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbevf_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_main.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbevf_main.c
SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += sge.c
SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += t4_hw.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += t4vf_hw.c
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/cxgbe/base/adapter.h b/drivers/net/cxgbe/base/adapter.h
index f2057af1..55cb2e91 100644
--- a/drivers/net/cxgbe/base/adapter.h
+++ b/drivers/net/cxgbe/base/adapter.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
/* This file should not be included directly. Include common.h instead. */
@@ -68,6 +40,7 @@ struct port_info {
u8 port_type; /* firmware port type */
u8 mod_type; /* firmware module type */
u8 port_id; /* physical port ID */
+ u8 pidx; /* port index for this PF */
u8 tx_chan; /* associated channel */
u8 n_rx_qsets; /* # of rx qsets */
@@ -77,6 +50,7 @@ struct port_info {
u16 *rss; /* rss table */
u8 rss_mode; /* rss mode */
u16 rss_size; /* size of VI's RSS table slice */
+ u64 rss_hf; /* RSS Hash Function */
};
/* Enable or disable autonegotiation. If this is set to enable,
@@ -196,6 +170,7 @@ struct sge_eth_rxq { /* a SW Ethernet Rx queue */
* scenario where a packet needs 32 bytes.
*/
#define ETH_COALESCE_PKT_NUM 15
+#define ETH_COALESCE_VF_PKT_NUM 7
#define ETH_COALESCE_PKT_PER_DESC 2
struct tx_eth_coal_desc {
@@ -225,6 +200,10 @@ struct eth_coalesce {
unsigned int len;
unsigned int flits;
unsigned int max;
+ __u8 ethmacdst[ETHER_ADDR_LEN];
+ __u8 ethmacsrc[ETHER_ADDR_LEN];
+ __be16 ethtype;
+ __be16 vlantci;
};
struct sge_txq {
@@ -247,6 +226,7 @@ struct sge_txq {
unsigned int equeidx; /* last sent credit request */
unsigned int last_pidx; /* last pidx recorded by tx monitor */
unsigned int last_coal_idx;/* last coal-idx recorded by tx monitor */
+ unsigned int abs_id;
int db_disabled; /* doorbell state */
unsigned short db_pidx; /* doorbell producer index */
@@ -267,6 +247,7 @@ struct sge_eth_tx_stats { /* Ethernet tx queue statistics */
struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
struct sge_txq q;
struct rte_eth_dev *eth_dev; /* port that this queue belongs to */
+ struct rte_eth_dev_data *data;
struct sge_eth_tx_stats stats; /* queue statistics */
rte_spinlock_t txq_lock;
@@ -308,7 +289,7 @@ struct adapter {
struct rte_pci_device *pdev; /* associated rte pci device */
struct rte_eth_dev *eth_dev; /* first port's rte eth device */
struct adapter_params params; /* adapter parameters */
- struct port_info port[MAX_NPORTS]; /* ports belonging to this adapter */
+ struct port_info *port[MAX_NPORTS];/* ports belonging to this adapter */
struct sge sge; /* associated SGE */
/* support for single-threading access to adapter mailbox registers */
@@ -327,6 +308,18 @@ struct adapter {
int use_unpacked_mode; /* unpacked rx mode state */
};
+/**
+ * adap2pinfo - return the port_info of a port
+ * @adap: the adapter
+ * @idx: the port index
+ *
+ * Return the port_info structure for the port of the given index.
+ */
+static inline struct port_info *adap2pinfo(const struct adapter *adap, int idx)
+{
+ return adap->port[idx];
+}
+
#define CXGBE_PCI_REG(reg) rte_read32(reg)
static inline uint64_t cxgbe_read_addr64(volatile void *addr)
@@ -602,7 +595,7 @@ static inline int t4_os_find_pci_capability(struct adapter *adapter, int cap)
static inline void t4_os_set_hw_addr(struct adapter *adapter, int port_idx,
u8 hw_addr[])
{
- struct port_info *pi = &adapter->port[port_idx];
+ struct port_info *pi = adap2pinfo(adapter, port_idx);
ether_addr_copy((struct ether_addr *)hw_addr,
&pi->eth_dev->data->mac_addrs[0]);
@@ -687,18 +680,6 @@ static inline void t4_os_atomic_list_del(struct mbox_entry *entry,
t4_os_unlock(lock);
}
-/**
- * adap2pinfo - return the port_info of a port
- * @adap: the adapter
- * @idx: the port index
- *
- * Return the port_info structure for the port of the given index.
- */
-static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
-{
- return &adap->port[idx];
-}
-
void *t4_alloc_mem(size_t size);
void t4_free_mem(void *addr);
#define t4_os_alloc(_size) t4_alloc_mem((_size))
@@ -716,6 +697,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *gl);
int t4_sge_init(struct adapter *adap);
+int t4vf_sge_init(struct adapter *adap);
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
struct rte_eth_dev *eth_dev, uint16_t queue_id,
unsigned int iqid, int socket_id);
@@ -735,6 +717,7 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
unsigned int cnt);
int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
unsigned int budget, unsigned int *work_done);
-int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
+int cxgbe_write_rss(const struct port_info *pi, const u16 *queues);
+int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t flags);
#endif /* __T4_ADAPTER_H__ */
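With ports now allocated individually, adap2pinfo() is the accessor the rest of the driver uses to reach a port_info. A minimal iteration sketch using the for_each_port() helper from common.h (illustrative only, not part of the patch; "adap" is assumed to be a fully probed adapter):

#include <stdio.h>

/* Walk every port of an adapter via adap2pinfo(); struct adapter,
 * struct port_info and for_each_port() come from adapter.h/common.h. */
static void
cxgbe_example_dump_ports(struct adapter *adap)
{
	int i;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		printf("port index %d: phys port %u, tx chan %u\n",
		       i, (unsigned int)pi->port_id,
		       (unsigned int)pi->tx_chan);
	}
}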
diff --git a/drivers/net/cxgbe/base/common.h b/drivers/net/cxgbe/base/common.h
index 1eda57d0..155a3028 100644
--- a/drivers/net/cxgbe/base/common.h
+++ b/drivers/net/cxgbe/base/common.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#ifndef __CHELSIO_COMMON_H
@@ -36,6 +8,7 @@
#include "cxgbe_compat.h"
#include "t4_hw.h"
+#include "t4vf_hw.h"
#include "t4_chip_type.h"
#include "t4fw_interface.h"
@@ -62,16 +35,16 @@ enum dev_master { MASTER_CANT, MASTER_MAY, MASTER_MUST };
enum dev_state { DEV_STATE_UNINIT, DEV_STATE_INIT, DEV_STATE_ERR };
-enum {
+enum cc_pause {
PAUSE_RX = 1 << 0,
PAUSE_TX = 1 << 1,
PAUSE_AUTONEG = 1 << 2
};
-enum {
- FEC_RS = 1 << 0,
- FEC_BASER_RS = 1 << 1,
- FEC_RESERVED = 1 << 2,
+enum cc_fec {
+ FEC_AUTO = 1 << 0, /* IEEE 802.3 "automatic" */
+ FEC_RS = 1 << 1, /* Reed-Solomon */
+ FEC_BASER_RS = 1 << 2, /* BaseR/Reed-Solomon */
};
struct port_stats {
@@ -209,12 +182,50 @@ struct arch_specific_params {
u16 mps_tcam_size;
};
+/*
+ * Global Receive Side Scaling (RSS) parameters in host-native format.
+ */
+struct rss_params {
+ unsigned int mode; /* RSS mode */
+ union {
+ struct {
+ uint synmapen:1; /* SYN Map Enable */
+ uint syn4tupenipv6:1; /* en 4-tuple IPv6 SYNs hash */
+ uint syn2tupenipv6:1; /* en 2-tuple IPv6 SYNs hash */
+ uint syn4tupenipv4:1; /* en 4-tuple IPv4 SYNs hash */
+ uint syn2tupenipv4:1; /* en 2-tuple IPv4 SYNs hash */
+ uint ofdmapen:1; /* Offload Map Enable */
+ uint tnlmapen:1; /* Tunnel Map Enable */
+ uint tnlalllookup:1; /* Tunnel All Lookup */
+ uint hashtoeplitz:1; /* use Toeplitz hash */
+ } basicvirtual;
+ } u;
+};
+
+/*
+ * Maximum resources provisioned for a PCI VF.
+ */
+struct vf_resources {
+ unsigned int nvi; /* N virtual interfaces */
+ unsigned int neq; /* N egress Qs */
+ unsigned int nethctrl; /* N egress ETH or CTRL Qs */
+ unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */
+ unsigned int niq; /* N ingress Qs */
+ unsigned int tc; /* PCI-E traffic class */
+ unsigned int pmask; /* port access rights mask */
+ unsigned int nexactf; /* N exact MPS filters */
+ unsigned int r_caps; /* read capabilities */
+ unsigned int wx_caps; /* write/execute capabilities */
+};
+
struct adapter_params {
struct sge_params sge;
struct tp_params tp;
struct vpd_params vpd;
struct pci_params pci;
struct devlog_params devlog;
+ struct rss_params rss;
+ struct vf_resources vfres;
enum pcie_memwin drv_memwin;
unsigned int sf_size; /* serial flash size in bytes */
@@ -239,19 +250,40 @@ struct adapter_params {
struct arch_specific_params arch; /* chip specific params */
bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
+ u8 fw_caps_support; /* 32-bit Port Capabilities */
+};
+
+/* Firmware Port Capabilities types.
+ */
+typedef u16 fw_port_cap16_t; /* 16-bit Port Capabilities integral value */
+typedef u32 fw_port_cap32_t; /* 32-bit Port Capabilities integral value */
+
+enum fw_caps {
+ FW_CAPS_UNKNOWN = 0, /* 0'ed out initial state */
+ FW_CAPS16 = 1, /* old Firmware: 16-bit Port Capabilities */
+ FW_CAPS32 = 2, /* new Firmware: 32-bit Port Capabilities */
};
struct link_config {
- unsigned short supported; /* link capabilities */
- unsigned short advertising; /* advertised capabilities */
- unsigned int requested_speed; /* speed user has requested */
- unsigned int speed; /* actual link speed */
- unsigned char requested_fc; /* flow control user has requested */
- unsigned char fc; /* actual link flow control */
- unsigned char requested_fec; /* Forward Error Correction user */
- unsigned char fec; /* has requested and actual FEC */
- unsigned char autoneg; /* autonegotiating? */
- unsigned char link_ok; /* link up? */
+ fw_port_cap32_t pcaps; /* link capabilities */
+ fw_port_cap32_t acaps; /* advertised capabilities */
+
+ u32 requested_speed; /* speed (Mb/s) user has requested */
+ u32 speed; /* actual link speed (Mb/s) */
+
+ enum cc_pause requested_fc; /* flow control user has requested */
+ enum cc_pause fc; /* actual link flow control */
+
+ enum cc_fec auto_fec; /* Forward Error Correction
+ * "automatic" (IEEE 802.3)
+ */
+ enum cc_fec requested_fec; /* Forward Error Correction requested */
+ enum cc_fec fec; /* Forward Error Correction actual */
+
+ unsigned char autoneg; /* autonegotiating? */
+
+ unsigned char link_ok; /* link up? */
+ unsigned char link_down_rc; /* link down reason */
};
#include "adapter.h"
@@ -269,6 +301,11 @@ static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
delay, NULL);
}
+static inline int is_pf4(struct adapter *adap)
+{
+ return adap->pf == 4;
+}
+
#define for_each_port(adapter, iter) \
for (iter = 0; iter < (adapter)->params.nports; ++iter)
@@ -285,9 +322,12 @@ int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
enum dev_master master, enum dev_state *state);
int t4_fw_bye(struct adapter *adap, unsigned int mbox);
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
+int t4vf_fw_reset(struct adapter *adap);
int t4_fw_halt(struct adapter *adap, unsigned int mbox, int reset);
int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset);
int t4_fl_pkt_align(struct adapter *adap);
+int t4vf_fl_pkt_align(struct adapter *adap, u32 sge_control, u32 sge_control2);
+int t4vf_get_vfres(struct adapter *adap);
int t4_fixup_host_params_compat(struct adapter *adap, unsigned int page_size,
unsigned int cache_line_size,
enum chip_type chip_compat);
@@ -297,6 +337,13 @@ int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int nparams, const u32 *params,
u32 *val);
+int t4vf_query_params(struct adapter *adap, unsigned int nparams,
+ const u32 *params, u32 *vals);
+int t4vf_get_dev_params(struct adapter *adap);
+int t4vf_get_vpd_params(struct adapter *adap);
+int t4vf_get_rss_glb_config(struct adapter *adap);
+int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
+ const u32 *params, const u32 *vals);
int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
unsigned int pf, unsigned int vf,
unsigned int nparams, const u32 *params,
@@ -379,6 +426,21 @@ static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
}
+int t4vf_wr_mbox_core(struct adapter *, const void *, int, void *, bool);
+
+static inline int t4vf_wr_mbox(struct adapter *adapter, const void *cmd,
+ int size, void *rpl)
+{
+ return t4vf_wr_mbox_core(adapter, cmd, size, rpl, true);
+}
+
+static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
+ int size, void *rpl)
+{
+ return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false);
+}
+
+
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
unsigned int data_reg, u32 *vals, unsigned int nregs,
unsigned int start_idx);
@@ -394,22 +456,34 @@ unsigned int t4_get_mps_bg_map(struct adapter *adapter, unsigned int pidx);
unsigned int t4_get_tp_ch_map(struct adapter *adapter, unsigned int pidx);
const char *t4_get_port_type_description(enum fw_port_type port_type);
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
+void t4vf_get_port_stats(struct adapter *adapter, int pidx,
+ struct port_stats *p);
void t4_get_port_stats_offset(struct adapter *adap, int idx,
struct port_stats *stats,
struct port_stats *offset);
void t4_clr_port_stats(struct adapter *adap, int idx);
+void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
+ fw_port_cap32_t acaps);
void t4_reset_link_config(struct adapter *adap, int idx);
int t4_get_version_info(struct adapter *adapter);
void t4_dump_version_info(struct adapter *adapter);
int t4_get_flash_params(struct adapter *adapter);
int t4_get_chip_type(struct adapter *adap, int ver);
int t4_prep_adapter(struct adapter *adapter);
+int t4vf_prep_adapter(struct adapter *adapter);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
+int t4vf_port_init(struct adapter *adap);
int t4_init_rss_mode(struct adapter *adap, int mbox);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
unsigned int flags, unsigned int defq);
+int t4_read_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+ u64 *flags, unsigned int *defq);
+void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
+ unsigned int start_index, unsigned int rw);
+void t4_write_rss_key(struct adapter *adap, u32 *key, int idx);
+void t4_read_rss_key(struct adapter *adap, u32 *key);
enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid,
@@ -421,8 +495,10 @@ int t4_init_tp_params(struct adapter *adap);
int t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
unsigned int t4_get_regs_len(struct adapter *adap);
+unsigned int t4vf_get_pf_from_vf(struct adapter *adap);
void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size);
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
int t4_seeprom_wp(struct adapter *adapter, int enable);
+fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16);
#endif /* __CHELSIO_COMMON_H */
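The new is_pf4() helper is what the later t4_hw.c hunks use to pick between the PF and VF mailbox paths. The recurring dispatch pattern, condensed (illustrative only, not part of the patch):

/* Pattern repeated throughout t4_hw.c after this change: choose the
 * mailbox write helper according to whether we run as PF4 or a VF. */
static int
t4_example_send_cmd(struct adapter *adap, int mbox,
		    void *cmd, int size, void *rpl)
{
	if (is_pf4(adap))
		return t4_wr_mbox(adap, mbox, cmd, size, rpl);
	return t4vf_wr_mbox(adap, cmd, size, rpl);
}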
diff --git a/drivers/net/cxgbe/base/t4_chip_type.h b/drivers/net/cxgbe/base/t4_chip_type.h
index cd7a9282..c0c5d0b2 100644
--- a/drivers/net/cxgbe/base/t4_chip_type.h
+++ b/drivers/net/cxgbe/base/t4_chip_type.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#ifndef __T4_CHIP_TYPE_H__
diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c
index 56f38c83..e5ef73b6 100644
--- a/drivers/net/cxgbe/base/t4_hw.c
+++ b/drivers/net/cxgbe/base/t4_hw.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#include <netinet/in.h>
@@ -55,9 +27,6 @@
#include "t4_regs_values.h"
#include "t4fw_interface.h"
-static void init_link_config(struct link_config *lc, unsigned int pcaps,
- unsigned int acaps);
-
/**
* t4_read_mtu_tbl - returns the values in the HW path MTU table
* @adap: the adapter
@@ -2167,6 +2136,91 @@ int t4_seeprom_wp(struct adapter *adapter, int enable)
}
/**
+ * t4_fw_tp_pio_rw - Access TP PIO through LDST
+ * @adap: the adapter
+ * @vals: where the indirect register values are stored/written
+ * @nregs: how many indirect registers to read/write
+ * @start_index: index of the first indirect register to read/write
+ * @rw: Read (1) or Write (0)
+ *
+ * Access TP PIO registers through LDST
+ */
+void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
+ unsigned int start_index, unsigned int rw)
+{
+ int cmd = FW_LDST_ADDRSPC_TP_PIO;
+ struct fw_ldst_cmd c;
+ unsigned int i;
+ int ret;
+
+ for (i = 0 ; i < nregs; i++) {
+ memset(&c, 0, sizeof(c));
+ c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
+ F_FW_CMD_REQUEST |
+ (rw ? F_FW_CMD_READ :
+ F_FW_CMD_WRITE) |
+ V_FW_LDST_CMD_ADDRSPACE(cmd));
+ c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+
+ c.u.addrval.addr = cpu_to_be32(start_index + i);
+ c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ if (ret == 0) {
+ if (rw)
+ vals[i] = be32_to_cpu(c.u.addrval.val);
+ }
+ }
+}
+
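A minimal usage sketch, assuming a valid struct adapter *adap from the surrounding driver code: reading a run of TP indirect registers over the firmware LDST path (rw = 1 selects read, rw = 0 selects write).

	u32 tp_vals[4];

	/* Read 4 consecutive TP indirect registers starting at
	 * A_TP_RSS_SECRET_KEY0; on success tp_vals[i] holds the value the
	 * firmware returned for register start_index + i.
	 */
	t4_fw_tp_pio_rw(adap, tp_vals, 4, A_TP_RSS_SECRET_KEY0, 1);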
+/**
+ * t4_read_rss_key - read the global RSS key
+ * @adap: the adapter
+ * @key: 10-entry array holding the 320-bit RSS key
+ *
+ * Reads the global 320-bit RSS key.
+ */
+void t4_read_rss_key(struct adapter *adap, u32 *key)
+{
+ t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 1);
+}
+
+/**
+ * t4_write_rss_key - program one of the RSS keys
+ * @adap: the adapter
+ * @key: 10-entry array holding the 320-bit RSS key
+ * @idx: which RSS key to write
+ *
+ * Writes one of the RSS keys with the given 320-bit value. If @idx is
+ * 0..15 the corresponding entry in the RSS key table is written,
+ * otherwise the global RSS key is written.
+ */
+void t4_write_rss_key(struct adapter *adap, u32 *key, int idx)
+{
+ u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
+ u8 rss_key_addr_cnt = 16;
+
+ /* T6 and later: KeyMode 3 (per-VF and per-VF scramble) allows access
+ * to key addresses 16-63 by using KeyWrAddrX as the upper two bits
+ * (index[5:4]) of the key-table index.
+ */
+ if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
+ (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
+ rss_key_addr_cnt = 32;
+
+ t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 0);
+
+ if (idx >= 0 && idx < rss_key_addr_cnt) {
+ if (rss_key_addr_cnt > 16)
+ t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
+ V_KEYWRADDRX(idx >> 4) |
+ V_T6_VFWRADDR(idx) | F_KEYWREN);
+ else
+ t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
+ V_KEYWRADDR(idx) | F_KEYWREN);
+ }
+}
+
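For context, a hedged sketch of how the two key helpers compose; the adapter handle and the entropy source are assumptions supplied by the caller, and refresh_rss_key is a hypothetical name.

	/* Hypothetical helper: refresh the global RSS key in place.
	 * Passing idx < 0 takes the "global key" path in t4_write_rss_key(),
	 * so no per-VF key-table slot is programmed.
	 */
	static void refresh_rss_key(struct adapter *adap, const u32 seed[10])
	{
		u32 key[10];
		unsigned int i;

		t4_read_rss_key(adap, key);
		for (i = 0; i < 10; i++)
			key[i] ^= seed[i];
		t4_write_rss_key(adap, key, -1);
	}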
+/**
* t4_config_rss_range - configure a portion of the RSS mapping table
* @adapter: the adapter
* @mbox: mbox to use for the FW command
@@ -2257,7 +2311,11 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
* Send this portion of the RSS table update to the firmware;
* bail out on any errors.
*/
- ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
+ if (is_pf4(adapter))
+ ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd),
+ NULL);
+ else
+ ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
if (ret)
return ret;
}
@@ -2287,7 +2345,44 @@ int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
c.retval_len16 = cpu_to_be32(FW_LEN16(c));
c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
- return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
+ if (is_pf4(adapter))
+ return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
+ else
+ return t4vf_wr_mbox(adapter, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_read_config_vi_rss - read the configured per VI RSS settings
+ * @adapter: the adapter
+ * @mbox: mbox to use for the FW command
+ * @viid: the VI id
+ * @flags: where to place the configured flags
+ * @defq: where to place the id of the default RSS queue for the VI.
+ *
+ * Read configured VI-specific RSS properties.
+ */
+int t4_read_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+ u64 *flags, unsigned int *defq)
+{
+ struct fw_rss_vi_config_cmd c;
+ unsigned int result;
+ int ret;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ |
+ V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+ ret = t4_wr_mbox(adapter, mbox, &c, sizeof(c), &c);
+ if (!ret) {
+ result = be32_to_cpu(c.u.basicvirtual.defaultq_to_udpen);
+ if (defq)
+ *defq = G_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(result);
+ if (flags)
+ *flags = result & M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ;
+ }
+
+ return ret;
}
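A short sketch of reading back the VI RSS configuration; viid and mbox are placeholders the caller would already hold.

	u64 rss_flags;
	unsigned int rss_defq;
	int err;

	/* Fetch the flags and default queue programmed for this VI. */
	err = t4_read_config_vi_rss(adapter, mbox, viid, &rss_flags, &rss_defq);
	if (err)
		return err;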
/**
@@ -2670,14 +2765,142 @@ void t4_dump_version_info(struct adapter *adapter)
G_FW_HDR_FW_VER_BUILD(adapter->params.er_vers));
}
-#define ADVERT_MASK (V_FW_PORT_CAP_SPEED(M_FW_PORT_CAP_SPEED) | \
- FW_PORT_CAP_ANEG)
+#define ADVERT_MASK (V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED) | \
+ FW_PORT_CAP32_ANEG)
+/**
+ * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
+ * @caps16: a 16-bit Port Capabilities value
+ *
+ * Returns the equivalent 32-bit Port Capabilities value.
+ */
+fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
+{
+ fw_port_cap32_t caps32 = 0;
+
+#define CAP16_TO_CAP32(__cap) \
+ do { \
+ if (caps16 & FW_PORT_CAP_##__cap) \
+ caps32 |= FW_PORT_CAP32_##__cap; \
+ } while (0)
+
+ CAP16_TO_CAP32(SPEED_100M);
+ CAP16_TO_CAP32(SPEED_1G);
+ CAP16_TO_CAP32(SPEED_25G);
+ CAP16_TO_CAP32(SPEED_10G);
+ CAP16_TO_CAP32(SPEED_40G);
+ CAP16_TO_CAP32(SPEED_100G);
+ CAP16_TO_CAP32(FC_RX);
+ CAP16_TO_CAP32(FC_TX);
+ CAP16_TO_CAP32(ANEG);
+ CAP16_TO_CAP32(MDIX);
+ CAP16_TO_CAP32(MDIAUTO);
+ CAP16_TO_CAP32(FEC_RS);
+ CAP16_TO_CAP32(FEC_BASER_RS);
+ CAP16_TO_CAP32(802_3_PAUSE);
+ CAP16_TO_CAP32(802_3_ASM_DIR);
+
+#undef CAP16_TO_CAP32
+
+ return caps32;
+}
+
+/**
+ * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
+ * @caps32: a 32-bit Port Capabilities value
+ *
+ * Returns the equivalent 16-bit Port Capabilities value. Note that
+ * not all 32-bit Port Capabilities can be represented in the 16-bit
+ * Port Capabilities and some fields/values may not make it.
+ */
+static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
+{
+ fw_port_cap16_t caps16 = 0;
+
+#define CAP32_TO_CAP16(__cap) \
+ do { \
+ if (caps32 & FW_PORT_CAP32_##__cap) \
+ caps16 |= FW_PORT_CAP_##__cap; \
+ } while (0)
+
+ CAP32_TO_CAP16(SPEED_100M);
+ CAP32_TO_CAP16(SPEED_1G);
+ CAP32_TO_CAP16(SPEED_10G);
+ CAP32_TO_CAP16(SPEED_25G);
+ CAP32_TO_CAP16(SPEED_40G);
+ CAP32_TO_CAP16(SPEED_100G);
+ CAP32_TO_CAP16(FC_RX);
+ CAP32_TO_CAP16(FC_TX);
+ CAP32_TO_CAP16(802_3_PAUSE);
+ CAP32_TO_CAP16(802_3_ASM_DIR);
+ CAP32_TO_CAP16(ANEG);
+ CAP32_TO_CAP16(MDIX);
+ CAP32_TO_CAP16(MDIAUTO);
+ CAP32_TO_CAP16(FEC_RS);
+ CAP32_TO_CAP16(FEC_BASER_RS);
+
+#undef CAP32_TO_CAP16
+
+ return caps16;
+}
+
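Illustrative values only, assuming the capability bits defined in t4fw_interface.h: the 16-to-32 mapping is exact for the bits listed above, while the reverse direction silently drops capabilities with no 16-bit encoding (for example the 50G speed bit).

	fw_port_cap16_t caps16 = FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG;
	fw_port_cap32_t caps32 = fwcaps16_to_caps32(caps16);
	/* caps32 == FW_PORT_CAP32_SPEED_10G | FW_PORT_CAP32_ANEG */

	caps32 |= FW_PORT_CAP32_SPEED_50G;
	/* fwcaps32_to_caps16(caps32) == caps16: the 50G bit is lost */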
+/* Translate Firmware Pause specification to Common Code */
+static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause)
+{
+ enum cc_pause cc_pause = 0;
+
+ if (fw_pause & FW_PORT_CAP32_FC_RX)
+ cc_pause |= PAUSE_RX;
+ if (fw_pause & FW_PORT_CAP32_FC_TX)
+ cc_pause |= PAUSE_TX;
+
+ return cc_pause;
+}
+
+/* Translate Common Code Pause Frame specification into Firmware */
+static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
+{
+ fw_port_cap32_t fw_pause = 0;
+
+ if (cc_pause & PAUSE_RX)
+ fw_pause |= FW_PORT_CAP32_FC_RX;
+ if (cc_pause & PAUSE_TX)
+ fw_pause |= FW_PORT_CAP32_FC_TX;
+
+ return fw_pause;
+}
+
+/* Translate Firmware Forward Error Correction specification to Common Code */
+static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
+{
+ enum cc_fec cc_fec = 0;
+
+ if (fw_fec & FW_PORT_CAP32_FEC_RS)
+ cc_fec |= FEC_RS;
+ if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
+ cc_fec |= FEC_BASER_RS;
+
+ return cc_fec;
+}
+
+/* Translate Common Code Forward Error Correction specification to Firmware */
+static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
+{
+ fw_port_cap32_t fw_fec = 0;
+
+ if (cc_fec & FEC_RS)
+ fw_fec |= FW_PORT_CAP32_FEC_RS;
+ if (cc_fec & FEC_BASER_RS)
+ fw_fec |= FW_PORT_CAP32_FEC_BASER_RS;
+
+ return fw_fec;
+}
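A small sanity sketch; the translators above are pure bit mappings, so Common Code pause and FEC values survive a round trip through the firmware encoding.

	enum cc_pause pause = PAUSE_RX | PAUSE_TX;
	fw_port_cap32_t fw_pause = cc_to_fwcap_pause(pause);

	/* fw_pause == (FW_PORT_CAP32_FC_RX | FW_PORT_CAP32_FC_TX) and
	 * fwcap_to_cc_pause(fw_pause) == pause; the FEC pair behaves the
	 * same way for FEC_RS / FEC_BASER_RS.
	 */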
/**
* t4_link_l1cfg - apply link configuration to MAC/PHY
- * @phy: the PHY to setup
- * @mac: the MAC to setup
- * @lc: the requested link configuration
+ * @adapter: the adapter
+ * @mbox: the Firmware Mailbox to use
+ * @port: the Port ID
+ * @lc: the Port's Link Configuration
*
* Set up a port's MAC and PHY according to a desired link configuration.
* - If the PHY can auto-negotiate first decide what to advertise, then
@@ -2689,48 +2912,60 @@ void t4_dump_version_info(struct adapter *adapter)
int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
struct link_config *lc)
{
- struct fw_port_cmd c;
- unsigned int mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
- unsigned int fc, fec;
+ unsigned int fw_mdi = V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO);
+ unsigned int fw_caps = adap->params.fw_caps_support;
+ fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap;
+ struct fw_port_cmd cmd;
lc->link_ok = 0;
- fc = 0;
- if (lc->requested_fc & PAUSE_RX)
- fc |= FW_PORT_CAP_FC_RX;
- if (lc->requested_fc & PAUSE_TX)
- fc |= FW_PORT_CAP_FC_TX;
-
- fec = 0;
- if (lc->requested_fec & FEC_RS)
- fec |= FW_PORT_CAP_FEC_RS;
- if (lc->requested_fec & FEC_BASER_RS)
- fec |= FW_PORT_CAP_FEC_BASER_RS;
- if (lc->requested_fec & FEC_RESERVED)
- fec |= FW_PORT_CAP_FEC_RESERVED;
- memset(&c, 0, sizeof(c));
- c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
- F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
- V_FW_PORT_CMD_PORTID(port));
- c.action_to_len16 =
- cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
- FW_LEN16(c));
-
- if (!(lc->supported & FW_PORT_CAP_ANEG)) {
- c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
- fc | fec);
+ fw_fc = cc_to_fwcap_pause(lc->requested_fc);
+
+ /* Convert Common Code Forward Error Correction settings into the
+ * Firmware's API. If the current Requested FEC has "Automatic"
+ * (IEEE 802.3) specified, then we use whatever the Firmware
+ * sent us as part of its IEEE 802.3-based interpretation of
+ * the Transceiver Module EPROM FEC parameters. Otherwise we
+ * use whatever is in the current Requested FEC settings.
+ */
+ if (lc->requested_fec & FEC_AUTO)
+ cc_fec = lc->auto_fec;
+ else
+ cc_fec = lc->requested_fec;
+ fw_fec = cc_to_fwcap_fec(cc_fec);
+
+ /* Figure out what our Requested Port Capabilities are going to be.
+ */
+ if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
+ rcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec;
lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
- lc->fec = lc->requested_fec;
+ lc->fec = cc_fec;
} else if (lc->autoneg == AUTONEG_DISABLE) {
- c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc |
- fec | mdi);
+ rcap = lc->requested_speed | fw_fc | fw_fec | fw_mdi;
lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
- lc->fec = lc->requested_fec;
+ lc->fec = cc_fec;
} else {
- c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | fec | mdi);
+ rcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
}
- return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ /* And send that on to the Firmware ...
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
+ V_FW_PORT_CMD_PORTID(port));
+ cmd.action_to_len16 =
+ cpu_to_be32(V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16 ?
+ FW_PORT_ACTION_L1_CFG :
+ FW_PORT_ACTION_L1_CFG32) |
+ FW_LEN16(cmd));
+
+ if (fw_caps == FW_CAPS16)
+ cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
+ else
+ cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
+
+ return t4_wr_mbox(adap, mbox, &cmd, sizeof(cmd), NULL);
}
/**
@@ -3823,12 +4058,17 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
memset(&c, 0, sizeof(c));
c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
- F_FW_CMD_EXEC | V_FW_VI_CMD_PFN(pf) |
- V_FW_VI_CMD_VFN(vf));
+ F_FW_CMD_EXEC);
+ if (is_pf4(adap))
+ c.op_to_vfn |= cpu_to_be32(V_FW_VI_CMD_PFN(pf) |
+ V_FW_VI_CMD_VFN(vf));
c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
- return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (is_pf4(adap))
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ else
+ return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
}
/**
@@ -3874,7 +4114,11 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
- return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
+ if (is_pf4(adap))
+ return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL,
+ sleep_ok);
+ else
+ return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
}
/**
@@ -3921,7 +4165,10 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
V_FW_VI_MAC_CMD_IDX(idx));
memcpy(p->macaddr, addr, sizeof(p->macaddr));
- ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (is_pf4(adap))
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ else
+ ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);
if (ret == 0) {
ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
if (ret >= max_mac_addr)
@@ -3955,7 +4202,10 @@ int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
V_FW_VI_ENABLE_CMD_EEN(tx_en) |
V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
FW_LEN16(c));
- return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
+ if (is_pf4(adap))
+ return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
+ else
+ return t4vf_wr_mbox_ns(adap, &c, sizeof(c), NULL);
}
/**
@@ -3996,15 +4246,20 @@ int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
memset(&c, 0, sizeof(c));
c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
- F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
- V_FW_IQ_CMD_VFN(vf));
+ F_FW_CMD_EXEC);
c.alloc_to_len16 = cpu_to_be32(V_FW_IQ_CMD_IQSTART(start) |
V_FW_IQ_CMD_IQSTOP(!start) |
FW_LEN16(c));
c.iqid = cpu_to_be16(iqid);
c.fl0id = cpu_to_be16(fl0id);
c.fl1id = cpu_to_be16(fl1id);
- return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ if (is_pf4(adap)) {
+ c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
+ V_FW_IQ_CMD_VFN(vf));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ } else {
+ return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
+ }
}
/**
@@ -4028,14 +4283,19 @@ int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
memset(&c, 0, sizeof(c));
c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
- F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
- V_FW_IQ_CMD_VFN(vf));
+ F_FW_CMD_EXEC);
+ if (is_pf4(adap))
+ c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
+ V_FW_IQ_CMD_VFN(vf));
c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
c.iqid = cpu_to_be16(iqid);
c.fl0id = cpu_to_be16(fl0id);
c.fl1id = cpu_to_be16(fl1id);
- return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ if (is_pf4(adap))
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ else
+ return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
}
/**
@@ -4055,12 +4315,179 @@ int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
memset(&c, 0, sizeof(c));
c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
- F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
- V_FW_EQ_ETH_CMD_PFN(pf) |
- V_FW_EQ_ETH_CMD_VFN(vf));
+ F_FW_CMD_REQUEST | F_FW_CMD_EXEC);
+ if (is_pf4(adap))
+ c.op_to_vfn |= cpu_to_be32(V_FW_EQ_ETH_CMD_PFN(pf) |
+ V_FW_EQ_ETH_CMD_VFN(vf));
c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
- return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ if (is_pf4(adap))
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ else
+ return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_link_down_rc_str - return a string for a Link Down Reason Code
+ * @link_down_rc: Link Down Reason Code
+ *
+ * Returns a string representation of the Link Down Reason Code.
+ */
+static const char *t4_link_down_rc_str(unsigned char link_down_rc)
+{
+ static const char * const reason[] = {
+ "Link Down",
+ "Remote Fault",
+ "Auto-negotiation Failure",
+ "Reserved",
+ "Insufficient Airflow",
+ "Unable To Determine Reason",
+ "No RX Signal Detected",
+ "Reserved",
+ };
+
+ if (link_down_rc >= ARRAY_SIZE(reason))
+ return "Bad Reason Code";
+
+ return reason[link_down_rc];
+}
+
+/* Return the highest speed set in the port capabilities, in Mb/s. */
+static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
+{
+#define TEST_SPEED_RETURN(__caps_speed, __speed) \
+ do { \
+ if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
+ return __speed; \
+ } while (0)
+
+ TEST_SPEED_RETURN(100G, 100000);
+ TEST_SPEED_RETURN(50G, 50000);
+ TEST_SPEED_RETURN(40G, 40000);
+ TEST_SPEED_RETURN(25G, 25000);
+ TEST_SPEED_RETURN(10G, 10000);
+ TEST_SPEED_RETURN(1G, 1000);
+ TEST_SPEED_RETURN(100M, 100);
+
+#undef TEST_SPEED_RETURN
+
+ return 0;
+}
+
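Worked example: only the highest advertised speed is reported, in Mb/s.

	fw_port_cap32_t caps = FW_PORT_CAP32_SPEED_1G | FW_PORT_CAP32_SPEED_25G;

	/* fwcap_to_speed(caps) == 25000, not 1000: the macro chain tests
	 * from 100G downwards and returns on the first match.
	 */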
+/**
+ * t4_handle_get_port_info - process a FW reply message
+ * @pi: the port info
+ * @rpl: start of the FW message
+ *
+ * Processes a GET_PORT_INFO FW reply message.
+ */
+static void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
+{
+ const struct fw_port_cmd *cmd = (const void *)rpl;
+ int action = G_FW_PORT_CMD_ACTION(be32_to_cpu(cmd->action_to_len16));
+ fw_port_cap32_t pcaps, acaps, linkattr;
+ struct link_config *lc = &pi->link_cfg;
+ struct adapter *adapter = pi->adapter;
+ enum fw_port_module_type mod_type;
+ enum fw_port_type port_type;
+ unsigned int speed, fc, fec;
+ int link_ok, linkdnrc;
+
+ /* Extract the various fields from the Port Information message.
+ */
+ switch (action) {
+ case FW_PORT_ACTION_GET_PORT_INFO: {
+ u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
+
+ link_ok = (lstatus & F_FW_PORT_CMD_LSTATUS) != 0;
+ linkdnrc = G_FW_PORT_CMD_LINKDNRC(lstatus);
+ port_type = G_FW_PORT_CMD_PTYPE(lstatus);
+ mod_type = G_FW_PORT_CMD_MODTYPE(lstatus);
+ pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap));
+ acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap));
+
+ /* Unfortunately the format of the Link Status in the old
+ * 16-bit Port Information message isn't the same as the
+ * 16-bit Port Capabilities bitfield used everywhere else ...
+ */
+ linkattr = 0;
+ if (lstatus & F_FW_PORT_CMD_RXPAUSE)
+ linkattr |= FW_PORT_CAP32_FC_RX;
+ if (lstatus & F_FW_PORT_CMD_TXPAUSE)
+ linkattr |= FW_PORT_CAP32_FC_TX;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
+ linkattr |= FW_PORT_CAP32_SPEED_100M;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
+ linkattr |= FW_PORT_CAP32_SPEED_1G;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
+ linkattr |= FW_PORT_CAP32_SPEED_10G;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
+ linkattr |= FW_PORT_CAP32_SPEED_25G;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
+ linkattr |= FW_PORT_CAP32_SPEED_40G;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
+ linkattr |= FW_PORT_CAP32_SPEED_100G;
+
+ break;
+ }
+
+ case FW_PORT_ACTION_GET_PORT_INFO32: {
+ u32 lstatus32 =
+ be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);
+
+ link_ok = (lstatus32 & F_FW_PORT_CMD_LSTATUS32) != 0;
+ linkdnrc = G_FW_PORT_CMD_LINKDNRC32(lstatus32);
+ port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
+ mod_type = G_FW_PORT_CMD_MODTYPE32(lstatus32);
+ pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
+ acaps = be32_to_cpu(cmd->u.info32.acaps32);
+ linkattr = be32_to_cpu(cmd->u.info32.linkattr32);
+ break;
+ }
+
+ default:
+ dev_warn(adapter, "Handle Port Information: Bad Command/Action %#x\n",
+ be32_to_cpu(cmd->action_to_len16));
+ return;
+ }
+
+ fec = fwcap_to_cc_fec(acaps);
+
+ fc = fwcap_to_cc_pause(linkattr);
+ speed = fwcap_to_speed(linkattr);
+
+ if (mod_type != pi->mod_type) {
+ lc->auto_fec = fec;
+ pi->port_type = port_type;
+ pi->mod_type = mod_type;
+ t4_os_portmod_changed(adapter, pi->pidx);
+ }
+ if (link_ok != lc->link_ok || speed != lc->speed ||
+ fc != lc->fc || fec != lc->fec) { /* something changed */
+ if (!link_ok && lc->link_ok) {
+ lc->link_down_rc = linkdnrc;
+ dev_warn(adapter, "Port %d link down, reason: %s\n",
+ pi->tx_chan, t4_link_down_rc_str(linkdnrc));
+ }
+ lc->link_ok = link_ok;
+ lc->speed = speed;
+ lc->fc = fc;
+ lc->fec = fec;
+ lc->pcaps = pcaps;
+ lc->acaps = acaps & ADVERT_MASK;
+
+ if (lc->acaps & FW_PORT_CAP32_ANEG) {
+ lc->autoneg = AUTONEG_ENABLE;
+ } else {
+ /* When autoneg is disabled, the user needs to set a
+ * single speed.
+ * Similar to cxgb4_ethtool.c: set_link_ksettings
+ */
+ lc->acaps = 0;
+ lc->requested_speed = fwcap_to_speed(acaps);
+ lc->autoneg = AUTONEG_DISABLE;
+ }
+ }
}
/**
@@ -4084,67 +4511,21 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
unsigned int action =
G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
- if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
+ if (opcode == FW_PORT_CMD &&
+ (action == FW_PORT_ACTION_GET_PORT_INFO ||
+ action == FW_PORT_ACTION_GET_PORT_INFO32)) {
/* link/module state change message */
- unsigned int speed = 0, fc = 0, i;
int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
struct port_info *pi = NULL;
- struct link_config *lc;
- u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
- int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
- u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
-
- if (stat & F_FW_PORT_CMD_RXPAUSE)
- fc |= PAUSE_RX;
- if (stat & F_FW_PORT_CMD_TXPAUSE)
- fc |= PAUSE_TX;
- if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
- speed = ETH_SPEED_NUM_100M;
- else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
- speed = ETH_SPEED_NUM_1G;
- else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
- speed = ETH_SPEED_NUM_10G;
- else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
- speed = ETH_SPEED_NUM_25G;
- else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
- speed = ETH_SPEED_NUM_40G;
- else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
- speed = ETH_SPEED_NUM_100G;
+ int i;
for_each_port(adap, i) {
pi = adap2pinfo(adap, i);
if (pi->tx_chan == chan)
break;
}
- lc = &pi->link_cfg;
- if (mod != pi->mod_type) {
- pi->mod_type = mod;
- t4_os_portmod_changed(adap, i);
- }
- if (link_ok != lc->link_ok || speed != lc->speed ||
- fc != lc->fc) { /* something changed */
- if (!link_ok && lc->link_ok) {
- static const char * const reason[] = {
- "Link Down",
- "Remote Fault",
- "Auto-negotiation Failure",
- "Reserved",
- "Insufficient Airflow",
- "Unable To Determine Reason",
- "No RX Signal Detected",
- "Reserved",
- };
- unsigned int rc = G_FW_PORT_CMD_LINKDNRC(stat);
-
- dev_warn(adap, "Port %d link down, reason: %s\n",
- chan, reason[rc]);
- }
- lc->link_ok = link_ok;
- lc->speed = speed;
- lc->fc = fc;
- lc->supported = be16_to_cpu(p->u.info.pcap);
- }
+ t4_handle_get_port_info(pi, rpl);
} else {
dev_warn(adap, "Unknown firmware reply %d\n", opcode);
return -EINVAL;
@@ -4173,12 +4554,10 @@ void t4_reset_link_config(struct adapter *adap, int idx)
* Initializes the SW state maintained for each link, including the link's
* capabilities and default speed/flow-control/autonegotiation settings.
*/
-static void init_link_config(struct link_config *lc, unsigned int pcaps,
- unsigned int acaps)
+void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
+ fw_port_cap32_t acaps)
{
- unsigned int fec;
-
- lc->supported = pcaps;
+ lc->pcaps = pcaps;
lc->requested_speed = 0;
lc->speed = 0;
lc->requested_fc = 0;
@@ -4188,21 +4567,16 @@ static void init_link_config(struct link_config *lc, unsigned int pcaps,
* For Forward Error Control, we default to whatever the Firmware
* tells us the Link is currently advertising.
*/
- fec = 0;
- if (acaps & FW_PORT_CAP_FEC_RS)
- fec |= FEC_RS;
- if (acaps & FW_PORT_CAP_FEC_BASER_RS)
- fec |= FEC_BASER_RS;
- if (acaps & FW_PORT_CAP_FEC_RESERVED)
- fec |= FEC_RESERVED;
- lc->requested_fec = fec;
- lc->fec = fec;
-
- if (lc->supported & FW_PORT_CAP_ANEG) {
- lc->advertising = lc->supported & ADVERT_MASK;
+ lc->auto_fec = fwcap_to_cc_fec(acaps);
+ lc->requested_fec = FEC_AUTO;
+ lc->fec = lc->auto_fec;
+
+ if (lc->pcaps & FW_PORT_CAP32_ANEG) {
+ lc->acaps = lc->pcaps & ADVERT_MASK;
lc->autoneg = AUTONEG_ENABLE;
+ lc->requested_fc |= PAUSE_AUTONEG;
} else {
- lc->advertising = 0;
+ lc->acaps = 0;
lc->autoneg = AUTONEG_DISABLE;
}
}
@@ -4723,46 +5097,95 @@ int t4_init_rss_mode(struct adapter *adap, int mbox)
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
- u8 addr[6];
+ unsigned int fw_caps = adap->params.fw_caps_support;
+ fw_port_cap32_t pcaps, acaps;
+ enum fw_port_type port_type;
+ struct fw_port_cmd cmd;
int ret, i, j = 0;
- struct fw_port_cmd c;
+ int mdio_addr;
+ u32 action;
+ u8 addr[6];
- memset(&c, 0, sizeof(c));
+ memset(&cmd, 0, sizeof(cmd));
for_each_port(adap, i) {
+ struct port_info *pi = adap2pinfo(adap, i);
unsigned int rss_size = 0;
- struct port_info *p = adap2pinfo(adap, i);
while ((adap->params.portvec & (1 << j)) == 0)
j++;
- c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
- F_FW_CMD_REQUEST | F_FW_CMD_READ |
- V_FW_PORT_CMD_PORTID(j));
- c.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(
- FW_PORT_ACTION_GET_PORT_INFO) |
- FW_LEN16(c));
- ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ /* If we haven't yet determined whether we're talking to
+ * Firmware which knows the new 32-bit Port Capabilities, it's
+ * time to find out now. This will also tell new Firmware to
+ * send us Port Status Updates using the new 32-bit Port
+ * Capabilities version of the Port Information message.
+ */
+ if (fw_caps == FW_CAPS_UNKNOWN) {
+ u32 param, val, caps;
+
+ caps = FW_PARAMS_PARAM_PFVF_PORT_CAPS32;
+ param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
+ V_FW_PARAMS_PARAM_X(caps));
+ val = 1;
+ ret = t4_set_params(adap, mbox, pf, vf, 1, &param,
+ &val);
+ fw_caps = ret == 0 ? FW_CAPS32 : FW_CAPS16;
+ adap->params.fw_caps_support = fw_caps;
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ |
+ V_FW_PORT_CMD_PORTID(j));
+ action = fw_caps == FW_CAPS16 ? FW_PORT_ACTION_GET_PORT_INFO :
+ FW_PORT_ACTION_GET_PORT_INFO32;
+ cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(action) |
+ FW_LEN16(cmd));
+ ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
if (ret)
return ret;
+ /* Extract the various fields from the Port Information message.
+ */
+ if (fw_caps == FW_CAPS16) {
+ u32 lstatus =
+ be32_to_cpu(cmd.u.info.lstatus_to_modtype);
+
+ port_type = G_FW_PORT_CMD_PTYPE(lstatus);
+ mdio_addr = (lstatus & F_FW_PORT_CMD_MDIOCAP) ?
+ (int)G_FW_PORT_CMD_MDIOADDR(lstatus) : -1;
+ pcaps = be16_to_cpu(cmd.u.info.pcap);
+ acaps = be16_to_cpu(cmd.u.info.acap);
+ pcaps = fwcaps16_to_caps32(pcaps);
+ acaps = fwcaps16_to_caps32(acaps);
+ } else {
+ u32 lstatus32 =
+ be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);
+
+ port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
+ mdio_addr = (lstatus32 & F_FW_PORT_CMD_MDIOCAP32) ?
+ (int)G_FW_PORT_CMD_MDIOADDR32(lstatus32) :
+ -1;
+ pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
+ acaps = be32_to_cpu(cmd.u.info32.acaps32);
+ }
+
ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
if (ret < 0)
return ret;
- p->viid = ret;
- p->tx_chan = j;
- p->rss_size = rss_size;
+ pi->viid = ret;
+ pi->tx_chan = j;
+ pi->rss_size = rss_size;
t4_os_set_hw_addr(adap, i, addr);
- ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
- p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
- G_FW_PORT_CMD_MDIOADDR(ret) : -1;
- p->port_type = G_FW_PORT_CMD_PTYPE(ret);
- p->mod_type = FW_PORT_MOD_TYPE_NA;
+ pi->port_type = port_type;
+ pi->mdio_addr = mdio_addr;
+ pi->mod_type = FW_PORT_MOD_TYPE_NA;
- init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap),
- be16_to_cpu(c.u.info.acap));
+ init_link_config(&pi->link_cfg, pcaps, acaps);
j++;
}
return 0;
diff --git a/drivers/net/cxgbe/base/t4_hw.h b/drivers/net/cxgbe/base/t4_hw.h
index 07498841..ac12afc0 100644
--- a/drivers/net/cxgbe/base/t4_hw.h
+++ b/drivers/net/cxgbe/base/t4_hw.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#ifndef __T4_HW_H
diff --git a/drivers/net/cxgbe/base/t4_msg.h b/drivers/net/cxgbe/base/t4_msg.h
index 6acd749a..74b4fc19 100644
--- a/drivers/net/cxgbe/base/t4_msg.h
+++ b/drivers/net/cxgbe/base/t4_msg.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#ifndef T4_MSG_H
diff --git a/drivers/net/cxgbe/base/t4_pci_id_tbl.h b/drivers/net/cxgbe/base/t4_pci_id_tbl.h
index 1230e738..5f5cbe04 100644
--- a/drivers/net/cxgbe/base/t4_pci_id_tbl.h
+++ b/drivers/net/cxgbe/base/t4_pci_id_tbl.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#ifndef __T4_PCI_ID_TBL_H__
diff --git a/drivers/net/cxgbe/base/t4_regs.h b/drivers/net/cxgbe/base/t4_regs.h
index 1100e16f..c0d6ddca 100644
--- a/drivers/net/cxgbe/base/t4_regs.h
+++ b/drivers/net/cxgbe/base/t4_regs.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#define MYPF_BASE 0x1b000
@@ -77,6 +49,7 @@
#define SGE_BASE_ADDR 0x1000
#define A_SGE_PF_KDOORBELL 0x0
+#define A_SGE_VF_KDOORBELL 0x0
#define S_QID 15
#define M_QID 0x1ffffU
@@ -103,6 +76,9 @@
#define A_SGE_PF_GTS 0x4
+#define T4VF_SGE_BASE_ADDR 0x0000
+#define A_SGE_VF_GTS 0x4
+
#define S_INGRESSQID 16
#define M_INGRESSQID 0xffffU
#define V_INGRESSQID(x) ((x) << S_INGRESSQID)
@@ -191,6 +167,8 @@
#define V_QUEUESPERPAGEPF0(x) ((x) << S_QUEUESPERPAGEPF0)
#define G_QUEUESPERPAGEPF0(x) (((x) >> S_QUEUESPERPAGEPF0) & M_QUEUESPERPAGEPF0)
+#define A_SGE_EGRESS_QUEUES_PER_PAGE_VF 0x1014
+
#define S_ERR_CPL_EXCEED_IQE_SIZE 22
#define V_ERR_CPL_EXCEED_IQE_SIZE(x) ((x) << S_ERR_CPL_EXCEED_IQE_SIZE)
#define F_ERR_CPL_EXCEED_IQE_SIZE V_ERR_CPL_EXCEED_IQE_SIZE(1U)
@@ -280,6 +258,11 @@
#define A_SGE_CONM_CTRL 0x1094
+#define S_T6_EGRTHRESHOLDPACKING 16
+#define M_T6_EGRTHRESHOLDPACKING 0xffU
+#define G_T6_EGRTHRESHOLDPACKING(x) (((x) >> S_T6_EGRTHRESHOLDPACKING) & \
+ M_T6_EGRTHRESHOLDPACKING)
+
#define S_EGRTHRESHOLD 8
#define M_EGRTHRESHOLD 0x3fU
#define V_EGRTHRESHOLD(x) ((x) << S_EGRTHRESHOLD)
@@ -370,6 +353,7 @@
#define G_STATSOURCE_T5(x) (((x) >> S_STATSOURCE_T5) & M_STATSOURCE_T5)
#define A_SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
+#define A_SGE_INGRESS_QUEUES_PER_PAGE_VF 0x10f8
#define A_SGE_CONTROL2 0x1124
@@ -443,6 +427,8 @@
/* registers for module CIM */
#define CIM_BASE_ADDR 0x7b00
+#define A_CIM_VF_EXT_MAILBOX_CTRL 0x0
+
#define A_CIM_PF_MAILBOX_DATA 0x240
#define A_CIM_PF_MAILBOX_CTRL 0x280
@@ -462,6 +448,8 @@
#define V_UPCRST(x) ((x) << S_UPCRST)
#define F_UPCRST V_UPCRST(1U)
+#define NUM_CIM_PF_MAILBOX_DATA_INSTANCES 16
+
/* registers for module TP */
#define A_TP_OUT_CONFIG 0x7d04
@@ -503,9 +491,34 @@
#define V_MTUVALUE(x) ((x) << S_MTUVALUE)
#define G_MTUVALUE(x) (((x) >> S_MTUVALUE) & M_MTUVALUE)
+#define A_TP_RSS_CONFIG_VRT 0x7e00
+
+#define S_KEYMODE 6
+#define M_KEYMODE 0x3U
+#define G_KEYMODE(x) (((x) >> S_KEYMODE) & M_KEYMODE)
+
+#define S_KEYWRADDR 0
+#define V_KEYWRADDR(x) ((x) << S_KEYWRADDR)
+
+#define S_KEYWREN 4
+#define V_KEYWREN(x) ((x) << S_KEYWREN)
+#define F_KEYWREN V_KEYWREN(1U)
+
+#define S_KEYWRADDRX 30
+#define V_KEYWRADDRX(x) ((x) << S_KEYWRADDRX)
+
+#define S_KEYEXTEND 26
+#define V_KEYEXTEND(x) ((x) << S_KEYEXTEND)
+#define F_KEYEXTEND V_KEYEXTEND(1U)
+
+#define S_T6_VFWRADDR 8
+#define V_T6_VFWRADDR(x) ((x) << S_T6_VFWRADDR)
+
#define A_TP_PIO_ADDR 0x7e40
#define A_TP_PIO_DATA 0x7e44
+#define A_TP_RSS_SECRET_KEY0 0x40
+
#define A_TP_VLAN_PRI_MAP 0x140
#define S_FRAGMENTATION 9
@@ -558,8 +571,12 @@
#define V_CSUM_HAS_PSEUDO_HDR(x) ((x) << S_CSUM_HAS_PSEUDO_HDR)
#define F_CSUM_HAS_PSEUDO_HDR V_CSUM_HAS_PSEUDO_HDR(1U)
+#define S_RM_OVLAN 9
+#define V_RM_OVLAN(x) ((x) << S_RM_OVLAN)
+
/* registers for module MPS */
#define MPS_BASE_ADDR 0x9000
+#define T4VF_MPS_BASE_ADDR 0x0100
#define S_REPLICATE 11
#define V_REPLICATE(x) ((x) << S_REPLICATE)
@@ -766,6 +783,66 @@
#define A_MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8
#define A_MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc
+#define A_MPS_VF_STAT_TX_VF_BCAST_FRAMES_L 0x88
+#define A_MPS_VF_STAT_TX_VF_MCAST_FRAMES_L 0x98
+#define A_MPS_VF_STAT_TX_VF_UCAST_FRAMES_L 0xa8
+#define A_MPS_VF_STAT_TX_VF_DROP_FRAMES_L 0xb0
+#define A_MPS_VF_STAT_RX_VF_BCAST_FRAMES_L 0xd0
+#define A_MPS_VF_STAT_RX_VF_MCAST_FRAMES_L 0xe0
+#define A_MPS_VF_STAT_RX_VF_UCAST_FRAMES_L 0xf0
+#define A_MPS_VF_STAT_RX_VF_ERR_FRAMES_L 0xf8
+
+#define A_MPS_PORT0_RX_IVLAN 0x3011c
+
+#define S_IVLAN_ETYPE 0
+#define M_IVLAN_ETYPE 0xffffU
+#define V_IVLAN_ETYPE(x) ((x) << S_IVLAN_ETYPE)
+
+#define MPS_PORT_RX_IVLAN_STRIDE 0x4000
+#define MPS_PORT_RX_IVLAN(idx) \
+ (A_MPS_PORT0_RX_IVLAN + (idx) * MPS_PORT_RX_IVLAN_STRIDE)
+
+#define A_MPS_PORT0_RX_OVLAN0 0x30120
+
+#define S_OVLAN_MASK 16
+#define M_OVLAN_MASK 0xffffU
+#define V_OVLAN_MASK(x) ((x) << S_OVLAN_MASK)
+
+#define S_OVLAN_ETYPE 0
+#define M_OVLAN_ETYPE 0xffffU
+#define V_OVLAN_ETYPE(x) ((x) << S_OVLAN_ETYPE)
+
+#define MPS_PORT_RX_OVLAN_STRIDE 0x4000
+#define MPS_PORT_RX_OVLAN_BASE(idx) \
+(A_MPS_PORT0_RX_OVLAN0 + (idx) * MPS_PORT_RX_OVLAN_STRIDE)
+#define MPS_PORT_RX_OVLAN_REG(idx, reg) (MPS_PORT_RX_OVLAN_BASE(idx) + (reg))
+
+#define A_RX_OVLAN0 0x0
+#define A_RX_OVLAN1 0x4
+#define A_RX_OVLAN2 0x8
+
+#define A_MPS_PORT0_RX_CTL 0x30100
+
+#define S_OVLAN_EN0 0
+#define V_OVLAN_EN0(x) ((x) << S_OVLAN_EN0)
+#define F_OVLAN_EN0 V_OVLAN_EN0(1)
+
+#define S_OVLAN_EN1 1
+#define V_OVLAN_EN1(x) ((x) << S_OVLAN_EN1)
+#define F_OVLAN_EN1 V_OVLAN_EN1(1)
+
+#define S_OVLAN_EN2 2
+#define V_OVLAN_EN2(x) ((x) << S_OVLAN_EN2)
+#define F_OVLAN_EN2 V_OVLAN_EN2(1)
+
+#define S_IVLAN_EN 4
+#define V_IVLAN_EN(x) ((x) << S_IVLAN_EN)
+#define F_IVLAN_EN V_IVLAN_EN(1)
+
+#define MPS_PORT_RX_CTL_STRIDE 0x4000
+#define MPS_PORT_RX_CTL(idx) \
+ (A_MPS_PORT0_RX_CTL + (idx) * MPS_PORT_RX_CTL_STRIDE)
+
/* registers for module ULP_RX */
#define ULP_RX_BASE_ADDR 0x19150
@@ -823,6 +900,7 @@
#define F_PFCIM V_PFCIM(1U)
#define A_PL_WHOAMI 0x19400
+#define A_PL_VF_WHOAMI 0x0
#define A_PL_RST 0x19428
@@ -837,6 +915,7 @@
#define F_PIORSTMODE V_PIORSTMODE(1U)
#define A_PL_REV 0x1943c
+#define A_PL_VF_REV 0x4
#define S_REV 0
#define M_REV 0xfU
diff --git a/drivers/net/cxgbe/base/t4_regs_values.h b/drivers/net/cxgbe/base/t4_regs_values.h
index 9085ff6d..a9414d20 100644
--- a/drivers/net/cxgbe/base/t4_regs_values.h
+++ b/drivers/net/cxgbe/base/t4_regs_values.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#ifndef __T4_REGS_VALUES_H__
diff --git a/drivers/net/cxgbe/base/t4fw_interface.h b/drivers/net/cxgbe/base/t4fw_interface.h
index 6ca4f318..852e8f3c 100644
--- a/drivers/net/cxgbe/base/t4fw_interface.h
+++ b/drivers/net/cxgbe/base/t4fw_interface.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#ifndef _T4FW_INTERFACE_H_
@@ -84,6 +56,8 @@ enum fw_memtype {
enum fw_wr_opcodes {
FW_ETH_TX_PKT_WR = 0x08,
FW_ETH_TX_PKTS_WR = 0x09,
+ FW_ETH_TX_PKT_VM_WR = 0x11,
+ FW_ETH_TX_PKTS_VM_WR = 0x12,
FW_ETH_TX_PKTS2_WR = 0x78,
};
@@ -146,6 +120,29 @@ struct fw_eth_tx_pkts_wr {
__u8 type;
};
+struct fw_eth_tx_pkt_vm_wr {
+ __be32 op_immdlen;
+ __be32 equiq_to_len16;
+ __be32 r3[2];
+ __u8 ethmacdst[6];
+ __u8 ethmacsrc[6];
+ __be16 ethtype;
+ __be16 vlantci;
+};
+
+struct fw_eth_tx_pkts_vm_wr {
+ __be32 op_pkd;
+ __be32 equiq_to_len16;
+ __be32 r3;
+ __be16 plen;
+ __u8 npkt;
+ __u8 r4;
+ __u8 ethmacdst[6];
+ __u8 ethmacsrc[6];
+ __be16 ethtype;
+ __be16 vlantci;
+};
+
/******************************************************************************
* C O M M A N D s
*********************/
@@ -171,24 +168,32 @@ struct fw_eth_tx_pkts_wr {
#define FW_CMD_HELLO_RETRIES 3
enum fw_cmd_opcodes {
+ FW_LDST_CMD = 0x01,
FW_RESET_CMD = 0x03,
FW_HELLO_CMD = 0x04,
FW_BYE_CMD = 0x05,
FW_INITIALIZE_CMD = 0x06,
FW_CAPS_CONFIG_CMD = 0x07,
FW_PARAMS_CMD = 0x08,
+ FW_PFVF_CMD = 0x09,
FW_IQ_CMD = 0x10,
FW_EQ_ETH_CMD = 0x12,
FW_VI_CMD = 0x14,
FW_VI_MAC_CMD = 0x15,
FW_VI_RXMODE_CMD = 0x16,
FW_VI_ENABLE_CMD = 0x17,
+ FW_VI_STATS_CMD = 0x1a,
FW_PORT_CMD = 0x1b,
FW_RSS_IND_TBL_CMD = 0x20,
+ FW_RSS_GLB_CONFIG_CMD = 0x22,
FW_RSS_VI_CONFIG_CMD = 0x23,
FW_DEBUG_CMD = 0x81,
};
+enum fw_cmd_cap {
+ FW_CMD_CAP_PORT = 0x04,
+};
+
/*
* Generic command header flit0
*/
@@ -238,6 +243,94 @@ struct fw_cmd_hdr {
#define FW_LEN16(fw_struct) V_FW_CMD_LEN16(sizeof(fw_struct) / 16)
+/* address spaces
+ */
+enum fw_ldst_addrspc {
+ FW_LDST_ADDRSPC_TP_PIO = 0x0010,
+};
+
+struct fw_ldst_cmd {
+ __be32 op_to_addrspace;
+ __be32 cycles_to_len16;
+ union fw_ldst {
+ struct fw_ldst_addrval {
+ __be32 addr;
+ __be32 val;
+ } addrval;
+ struct fw_ldst_idctxt {
+ __be32 physid;
+ __be32 msg_ctxtflush;
+ __be32 ctxt_data7;
+ __be32 ctxt_data6;
+ __be32 ctxt_data5;
+ __be32 ctxt_data4;
+ __be32 ctxt_data3;
+ __be32 ctxt_data2;
+ __be32 ctxt_data1;
+ __be32 ctxt_data0;
+ } idctxt;
+ struct fw_ldst_mdio {
+ __be16 paddr_mmd;
+ __be16 raddr;
+ __be16 vctl;
+ __be16 rval;
+ } mdio;
+ struct fw_ldst_mps {
+ __be16 fid_ctl;
+ __be16 rplcpf_pkd;
+ __be32 rplc127_96;
+ __be32 rplc95_64;
+ __be32 rplc63_32;
+ __be32 rplc31_0;
+ __be32 atrb;
+ __be16 vlan[16];
+ } mps;
+ struct fw_ldst_func {
+ __u8 access_ctl;
+ __u8 mod_index;
+ __be16 ctl_id;
+ __be32 offset;
+ __be64 data0;
+ __be64 data1;
+ } func;
+ struct fw_ldst_pcie {
+ __u8 ctrl_to_fn;
+ __u8 bnum;
+ __u8 r;
+ __u8 ext_r;
+ __u8 select_naccess;
+ __u8 pcie_fn;
+ __be16 nset_pkd;
+ __be32 data[12];
+ } pcie;
+ struct fw_ldst_i2c_deprecated {
+ __u8 pid_pkd;
+ __u8 base;
+ __u8 boffset;
+ __u8 data;
+ __be32 r9;
+ } i2c_deprecated;
+ struct fw_ldst_i2c {
+ __u8 pid;
+ __u8 did;
+ __u8 boffset;
+ __u8 blen;
+ __be32 r9;
+ __u8 data[48];
+ } i2c;
+ struct fw_ldst_le {
+ __be32 index;
+ __be32 r9;
+ __u8 val[33];
+ __u8 r11[7];
+ } le;
+ } u;
+};
+
+#define S_FW_LDST_CMD_ADDRSPACE 0
+#define M_FW_LDST_CMD_ADDRSPACE 0xff
+#define V_FW_LDST_CMD_ADDRSPACE(x) ((x) << S_FW_LDST_CMD_ADDRSPACE)
+
struct fw_reset_cmd {
__be32 op_to_write;
__be32 retval_len16;
@@ -386,6 +479,7 @@ struct fw_caps_config_cmd {
enum fw_params_mnem {
FW_PARAMS_MNEM_DEV = 1, /* device params */
FW_PARAMS_MNEM_PFVF = 2, /* function params */
+ FW_PARAMS_MNEM_REG = 3, /* limited register access */
FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */
};
@@ -395,6 +489,8 @@ enum fw_params_mnem {
enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in khz */
FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, /* the port vector */
+ FW_PARAMS_PARAM_DEV_FWREV = 0x0B, /* fw version */
+ FW_PARAMS_PARAM_DEV_TPREV = 0x0C, /* tp version */
FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
};
@@ -402,7 +498,8 @@ enum fw_params_param_dev {
* physical and virtual function parameters
*/
enum fw_params_param_pfvf {
- FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31
+ FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31,
+ FW_PARAMS_PARAM_PFVF_PORT_CAPS32 = 0x3A
};
/*
@@ -443,6 +540,10 @@ enum fw_params_param_dmaq {
#define G_FW_PARAMS_PARAM_YZ(x) \
(((x) >> S_FW_PARAMS_PARAM_YZ) & M_FW_PARAMS_PARAM_YZ)
+#define S_FW_PARAMS_PARAM_XYZ 0
+#define M_FW_PARAMS_PARAM_XYZ 0xffffff
+#define V_FW_PARAMS_PARAM_XYZ(x) ((x) << S_FW_PARAMS_PARAM_XYZ)
+
struct fw_params_cmd {
__be32 op_to_vfn;
__be32 retval_len16;
@@ -464,6 +565,68 @@ struct fw_params_cmd {
#define G_FW_PARAMS_CMD_VFN(x) \
(((x) >> S_FW_PARAMS_CMD_VFN) & M_FW_PARAMS_CMD_VFN)
+struct fw_pfvf_cmd {
+ __be32 op_to_vfn;
+ __be32 retval_len16;
+ __be32 niqflint_niq;
+ __be32 type_to_neq;
+ __be32 tc_to_nexactf;
+ __be32 r_caps_to_nethctrl;
+ __be16 nricq;
+ __be16 nriqp;
+ __be32 r4;
+};
+
+#define S_FW_PFVF_CMD_NIQFLINT 20
+#define M_FW_PFVF_CMD_NIQFLINT 0xfff
+#define G_FW_PFVF_CMD_NIQFLINT(x) \
+ (((x) >> S_FW_PFVF_CMD_NIQFLINT) & M_FW_PFVF_CMD_NIQFLINT)
+
+#define S_FW_PFVF_CMD_NIQ 0
+#define M_FW_PFVF_CMD_NIQ 0xfffff
+#define G_FW_PFVF_CMD_NIQ(x) \
+ (((x) >> S_FW_PFVF_CMD_NIQ) & M_FW_PFVF_CMD_NIQ)
+
+#define S_FW_PFVF_CMD_PMASK 20
+#define M_FW_PFVF_CMD_PMASK 0xf
+#define G_FW_PFVF_CMD_PMASK(x) \
+ (((x) >> S_FW_PFVF_CMD_PMASK) & M_FW_PFVF_CMD_PMASK)
+
+#define S_FW_PFVF_CMD_NEQ 0
+#define M_FW_PFVF_CMD_NEQ 0xfffff
+#define G_FW_PFVF_CMD_NEQ(x) \
+ (((x) >> S_FW_PFVF_CMD_NEQ) & M_FW_PFVF_CMD_NEQ)
+
+#define S_FW_PFVF_CMD_TC 24
+#define M_FW_PFVF_CMD_TC 0xff
+#define G_FW_PFVF_CMD_TC(x) \
+ (((x) >> S_FW_PFVF_CMD_TC) & M_FW_PFVF_CMD_TC)
+
+#define S_FW_PFVF_CMD_NVI 16
+#define M_FW_PFVF_CMD_NVI 0xff
+#define G_FW_PFVF_CMD_NVI(x) \
+ (((x) >> S_FW_PFVF_CMD_NVI) & M_FW_PFVF_CMD_NVI)
+
+#define S_FW_PFVF_CMD_NEXACTF 0
+#define M_FW_PFVF_CMD_NEXACTF 0xffff
+#define G_FW_PFVF_CMD_NEXACTF(x) \
+ (((x) >> S_FW_PFVF_CMD_NEXACTF) & M_FW_PFVF_CMD_NEXACTF)
+
+#define S_FW_PFVF_CMD_R_CAPS 24
+#define M_FW_PFVF_CMD_R_CAPS 0xff
+#define G_FW_PFVF_CMD_R_CAPS(x) \
+ (((x) >> S_FW_PFVF_CMD_R_CAPS) & M_FW_PFVF_CMD_R_CAPS)
+
+#define S_FW_PFVF_CMD_WX_CAPS 16
+#define M_FW_PFVF_CMD_WX_CAPS 0xff
+#define G_FW_PFVF_CMD_WX_CAPS(x) \
+ (((x) >> S_FW_PFVF_CMD_WX_CAPS) & M_FW_PFVF_CMD_WX_CAPS)
+
+#define S_FW_PFVF_CMD_NETHCTRL 0
+#define M_FW_PFVF_CMD_NETHCTRL 0xffff
+#define G_FW_PFVF_CMD_NETHCTRL(x) \
+ (((x) >> S_FW_PFVF_CMD_NETHCTRL) & M_FW_PFVF_CMD_NETHCTRL)
+
/*
* ingress queue type; the first 1K ingress queues can have associated 0,
* 1 or 2 free lists and an interrupt, all other ingress queues lack these
@@ -724,6 +887,11 @@ struct fw_eq_eth_cmd {
#define G_FW_EQ_ETH_CMD_EQID(x) \
(((x) >> S_FW_EQ_ETH_CMD_EQID) & M_FW_EQ_ETH_CMD_EQID)
+#define S_FW_EQ_ETH_CMD_PHYSEQID 0
+#define M_FW_EQ_ETH_CMD_PHYSEQID 0xfffff
+#define G_FW_EQ_ETH_CMD_PHYSEQID(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_PHYSEQID) & M_FW_EQ_ETH_CMD_PHYSEQID)
+
#define S_FW_EQ_ETH_CMD_FETCHRO 22
#define M_FW_EQ_ETH_CMD_FETCHRO 0x1
#define V_FW_EQ_ETH_CMD_FETCHRO(x) ((x) << S_FW_EQ_ETH_CMD_FETCHRO)
@@ -988,6 +1156,9 @@ struct fw_vi_enable_cmd {
(((x) >> S_FW_VI_ENABLE_CMD_DCB_INFO) & M_FW_VI_ENABLE_CMD_DCB_INFO)
#define F_FW_VI_ENABLE_CMD_DCB_INFO V_FW_VI_ENABLE_CMD_DCB_INFO(1U)
+/* VI VF stats offset definitions */
+#define VI_VF_NUM_STATS 16
+
/* VI PF stats offset definitions */
#define VI_PF_NUM_STATS 17
enum fw_vi_stats_pf_index {
@@ -1065,7 +1236,16 @@ struct fw_vi_stats_cmd {
} u;
};
-/* port capabilities bitmap */
+#define S_FW_VI_STATS_CMD_VIID 0
+#define V_FW_VI_STATS_CMD_VIID(x) ((x) << S_FW_VI_STATS_CMD_VIID)
+
+#define S_FW_VI_STATS_CMD_NSTATS 12
+#define V_FW_VI_STATS_CMD_NSTATS(x) ((x) << S_FW_VI_STATS_CMD_NSTATS)
+
+#define S_FW_VI_STATS_CMD_IX 0
+#define V_FW_VI_STATS_CMD_IX(x) ((x) << S_FW_VI_STATS_CMD_IX)
+
+/* old 16-bit port capabilities bitmap */
enum fw_port_cap {
FW_PORT_CAP_SPEED_100M = 0x0001,
FW_PORT_CAP_SPEED_1G = 0x0002,
@@ -1100,9 +1280,45 @@ enum fw_port_mdi {
#define V_FW_PORT_CAP_MDI(x) ((x) << S_FW_PORT_CAP_MDI)
#define G_FW_PORT_CAP_MDI(x) (((x) >> S_FW_PORT_CAP_MDI) & M_FW_PORT_CAP_MDI)
+/* new 32-bit port capabilities bitmap (fw_port_cap32_t) */
+#define FW_PORT_CAP32_SPEED_100M 0x00000001UL
+#define FW_PORT_CAP32_SPEED_1G 0x00000002UL
+#define FW_PORT_CAP32_SPEED_10G 0x00000004UL
+#define FW_PORT_CAP32_SPEED_25G 0x00000008UL
+#define FW_PORT_CAP32_SPEED_40G 0x00000010UL
+#define FW_PORT_CAP32_SPEED_50G 0x00000020UL
+#define FW_PORT_CAP32_SPEED_100G 0x00000040UL
+#define FW_PORT_CAP32_FC_RX 0x00010000UL
+#define FW_PORT_CAP32_FC_TX 0x00020000UL
+#define FW_PORT_CAP32_802_3_PAUSE 0x00040000UL
+#define FW_PORT_CAP32_802_3_ASM_DIR 0x00080000UL
+#define FW_PORT_CAP32_ANEG 0x00100000UL
+#define FW_PORT_CAP32_MDIX 0x00200000UL
+#define FW_PORT_CAP32_MDIAUTO 0x00400000UL
+#define FW_PORT_CAP32_FEC_RS 0x00800000UL
+#define FW_PORT_CAP32_FEC_BASER_RS 0x01000000UL
+
+#define S_FW_PORT_CAP32_SPEED 0
+#define M_FW_PORT_CAP32_SPEED 0xfff
+#define V_FW_PORT_CAP32_SPEED(x) ((x) << S_FW_PORT_CAP32_SPEED)
+#define G_FW_PORT_CAP32_SPEED(x) \
+ (((x) >> S_FW_PORT_CAP32_SPEED) & M_FW_PORT_CAP32_SPEED)
+
+enum fw_port_mdi32 {
+ FW_PORT_CAP32_MDI_AUTO,
+};
+
+#define S_FW_PORT_CAP32_MDI 21
+#define M_FW_PORT_CAP32_MDI 3
+#define V_FW_PORT_CAP32_MDI(x) ((x) << S_FW_PORT_CAP32_MDI)
+#define G_FW_PORT_CAP32_MDI(x) \
+ (((x) >> S_FW_PORT_CAP32_MDI) & M_FW_PORT_CAP32_MDI)
+
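Illustrative use of the generated accessor; the speed field occupies bits 11:0 of the 32-bit capability word.

	fw_port_cap32_t caps = FW_PORT_CAP32_SPEED_100G | FW_PORT_CAP32_ANEG;
	u32 speed_field = G_FW_PORT_CAP32_SPEED(caps);
	/* speed_field == FW_PORT_CAP32_SPEED_100G (0x40); the ANEG bit
	 * (bit 20) is masked off by M_FW_PORT_CAP32_SPEED (0xfff).
	 */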
enum fw_port_action {
FW_PORT_ACTION_L1_CFG = 0x0001,
FW_PORT_ACTION_GET_PORT_INFO = 0x0003,
+ FW_PORT_ACTION_L1_CFG32 = 0x0009,
+ FW_PORT_ACTION_GET_PORT_INFO32 = 0x000a,
};
struct fw_port_cmd {
@@ -1191,6 +1407,18 @@ struct fw_port_cmd {
__be64 r12;
} control;
} dcb;
+ struct fw_port_l1cfg32 {
+ __be32 rcap32;
+ __be32 r;
+ } l1cfg32;
+ struct fw_port_info32 {
+ __be32 lstatus32_to_cbllen32;
+ __be32 auxlinfo32_mtu32;
+ __be32 linkattr32;
+ __be32 pcaps32;
+ __be32 acaps32;
+ __be32 lpacaps32;
+ } info32;
} u;
};
@@ -1264,6 +1492,36 @@ struct fw_port_cmd {
#define G_FW_PORT_CMD_MODTYPE(x) \
(((x) >> S_FW_PORT_CMD_MODTYPE) & M_FW_PORT_CMD_MODTYPE)
+#define S_FW_PORT_CMD_LSTATUS32 31
+#define M_FW_PORT_CMD_LSTATUS32 0x1
+#define V_FW_PORT_CMD_LSTATUS32(x) ((x) << S_FW_PORT_CMD_LSTATUS32)
+#define F_FW_PORT_CMD_LSTATUS32 V_FW_PORT_CMD_LSTATUS32(1U)
+
+#define S_FW_PORT_CMD_LINKDNRC32 28
+#define M_FW_PORT_CMD_LINKDNRC32 0x7
+#define G_FW_PORT_CMD_LINKDNRC32(x) \
+ (((x) >> S_FW_PORT_CMD_LINKDNRC32) & M_FW_PORT_CMD_LINKDNRC32)
+
+#define S_FW_PORT_CMD_MDIOCAP32 26
+#define M_FW_PORT_CMD_MDIOCAP32 0x1
+#define V_FW_PORT_CMD_MDIOCAP32(x) ((x) << S_FW_PORT_CMD_MDIOCAP32)
+#define F_FW_PORT_CMD_MDIOCAP32 V_FW_PORT_CMD_MDIOCAP32(1U)
+
+#define S_FW_PORT_CMD_MDIOADDR32 21
+#define M_FW_PORT_CMD_MDIOADDR32 0x1f
+#define G_FW_PORT_CMD_MDIOADDR32(x) \
+ (((x) >> S_FW_PORT_CMD_MDIOADDR32) & M_FW_PORT_CMD_MDIOADDR32)
+
+#define S_FW_PORT_CMD_PORTTYPE32 13
+#define M_FW_PORT_CMD_PORTTYPE32 0xff
+#define G_FW_PORT_CMD_PORTTYPE32(x) \
+ (((x) >> S_FW_PORT_CMD_PORTTYPE32) & M_FW_PORT_CMD_PORTTYPE32)
+
+#define S_FW_PORT_CMD_MODTYPE32 8
+#define M_FW_PORT_CMD_MODTYPE32 0x1f
+#define G_FW_PORT_CMD_MODTYPE32(x) \
+ (((x) >> S_FW_PORT_CMD_MODTYPE32) & M_FW_PORT_CMD_MODTYPE32)
+
/*
* These are configured into the VPD and hence tools that generate
* VPD may use this enumeration.
@@ -1532,6 +1790,83 @@ struct fw_rss_ind_tbl_cmd {
#define G_FW_RSS_IND_TBL_CMD_IQ2(x) \
(((x) >> S_FW_RSS_IND_TBL_CMD_IQ2) & M_FW_RSS_IND_TBL_CMD_IQ2)
+struct fw_rss_glb_config_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ union fw_rss_glb_config {
+ struct fw_rss_glb_config_manual {
+ __be32 mode_pkd;
+ __be32 r3;
+ __be64 r4;
+ __be64 r5;
+ } manual;
+ struct fw_rss_glb_config_basicvirtual {
+ __be32 mode_keymode;
+ __be32 synmapen_to_hashtoeplitz;
+ __be64 r8;
+ __be64 r9;
+ } basicvirtual;
+ } u;
+};
+
+#define S_FW_RSS_GLB_CONFIG_CMD_MODE 28
+#define M_FW_RSS_GLB_CONFIG_CMD_MODE 0xf
+#define G_FW_RSS_GLB_CONFIG_CMD_MODE(x) \
+ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_MODE) & M_FW_RSS_GLB_CONFIG_CMD_MODE)
+
+#define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN 8
+#define V_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN V_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 7
+#define V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 \
+ V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 6
+#define V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 \
+ V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 5
+#define V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 \
+ V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 4
+#define V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 \
+ V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN 3
+#define V_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN)
+#define F_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN V_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN 2
+#define V_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN)
+#define F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN V_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP 1
+#define V_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP)
+#define F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP \
+ V_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ 0
+#define V_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ)
+#define F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ \
+ V_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(1U)
+
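Note (illustrative, not part of this patch): throughout these firmware headers each field follows the same macro convention: S_<NAME> is the field's bit offset, M_<NAME> its mask, V_<NAME>(x) shifts a value into position, G_<NAME>(x) extracts it, and F_<NAME> is the single-bit V_<NAME>(1U) form. Using only the macros defined above, a minimal decoding sketch looks like:

	static inline int rss_mode_is_basicvirtual(u32 mode_pkd)
	{
		/* G_ extracts the 4-bit MODE field from a CPU-order word */
		return G_FW_RSS_GLB_CONFIG_CMD_MODE(mode_pkd) ==
		       FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL;
	}

	static inline int rss_tnlmapen_enabled(u32 synmapen_word)
	{
		/* F_ macros test (or set) individual single-bit flags */
		return (synmapen_word & F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) != 0;
	}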
struct fw_rss_vi_config_cmd {
__be32 op_to_viid;
__be32 retval_len16;
diff --git a/drivers/net/cxgbe/base/t4vf_hw.c b/drivers/net/cxgbe/base/t4vf_hw.c
new file mode 100644
index 00000000..9fd0b879
--- /dev/null
+++ b/drivers/net/cxgbe/base/t4vf_hw.c
@@ -0,0 +1,874 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include <rte_ethdev_driver.h>
+#include <rte_ether.h>
+
+#include "common.h"
+#include "t4_regs.h"
+
+/**
+ * t4vf_wait_dev_ready - wait until reads of registers work
+ *
+ * Wait for the device to become ready (signified by our "who am I" register
+ * returning a value other than all 1's). Return an error if it doesn't
+ * become ready ...
+ */
+static int t4vf_wait_dev_ready(struct adapter *adapter)
+{
+ const u32 whoami = T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI;
+ const u32 notready1 = 0xffffffff;
+ const u32 notready2 = 0xeeeeeeee;
+ u32 val;
+
+ val = t4_read_reg(adapter, whoami);
+ if (val != notready1 && val != notready2)
+ return 0;
+
+ msleep(500);
+ val = t4_read_reg(adapter, whoami);
+ if (val != notready1 && val != notready2)
+ return 0;
+
+ dev_err(adapter, "Device didn't become ready for access, whoami = %#x\n",
+ val);
+ return -EIO;
+}
+
+/*
+ * Get the reply to a mailbox command and store it in @rpl in big-endian order.
+ */
+static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
+ u32 mbox_addr)
+{
+ for ( ; nflit; nflit--, mbox_addr += 8)
+ *rpl++ = htobe64(t4_read_reg64(adap, mbox_addr));
+}
+
+/**
+ * t4vf_wr_mbox_core - send a command to FW through the mailbox
+ * @adapter: the adapter
+ * @cmd: the command to write
+ * @size: command length in bytes
+ * @rpl: where to optionally store the reply
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ *
+ * Sends the given command to FW through the mailbox and waits for the
+ * FW to execute the command. If @rpl is not %NULL it is used to store
+ * the FW's reply to the command. The command and its optional reply
+ * are of the same length. FW can take up to 500 ms to respond.
+ * @sleep_ok determines whether we may sleep while awaiting the response.
+ * If sleeping is allowed, we use progressive backoff; otherwise we spin.
+ *
+ * The return value is 0 on success or a negative errno on failure. A
+ * failure can happen either because we are not able to execute the
+ * command or FW executes it but signals an error. In the latter case
+ * the return value is the error code indicated by FW (negated).
+ */
+int t4vf_wr_mbox_core(struct adapter *adapter,
+ const void __attribute__((__may_alias__)) *cmd,
+ int size, void *rpl, bool sleep_ok)
+{
+ /*
+ * We delay in small increments at first in an effort to maintain
+ * responsiveness for simple, fast executing commands but then back
+ * off to larger delays to a maximum retry delay.
+ */
+ static const int delay[] = {
+ 1, 1, 3, 5, 10, 10, 20, 50, 100
+ };
+
+
+ u32 mbox_ctl = T4VF_CIM_BASE_ADDR + A_CIM_VF_EXT_MAILBOX_CTRL;
+ __be64 cmd_rpl[MBOX_LEN / 8];
+ struct mbox_entry entry;
+ unsigned int delay_idx;
+ u32 v, mbox_data;
+ const __be64 *p;
+ int i, ret;
+ int ms;
+
+ /* In T6, mailbox size is changed to 128 bytes to avoid
+ * invalidating the entire prefetch buffer.
+ */
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ mbox_data = T4VF_MBDATA_BASE_ADDR;
+ else
+ mbox_data = T6VF_MBDATA_BASE_ADDR;
+
+ /*
+ * Commands must be multiples of 16 bytes in length and may not be
+ * larger than the size of the Mailbox Data register array.
+ */
+ if ((size % 16) != 0 ||
+ size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
+ return -EINVAL;
+
+ /*
+ * Queue ourselves onto the mailbox access list. When our entry is at
+ * the front of the list, we have rights to access the mailbox. So we
+ * wait [for a while] till we're at the front [or bail out with an
+ * EBUSY] ...
+ */
+ t4_os_atomic_add_tail(&entry, &adapter->mbox_list, &adapter->mbox_lock);
+
+ delay_idx = 0;
+ ms = delay[0];
+
+ for (i = 0; ; i += ms) {
+ /*
+ * If we've waited too long, return a busy indication. This
+ * really ought to be based on our initial position in the
+ * mailbox access list but this is a start. We very rarely
+ * contend on access to the mailbox ...
+ */
+ if (i > (2 * FW_CMD_MAX_TIMEOUT)) {
+ t4_os_atomic_list_del(&entry, &adapter->mbox_list,
+ &adapter->mbox_lock);
+ ret = -EBUSY;
+ return ret;
+ }
+
+ /*
+ * If we're at the head, break out and start the mailbox
+ * protocol.
+ */
+ if (t4_os_list_first_entry(&adapter->mbox_list) == &entry)
+ break;
+
+ /*
+ * Delay for a bit before checking again ...
+ */
+ if (sleep_ok) {
+ ms = delay[delay_idx]; /* last element may repeat */
+ if (delay_idx < ARRAY_SIZE(delay) - 1)
+ delay_idx++;
+ msleep(ms);
+ } else {
+ rte_delay_ms(ms);
+ }
+ }
+
+ /*
+ * Loop trying to get ownership of the mailbox. Return an error
+ * if we can't gain ownership.
+ */
+ v = G_MBOWNER(t4_read_reg(adapter, mbox_ctl));
+ for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
+ v = G_MBOWNER(t4_read_reg(adapter, mbox_ctl));
+
+ if (v != X_MBOWNER_PL) {
+ t4_os_atomic_list_del(&entry, &adapter->mbox_list,
+ &adapter->mbox_lock);
+ ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
+ return ret;
+ }
+
+ /*
+ * Write the command array into the Mailbox Data register array and
+ * transfer ownership of the mailbox to the firmware.
+ */
+ for (i = 0, p = cmd; i < size; i += 8)
+ t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
+
+ t4_read_reg(adapter, mbox_data); /* flush write */
+ t4_write_reg(adapter, mbox_ctl,
+ F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
+ t4_read_reg(adapter, mbox_ctl); /* flush write */
+ delay_idx = 0;
+ ms = delay[0];
+
+ /*
+ * Spin waiting for firmware to acknowledge processing our command.
+ */
+ for (i = 0; i < FW_CMD_MAX_TIMEOUT; i++) {
+ if (sleep_ok) {
+ ms = delay[delay_idx]; /* last element may repeat */
+ if (delay_idx < ARRAY_SIZE(delay) - 1)
+ delay_idx++;
+ msleep(ms);
+ } else {
+ rte_delay_ms(ms);
+ }
+
+ /*
+ * If we're the owner, see if this is the reply we wanted.
+ */
+ v = t4_read_reg(adapter, mbox_ctl);
+ if (G_MBOWNER(v) == X_MBOWNER_PL) {
+ /*
+ * If the Message Valid bit isn't on, revoke ownership
+ * of the mailbox and continue waiting for our reply.
+ */
+ if ((v & F_MBMSGVALID) == 0) {
+ t4_write_reg(adapter, mbox_ctl,
+ V_MBOWNER(X_MBOWNER_NONE));
+ continue;
+ }
+
+ /*
+ * We now have our reply. Extract the command return
+ * value, copy the reply back to our caller's buffer
+ * (if specified) and revoke ownership of the mailbox.
+ * We return the (negated) firmware command return
+ * code (this depends on FW_SUCCESS == 0). (Again we
+ * avoid clogging the log with FW_VI_STATS_CMD
+ * reply results.)
+ */
+
+ /*
+ * Retrieve the command reply and release the mailbox.
+ */
+ get_mbox_rpl(adapter, cmd_rpl, size / 8, mbox_data);
+ t4_write_reg(adapter, mbox_ctl,
+ V_MBOWNER(X_MBOWNER_NONE));
+ t4_os_atomic_list_del(&entry, &adapter->mbox_list,
+ &adapter->mbox_lock);
+
+ /* return value in high-order host-endian word */
+ v = be64_to_cpu(cmd_rpl[0]);
+
+ if (rpl) {
+ /* request bit in high-order BE word */
+ WARN_ON((be32_to_cpu(*(const u32 *)cmd)
+ & F_FW_CMD_REQUEST) == 0);
+ memcpy(rpl, cmd_rpl, size);
+ }
+ return -((int)G_FW_CMD_RETVAL(v));
+ }
+ }
+
+ /*
+ * We timed out. Return the error ...
+ */
+ dev_err(adapter, "command %#x timed out\n",
+ *(const u8 *)cmd);
+ dev_err(adapter, " Control = %#x\n", t4_read_reg(adapter, mbox_ctl));
+ t4_os_atomic_list_del(&entry, &adapter->mbox_list, &adapter->mbox_lock);
+ ret = -ETIMEDOUT;
+ return ret;
+}
+
+/**
+ * t4vf_fw_reset - issue a reset to FW
+ * @adapter: the adapter
+ *
+ * Issues a reset command to FW. For a Physical Function this would
+ * result in the Firmware resetting all of its state. For a Virtual
+ * Function this just resets the state associated with the VF.
+ */
+int t4vf_fw_reset(struct adapter *adapter)
+{
+ struct fw_reset_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RESET_CMD) |
+ F_FW_CMD_WRITE);
+ cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(FW_LEN16(cmd)));
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ * t4vf_prep_adapter - prepare SW and HW for operation
+ * @adapter: the adapter
+ *
+ * Initialize adapter SW state for the various HW modules, set initial
+ * values for some adapter tunables, take PHYs out of reset, and
+ * initialize the MDIO interface.
+ */
+int t4vf_prep_adapter(struct adapter *adapter)
+{
+ u32 pl_vf_rev;
+ int ret, ver;
+
+ ret = t4vf_wait_dev_ready(adapter);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Default port and clock for debugging in case we can't reach
+ * firmware.
+ */
+ adapter->params.nports = 1;
+ adapter->params.vfres.pmask = 1;
+ adapter->params.vpd.cclk = 50000;
+
+ pl_vf_rev = G_REV(t4_read_reg(adapter, A_PL_VF_REV));
+ adapter->params.pci.device_id = adapter->pdev->id.device_id;
+ adapter->params.pci.vendor_id = adapter->pdev->id.vendor_id;
+
+ /*
+ * WE DON'T NEED adapter->params.chip CODE ONCE PL_REV CONTAINS
+ * ADAPTER (VERSION << 4 | REVISION)
+ */
+ ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id);
+ adapter->params.chip = 0;
+ switch (ver) {
+ case CHELSIO_T5:
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5,
+ pl_vf_rev);
+ adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ break;
+ case CHELSIO_T6:
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6,
+ pl_vf_rev);
+ adapter->params.arch.sge_fl_db = 0;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ break;
+ default:
+ dev_err(adapter, "%s: Device %d is not supported\n",
+ __func__, adapter->params.pci.device_id);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * t4vf_query_params - query FW or device parameters
+ * @adapter: the adapter
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @vals: the parameter values
+ *
+ * Reads the values of firmware or device parameters. Up to 7 parameters
+ * can be queried at once.
+ */
+int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
+ const u32 *params, u32 *vals)
+{
+ struct fw_params_cmd cmd, rpl;
+ struct fw_params_param *p;
+ unsigned int i;
+ size_t len16;
+ int ret;
+
+ if (nparams > 7)
+ return -EINVAL;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ);
+ len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
+ param[nparams]), 16);
+ cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(len16));
+ for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++)
+ p->mnem = cpu_to_be32(*params++);
+ ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (ret == 0)
+ for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++)
+ *vals++ = be32_to_cpu(p->val);
+ return ret;
+}
+
+/**
+ * t4vf_get_vpd_params - retrieve device VPD parameters
+ * @adapter: the adapter
+ *
+ * Retrieves various device Vital Product Data parameters. The parameters
+ * are stored in @adapter->params.vpd.
+ */
+int t4vf_get_vpd_params(struct adapter *adapter)
+{
+ struct vpd_params *vpd_params = &adapter->params.vpd;
+ u32 params[7], vals[7];
+ int v;
+
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
+ v = t4vf_query_params(adapter, 1, params, vals);
+ if (v != FW_SUCCESS)
+ return v;
+ vpd_params->cclk = vals[0];
+ dev_debug(adapter, "%s: vpd_params->cclk = %u\n",
+ __func__, vpd_params->cclk);
+ return 0;
+}
+
+/**
+ * t4vf_get_dev_params - retrieve device parameters
+ * @adapter: the adapter
+ *
+ * Retrieves the firmware and TP microcode versions.
+ */
+int t4vf_get_dev_params(struct adapter *adapter)
+{
+ u32 params[7], vals[7];
+ int v;
+
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV));
+ params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPREV));
+ v = t4vf_query_params(adapter, 2, params, vals);
+ if (v != FW_SUCCESS)
+ return v;
+ adapter->params.fw_vers = vals[0];
+ adapter->params.tp_vers = vals[1];
+
+ dev_info(adapter, "Firmware version: %u.%u.%u.%u\n",
+ G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
+ G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
+ G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
+ G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));
+
+ dev_info(adapter, "TP Microcode version: %u.%u.%u.%u\n",
+ G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers),
+ G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers),
+ G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers),
+ G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers));
+ return 0;
+}
+
+/**
+ * t4vf_set_params - sets FW or device parameters
+ * @adapter: the adapter
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @vals: the parameter values
+ *
+ * Sets the values of firmware or device parameters. Up to 7 parameters
+ * can be specified at once.
+ */
+int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
+ const u32 *params, const u32 *vals)
+{
+ struct fw_params_param *p;
+ struct fw_params_cmd cmd;
+ unsigned int i;
+ size_t len16;
+
+ if (nparams > 7)
+ return -EINVAL;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE);
+ len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
+ param[nparams]), 16);
+ cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(len16));
+ for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) {
+ p->mnem = cpu_to_be32(*params++);
+ p->val = cpu_to_be32(*vals++);
+ }
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ * t4vf_fl_pkt_align - return the fl packet alignment
+ * @adapter: the adapter
+ *
+ * T4 has a single field to specify the packing and padding boundary.
+ * T5 onwards has separate fields for these, so the alignment for the
+ * next packet offset is the maximum of the two.
+ */
+int t4vf_fl_pkt_align(struct adapter *adapter, u32 sge_control,
+ u32 sge_control2)
+{
+ unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
+
+ /* T4 uses a single control field to specify both the PCIe Padding and
+ * Packing Boundary. T5 introduced the ability to specify these
+ * separately. The actual Ingress Packet Data alignment boundary
+ * within Packed Buffer Mode is the maximum of these two
+ * specifications.
+ */
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ ingpad_shift = X_INGPADBOUNDARY_SHIFT;
+ else
+ ingpad_shift = X_T6_INGPADBOUNDARY_SHIFT;
+
+ ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift);
+
+ fl_align = ingpadboundary;
+ if (!is_t4(adapter->params.chip)) {
+ ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
+ if (ingpackboundary == X_INGPACKBOUNDARY_16B)
+ ingpackboundary = 16;
+ else
+ ingpackboundary = 1 << (ingpackboundary +
+ X_INGPACKBOUNDARY_SHIFT);
+
+ fl_align = max(ingpadboundary, ingpackboundary);
+ }
+ return fl_align;
+}
+
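Note: to make the arithmetic above concrete, a hedged worked example with hypothetical field values (and assuming the usual shift of 5 for both the T5 padding and packing fields): a padding boundary field of 0 gives 1 << (0 + 5) = 32 bytes, a packing boundary field of 1 gives 1 << (1 + 5) = 64 bytes, so t4vf_fl_pkt_align() returns max(32, 64) = 64 and free-list packet data is placed on 64-byte boundaries.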
+unsigned int t4vf_get_pf_from_vf(struct adapter *adapter)
+{
+ u32 whoami;
+
+ whoami = t4_read_reg(adapter, T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI);
+ return (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+ G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami));
+}
+
+/**
+ * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
+ * @adapter: the adapter
+ *
+ * Retrieves global RSS mode and parameters with which we have to live
+ * and stores them in the @adapter's RSS parameters.
+ */
+int t4vf_get_rss_glb_config(struct adapter *adapter)
+{
+ struct rss_params *rss = &adapter->params.rss;
+ struct fw_rss_glb_config_cmd cmd, rpl;
+ int v;
+
+ /*
+ * Execute an RSS Global Configuration read command to retrieve
+ * our RSS configuration.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ);
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (v != FW_SUCCESS)
+ return v;
+
+ /*
+ * Translate the big-endian RSS Global Configuration into our
+ * cpu-endian format based on the RSS mode. We also do first level
+ * filtering at this point to weed out modes which don't support
+ * VF Drivers ...
+ */
+ rss->mode = G_FW_RSS_GLB_CONFIG_CMD_MODE
+ (be32_to_cpu(rpl.u.manual.mode_pkd));
+ switch (rss->mode) {
+ case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
+ u32 word = be32_to_cpu
+ (rpl.u.basicvirtual.synmapen_to_hashtoeplitz);
+
+ rss->u.basicvirtual.synmapen =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) != 0);
+ rss->u.basicvirtual.syn4tupenipv6 =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) != 0);
+ rss->u.basicvirtual.syn2tupenipv6 =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) != 0);
+ rss->u.basicvirtual.syn4tupenipv4 =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) != 0);
+ rss->u.basicvirtual.syn2tupenipv4 =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) != 0);
+ rss->u.basicvirtual.ofdmapen =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) != 0);
+ rss->u.basicvirtual.tnlmapen =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) != 0);
+ rss->u.basicvirtual.tnlalllookup =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) != 0);
+ rss->u.basicvirtual.hashtoeplitz =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) != 0);
+
+ /* we need at least Tunnel Map Enable to be set */
+ if (!rss->u.basicvirtual.tnlmapen)
+ return -EINVAL;
+ break;
+ }
+
+ default:
+ /* all unknown/unsupported RSS modes result in an error */
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * t4vf_get_vfres - retrieve VF resource limits
+ * @adapter: the adapter
+ *
+ * Retrieves configured resource limits and capabilities for a virtual
+ * function. The results are stored in @adapter->params.vfres.
+ */
+int t4vf_get_vfres(struct adapter *adapter)
+{
+ struct vf_resources *vfres = &adapter->params.vfres;
+ struct fw_pfvf_cmd cmd, rpl;
+ u32 word;
+ int v;
+
+ /*
+ * Execute PFVF Read command to get VF resource limits; bail out early
+ * with error on command failure.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ);
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (v != FW_SUCCESS)
+ return v;
+
+ /*
+ * Extract VF resource limits and return success.
+ */
+ word = be32_to_cpu(rpl.niqflint_niq);
+ vfres->niqflint = G_FW_PFVF_CMD_NIQFLINT(word);
+ vfres->niq = G_FW_PFVF_CMD_NIQ(word);
+
+ word = be32_to_cpu(rpl.type_to_neq);
+ vfres->neq = G_FW_PFVF_CMD_NEQ(word);
+ vfres->pmask = G_FW_PFVF_CMD_PMASK(word);
+
+ word = be32_to_cpu(rpl.tc_to_nexactf);
+ vfres->tc = G_FW_PFVF_CMD_TC(word);
+ vfres->nvi = G_FW_PFVF_CMD_NVI(word);
+ vfres->nexactf = G_FW_PFVF_CMD_NEXACTF(word);
+
+ word = be32_to_cpu(rpl.r_caps_to_nethctrl);
+ vfres->r_caps = G_FW_PFVF_CMD_R_CAPS(word);
+ vfres->wx_caps = G_FW_PFVF_CMD_WX_CAPS(word);
+ vfres->nethctrl = G_FW_PFVF_CMD_NETHCTRL(word);
+ return 0;
+}
+
+/**
+ * t4vf_get_port_stats_fw - collect "port" statistics via Firmware
+ * @adapter: the adapter
+ * @pidx: the port index
+ * @p: the stats structure to fill
+ *
+ * Collect statistics for the "port"'s Virtual Interface via Firmware
+ * commands.
+ */
+static int t4vf_get_port_stats_fw(struct adapter *adapter, int pidx,
+ struct port_stats *p)
+{
+ struct port_info *pi = adap2pinfo(adapter, pidx);
+ unsigned int rem = VI_VF_NUM_STATS;
+ struct fw_vi_stats_vf fwstats;
+ __be64 *fwsp = (__be64 *)&fwstats;
+
+ /*
+ * Grab the Virtual Interface statistics a chunk at a time via mailbox
+ * commands. We could use a Work Request and get all of them at once
+ * but that's an asynchronous interface which is awkward to use.
+ */
+ while (rem) {
+ unsigned int ix = VI_VF_NUM_STATS - rem;
+ unsigned int nstats = min(6U, rem);
+ struct fw_vi_stats_cmd cmd, rpl;
+ size_t len = (offsetof(struct fw_vi_stats_cmd, u) +
+ sizeof(struct fw_vi_stats_ctl));
+ size_t len16 = DIV_ROUND_UP(len, 16);
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_STATS_CMD) |
+ V_FW_VI_STATS_CMD_VIID(pi->viid) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ);
+ cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(len16));
+ cmd.u.ctl.nstats_ix =
+ cpu_to_be16(V_FW_VI_STATS_CMD_IX(ix) |
+ V_FW_VI_STATS_CMD_NSTATS(nstats));
+ ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl);
+ if (ret != FW_SUCCESS)
+ return ret;
+
+ memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats);
+
+ rem -= nstats;
+ fwsp += nstats;
+ }
+
+ /*
+ * Translate firmware statistics into host native statistics.
+ */
+ p->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames);
+ p->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames);
+ p->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames);
+ p->tx_drop = be64_to_cpu(fwstats.tx_drop_frames);
+
+ p->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames);
+ p->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames);
+ p->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames);
+ p->rx_len_err = be64_to_cpu(fwstats.rx_err_frames);
+
+ return 0;
+}
+
+/**
+ * t4vf_get_port_stats - collect "port" statistics
+ * @adapter: the adapter
+ * @pidx: the port index
+ * @p: the stats structure to fill
+ *
+ * Collect statistics for the "port"'s Virtual Interface.
+ */
+void t4vf_get_port_stats(struct adapter *adapter, int pidx,
+ struct port_stats *p)
+{
+ /*
+ * If this is not the first Virtual Interface for our Virtual
+ * Function, we need to use Firmware commands to retrieve its
+ * MPS statistics.
+ */
+ if (pidx != 0)
+ t4vf_get_port_stats_fw(adapter, pidx, p);
+
+ /*
+ * But for the first VI, we can grab its statistics via the MPS
+ * register mapped into the VF register space.
+ */
+#define GET_STAT(name) \
+ t4_read_reg64(adapter, \
+ T4VF_MPS_BASE_ADDR + A_MPS_VF_STAT_##name##_L)
+ p->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES);
+ p->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES);
+ p->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES);
+ p->tx_drop = GET_STAT(TX_VF_DROP_FRAMES);
+
+ p->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES);
+ p->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES);
+ p->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES);
+
+ p->rx_len_err = GET_STAT(RX_VF_ERR_FRAMES);
+#undef GET_STAT
+}
+
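Note: the token-pasting GET_STAT() macro above expands, for example, GET_STAT(TX_VF_BCAST_FRAMES) to t4_read_reg64(adapter, T4VF_MPS_BASE_ADDR + A_MPS_VF_STAT_TX_VF_BCAST_FRAMES_L), i.e. a single 64-bit read of the per-VF MPS counter mapped into the VF register window.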
+static int t4vf_alloc_vi(struct adapter *adapter, int port_id)
+{
+ struct fw_vi_cmd cmd, rpl;
+ int v;
+
+ /*
+ * Execute a VI command to allocate Virtual Interface and return its
+ * VIID.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE |
+ F_FW_CMD_EXEC);
+ cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
+ F_FW_VI_CMD_ALLOC);
+ cmd.portid_pkd = V_FW_VI_CMD_PORTID(port_id);
+ v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (v != FW_SUCCESS)
+ return v;
+ return G_FW_VI_CMD_VIID(be16_to_cpu(rpl.type_to_viid));
+}
+
+int t4vf_port_init(struct adapter *adapter)
+{
+ unsigned int fw_caps = adapter->params.fw_caps_support;
+ struct fw_port_cmd port_cmd, port_rpl;
+ struct fw_vi_cmd vi_cmd, vi_rpl;
+ fw_port_cap32_t pcaps, acaps;
+ enum fw_port_type port_type;
+ int mdio_addr;
+ int ret, i;
+
+ for_each_port(adapter, i) {
+ struct port_info *p = adap2pinfo(adapter, i);
+
+ /*
+ * If we haven't yet determined if we're talking to Firmware
+ * which knows the new 32-bit Port Caps, it's time to find
+ * out now. This will also tell new Firmware to send us Port
+ * Status Updates using the new 32-bit Port Capabilities
+ * version of the Port Information message.
+ */
+ if (fw_caps == FW_CAPS_UNKNOWN) {
+ u32 param, val;
+
+ param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
+ V_FW_PARAMS_PARAM_X
+ (FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
+ val = 1;
+ ret = t4vf_set_params(adapter, 1, &param, &val);
+ fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16);
+ adapter->params.fw_caps_support = fw_caps;
+ }
+
+ ret = t4vf_alloc_vi(adapter, p->port_id);
+ if (ret < 0) {
+ dev_err(adapter, "cannot allocate VI for port %d:"
+ " err=%d\n", p->port_id, ret);
+ return ret;
+ }
+ p->viid = ret;
+
+ /*
+ * Execute a VI Read command to get our Virtual Interface
+ * information like MAC address, etc.
+ */
+ memset(&vi_cmd, 0, sizeof(vi_cmd));
+ vi_cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ);
+ vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd));
+ vi_cmd.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(p->viid));
+ ret = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl);
+ if (ret != FW_SUCCESS)
+ return ret;
+
+ p->rss_size = G_FW_VI_CMD_RSSSIZE
+ (be16_to_cpu(vi_rpl.norss_rsssize));
+ t4_os_set_hw_addr(adapter, i, vi_rpl.mac);
+
+ /*
+ * If we don't have read access to our port information, we're
+ * done now. Else, execute a PORT Read command to get it ...
+ */
+ if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT))
+ return 0;
+
+ memset(&port_cmd, 0, sizeof(port_cmd));
+ port_cmd.op_to_portid = cpu_to_be32
+ (V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_READ |
+ V_FW_PORT_CMD_PORTID(p->port_id));
+ port_cmd.action_to_len16 = cpu_to_be32
+ (V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16 ?
+ FW_PORT_ACTION_GET_PORT_INFO :
+ FW_PORT_ACTION_GET_PORT_INFO32) |
+ FW_LEN16(port_cmd));
+ ret = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd),
+ &port_rpl);
+ if (ret != FW_SUCCESS)
+ return ret;
+
+ /*
+ * Extract the various fields from the Port Information message.
+ */
+ if (fw_caps == FW_CAPS16) {
+ u32 lstatus = be32_to_cpu
+ (port_rpl.u.info.lstatus_to_modtype);
+
+ port_type = G_FW_PORT_CMD_PTYPE(lstatus);
+ mdio_addr = ((lstatus & F_FW_PORT_CMD_MDIOCAP) ?
+ (int)G_FW_PORT_CMD_MDIOADDR(lstatus) :
+ -1);
+ pcaps = fwcaps16_to_caps32
+ (be16_to_cpu(port_rpl.u.info.pcap));
+ acaps = fwcaps16_to_caps32
+ (be16_to_cpu(port_rpl.u.info.acap));
+ } else {
+ u32 lstatus32 = be32_to_cpu
+ (port_rpl.u.info32.lstatus32_to_cbllen32);
+
+ port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
+ mdio_addr = ((lstatus32 & F_FW_PORT_CMD_MDIOCAP32) ?
+ (int)G_FW_PORT_CMD_MDIOADDR32(lstatus32) :
+ -1);
+ pcaps = be32_to_cpu(port_rpl.u.info32.pcaps32);
+ acaps = be32_to_cpu(port_rpl.u.info32.acaps32);
+ }
+
+ p->port_type = port_type;
+ p->mdio_addr = mdio_addr;
+ p->mod_type = FW_PORT_MOD_TYPE_NA;
+ init_link_config(&p->link_cfg, pcaps, acaps);
+ }
+ return 0;
+}
diff --git a/drivers/net/cxgbe/base/t4vf_hw.h b/drivers/net/cxgbe/base/t4vf_hw.h
new file mode 100644
index 00000000..55e436e7
--- /dev/null
+++ b/drivers/net/cxgbe/base/t4vf_hw.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef __T4VF_HW_H
+#define __T4VF_HW_H
+
+#define T4VF_PL_BASE_ADDR 0x0200
+#define T4VF_CIM_BASE_ADDR 0x0300
+#define T4VF_MBDATA_BASE_ADDR 0x0240
+#define T6VF_MBDATA_BASE_ADDR 0x0280
+
+#define NUM_CIM_VF_MAILBOX_DATA_INSTANCES NUM_CIM_PF_MAILBOX_DATA_INSTANCES
+#endif /* __T4VF_HW_H */
diff --git a/drivers/net/cxgbe/cxgbe.h b/drivers/net/cxgbe/cxgbe.h
index f9891545..e4a52560 100644
--- a/drivers/net/cxgbe/cxgbe.h
+++ b/drivers/net/cxgbe/cxgbe.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#ifndef _CXGBE_H_
@@ -46,12 +18,25 @@
#define CXGBE_MIN_RX_BUFSIZE ETHER_MIN_MTU /* min buf size */
#define CXGBE_MAX_RX_PKTLEN (9000 + ETHER_HDR_LEN + ETHER_CRC_LEN) /* max pkt */
+#define CXGBE_DEFAULT_RSS_KEY_LEN 40 /* 320-bits */
+#define CXGBE_RSS_HF_ALL (ETH_RSS_IPV4 | ETH_RSS_IPV6 | \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_NONFRAG_IPV6_UDP)
+
+#define CXGBE_DEVARG_KEEP_OVLAN "keep_ovlan"
+#define CXGBE_DEVARG_FORCE_LINK_UP "force_link_up"
+
+bool force_linkup(struct adapter *adap);
int cxgbe_probe(struct adapter *adapter);
+int cxgbevf_probe(struct adapter *adapter);
void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps);
int cxgbe_up(struct adapter *adap);
int cxgbe_down(struct port_info *pi);
void cxgbe_close(struct adapter *adapter);
void cxgbe_stats_get(struct port_info *pi, struct port_stats *stats);
+void cxgbevf_stats_get(struct port_info *pi, struct port_stats *stats);
void cxgbe_stats_reset(struct port_info *pi);
int link_start(struct port_info *pi);
void init_rspq(struct adapter *adap, struct sge_rspq *q, unsigned int us,
@@ -59,7 +44,11 @@ void init_rspq(struct adapter *adap, struct sge_rspq *q, unsigned int us,
int setup_sge_fwevtq(struct adapter *adapter);
void cfg_queues(struct rte_eth_dev *eth_dev);
int cfg_queue_count(struct rte_eth_dev *eth_dev);
+int init_rss(struct adapter *adap);
int setup_rss(struct port_info *pi);
void cxgbe_enable_rx_queues(struct port_info *pi);
+void print_port_info(struct adapter *adap);
+void print_adapter_info(struct adapter *adap);
+int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key);
#endif /* _CXGBE_H_ */
diff --git a/drivers/net/cxgbe/cxgbe_compat.h b/drivers/net/cxgbe/cxgbe_compat.h
index 03bba9fe..779bcf16 100644
--- a/drivers/net/cxgbe/cxgbe_compat.h
+++ b/drivers/net/cxgbe/cxgbe_compat.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#ifndef _CXGBE_COMPAT_H_
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index 5cd260f4..61115e26 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#include <sys/queue.h>
@@ -63,6 +35,7 @@
#include <rte_dev.h>
#include "cxgbe.h"
+#include "cxgbe_pfvf.h"
/*
* Macros needed to support the PCI Device ID Table ...
@@ -85,8 +58,21 @@
*/
#include "t4_pci_id_tbl.h"
-static uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+#define CXGBE_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT |\
+ DEV_TX_OFFLOAD_IPV4_CKSUM |\
+ DEV_TX_OFFLOAD_UDP_CKSUM |\
+ DEV_TX_OFFLOAD_TCP_CKSUM |\
+ DEV_TX_OFFLOAD_TCP_TSO)
+
+#define CXGBE_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP |\
+ DEV_RX_OFFLOAD_CRC_STRIP |\
+ DEV_RX_OFFLOAD_IPV4_CKSUM |\
+ DEV_RX_OFFLOAD_JUMBO_FRAME |\
+ DEV_RX_OFFLOAD_UDP_CKSUM |\
+ DEV_RX_OFFLOAD_TCP_CKSUM)
+
+uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
{
struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
uint16_t pkts_sent, pkts_remain;
@@ -119,8 +105,8 @@ static uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
return total_sent;
}
-static uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
{
struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
unsigned int work_done;
@@ -135,8 +121,8 @@ static uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
return work_done;
}
-static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
- struct rte_eth_dev_info *device_info)
+void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *device_info)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -148,8 +134,6 @@ static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
.nb_align = 1,
};
- device_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
-
device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
device_info->max_rx_queues = max_queues;
@@ -159,25 +143,22 @@ static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
device_info->max_vfs = adapter->params.arch.vfcount;
device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */
- device_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
+ device_info->rx_queue_offload_capa = 0UL;
+ device_info->rx_offload_capa = CXGBE_RX_OFFLOADS;
- device_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ device_info->tx_queue_offload_capa = 0UL;
+ device_info->tx_offload_capa = CXGBE_TX_OFFLOADS;
device_info->reta_size = pi->rss_size;
+ device_info->hash_key_size = CXGBE_DEFAULT_RSS_KEY_LEN;
+ device_info->flow_type_rss_offloads = CXGBE_RSS_HF_ALL;
device_info->rx_desc_lim = cxgbe_desc_lim;
device_info->tx_desc_lim = cxgbe_desc_lim;
cxgbe_get_speed_caps(pi, &device_info->speed_capa);
}
-static void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
+void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -186,7 +167,7 @@ static void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
1, -1, 1, -1, false);
}
-static void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
+void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -195,7 +176,7 @@ static void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
0, -1, 1, -1, false);
}
-static void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
+void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -206,7 +187,7 @@ static void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
-1, 1, 1, -1, false);
}
-static void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
+void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -217,28 +198,26 @@ static void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
-1, 0, 1, -1, false);
}
-static int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
- __rte_unused int wait_to_complete)
+int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
+ __rte_unused int wait_to_complete)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
struct sge *s = &adapter->sge;
- struct rte_eth_link *old_link = &eth_dev->data->dev_link;
+ struct rte_eth_link new_link;
unsigned int work_done, budget = 4;
cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
- if (old_link->link_status == pi->link_cfg.link_ok)
- return -1; /* link not changed */
- eth_dev->data->dev_link.link_status = pi->link_cfg.link_ok;
- eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- eth_dev->data->dev_link.link_speed = pi->link_cfg.speed;
+ new_link.link_status = force_linkup(adapter) ?
+ ETH_LINK_UP : pi->link_cfg.link_ok;
+ new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ new_link.link_speed = pi->link_cfg.speed;
- /* link has changed */
- return 0;
+ return rte_eth_linkstatus_set(eth_dev, &new_link);
}
-static int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
+int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -254,9 +233,11 @@ static int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
/* set to jumbo mode if needed */
if (new_mtu > ETHER_MAX_LEN)
- eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ eth_dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
-1, -1, true);
@@ -266,21 +247,13 @@ static int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
return err;
}
-static int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
- uint16_t tx_queue_id);
-static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
- uint16_t tx_queue_id);
-static void cxgbe_dev_tx_queue_release(void *q);
-static void cxgbe_dev_rx_queue_release(void *q);
-
/*
* Stop device.
*/
-static void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
+void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
- int i, dev_down = 0;
CXGBE_FUNC_TRACE();
@@ -294,28 +267,12 @@ static void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
* have been disabled
*/
t4_sge_eth_clear_queues(pi);
-
- /* See if all ports are down */
- for_each_port(adapter, i) {
- pi = adap2pinfo(adapter, i);
- /*
- * Skip first port of the adapter since it will be closed
- * by DPDK
- */
- if (i == 0)
- continue;
- dev_down += (pi->eth_dev->data->dev_started == 0) ? 1 : 0;
- }
-
- /* If rest of the ports are stopped, then free up resources */
- if (dev_down == (adapter->params.nports - 1))
- cxgbe_close(adapter);
}
/* Start the device.
* It returns 0 on success.
*/
-static int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
+int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -367,7 +324,7 @@ out:
/*
* Stop device: disable rx and tx functions to allow for reconfiguring.
*/
-static void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
+void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -386,13 +343,20 @@ static void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
t4_sge_eth_clear_queues(pi);
}
-static int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
+int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
+ uint64_t configured_offloads;
int err;
CXGBE_FUNC_TRACE();
+ configured_offloads = eth_dev->data->dev_conf.rxmode.offloads;
+ if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
+ dev_info(adapter, "can't disable hw crc strip\n");
+ eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_CRC_STRIP;
+ }
if (!(adapter->flags & FW_QUEUE_BOUND)) {
err = setup_sge_fwevtq(adapter);
@@ -408,8 +372,7 @@ static int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
return 0;
}
-static int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
- uint16_t tx_queue_id)
+int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
int ret;
struct sge_eth_txq *txq = (struct sge_eth_txq *)
@@ -424,8 +387,7 @@ static int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
return ret;
}
-static int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
- uint16_t tx_queue_id)
+int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
int ret;
struct sge_eth_txq *txq = (struct sge_eth_txq *)
@@ -440,10 +402,10 @@ static int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
return ret;
}
-static int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
- uint16_t queue_idx, uint16_t nb_desc,
- unsigned int socket_id,
- const struct rte_eth_txconf *tx_conf)
+int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx, uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -452,8 +414,6 @@ static int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
int err = 0;
unsigned int temp_nb_desc;
- RTE_SET_USED(tx_conf);
-
dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
__func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
socket_id, pi->first_qset);
@@ -488,13 +448,12 @@ static int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx,
s->fw_evtq.cntxt_id, socket_id);
- dev_debug(adapter, "%s: txq->q.cntxt_id= %d err = %d\n",
- __func__, txq->q.cntxt_id, err);
-
+ dev_debug(adapter, "%s: txq->q.cntxt_id= %u txq->q.abs_id= %u err = %d\n",
+ __func__, txq->q.cntxt_id, txq->q.abs_id, err);
return err;
}
-static void cxgbe_dev_tx_queue_release(void *q)
+void cxgbe_dev_tx_queue_release(void *q)
{
struct sge_eth_txq *txq = (struct sge_eth_txq *)q;
@@ -510,8 +469,7 @@ static void cxgbe_dev_tx_queue_release(void *q)
}
}
-static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
- uint16_t rx_queue_id)
+int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
int ret;
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
@@ -530,8 +488,7 @@ static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
return ret;
}
-static int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
- uint16_t rx_queue_id)
+int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
int ret;
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
@@ -549,11 +506,11 @@ static int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
return ret;
}
-static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
- uint16_t queue_idx, uint16_t nb_desc,
- unsigned int socket_id,
- const struct rte_eth_rxconf *rx_conf,
- struct rte_mempool *mp)
+int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx, uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mp)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -565,8 +522,6 @@ static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info dev_info;
unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
- RTE_SET_USED(rx_conf);
-
dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
__func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
socket_id, mp);
@@ -613,21 +568,25 @@ static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
/* Set to jumbo mode if necessary */
if (pkt_len > ETHER_MAX_LEN)
- eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ eth_dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
&rxq->fl, t4_ethrx_handler,
- t4_get_tp_ch_map(adapter, pi->tx_chan), mp,
+ is_pf4(adapter) ?
+ t4_get_tp_ch_map(adapter, pi->tx_chan) : 0, mp,
queue_idx, socket_id);
- dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u\n",
- __func__, err, pi->port_id, rxq->rspq.cntxt_id);
+ dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u; abs_id = %u\n",
+ __func__, err, pi->port_id, rxq->rspq.cntxt_id,
+ rxq->rspq.abs_id);
return err;
}
-static void cxgbe_dev_rx_queue_release(void *q)
+void cxgbe_dev_rx_queue_release(void *q)
{
struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;
struct sge_rspq *rq = &rxq->rspq;
@@ -750,7 +709,7 @@ static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
struct adapter *adapter = pi->adapter;
struct link_config *lc = &pi->link_cfg;
- if (lc->supported & FW_PORT_CAP_ANEG) {
+ if (lc->pcaps & FW_PORT_CAP32_ANEG) {
if (fc_conf->autoneg)
lc->requested_fc |= PAUSE_AUTONEG;
else
@@ -773,7 +732,7 @@ static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
&pi->link_cfg);
}
-static const uint32_t *
+const uint32_t *
cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
static const uint32_t ptypes[] = {
@@ -787,6 +746,88 @@ cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
return NULL;
}
+/* Update RSS hash configuration
+ */
+static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ int err;
+
+ err = cxgbe_write_rss_conf(pi, rss_conf->rss_hf);
+ if (err)
+ return err;
+
+ pi->rss_hf = rss_conf->rss_hf;
+
+ if (rss_conf->rss_key) {
+ u32 key[10], mod_key[10];
+ int i, j;
+
+ memcpy(key, rss_conf->rss_key, CXGBE_DEFAULT_RSS_KEY_LEN);
+
+ for (i = 9, j = 0; i >= 0; i--, j++)
+ mod_key[j] = cpu_to_be32(key[i]);
+
+ t4_write_rss_key(adapter, mod_key, -1);
+ }
+
+ return 0;
+}
+
+/* Get RSS hash configuration
+ */
+static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ u64 rss_hf = 0;
+ u64 flags = 0;
+ int err;
+
+ err = t4_read_config_vi_rss(adapter, adapter->mbox, pi->viid,
+ &flags, NULL);
+
+ if (err)
+ return err;
+
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) {
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+ }
+
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
+ rss_hf |= ETH_RSS_IPV6;
+
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ }
+
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
+ rss_hf |= ETH_RSS_IPV4;
+
+ rss_conf->rss_hf = rss_hf;
+
+ if (rss_conf->rss_key) {
+ u32 key[10], mod_key[10];
+ int i, j;
+
+ t4_read_rss_key(adapter, key);
+
+ for (i = 9, j = 0; i >= 0; i--, j++)
+ mod_key[j] = be32_to_cpu(key[i]);
+
+ memcpy(rss_conf->rss_key, mod_key, CXGBE_DEFAULT_RSS_KEY_LEN);
+ }
+
+ return 0;
+}
+
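The two callbacks above back the standard ethdev RSS hash API. A hedged application-side sketch follows; the port id, key bytes, and hash types are illustrative only and not taken from this patch:

	#include <rte_ethdev.h>

	static int example_update_rss(uint16_t port_id)
	{
		/* 40-byte key, matching CXGBE_DEFAULT_RSS_KEY_LEN above;
		 * the contents are arbitrary illustration values and the
		 * remaining bytes default to zero.
		 */
		static uint8_t key[40] = { 0x6d, 0x5a, 0x56, 0xda };
		struct rte_eth_rss_conf conf = {
			.rss_key = key,
			.rss_key_len = sizeof(key),
			.rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
		};

		/* Programs the VI hash config and the global key through
		 * the rss_hash_update callback added in this patch.
		 */
		return rte_eth_dev_rss_hash_update(port_id, &conf);
	}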
static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev)
{
RTE_SET_USED(dev);
@@ -956,6 +997,23 @@ static int cxgbe_get_regs(struct rte_eth_dev *eth_dev,
return 0;
}
+int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr)
+{
+ struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ int ret;
+
+ ret = t4_change_mac(adapter, adapter->mbox, pi->viid,
+ pi->xact_addr_filt, (u8 *)addr, true, true);
+ if (ret < 0) {
+ dev_err(adapter, "failed to set mac addr; err = %d\n",
+ ret);
+ return ret;
+ }
+ pi->xact_addr_filt = ret;
+ return 0;
+}
+
static const struct eth_dev_ops cxgbe_eth_dev_ops = {
.dev_start = cxgbe_dev_start,
.dev_stop = cxgbe_dev_stop,
@@ -985,6 +1043,9 @@ static const struct eth_dev_ops cxgbe_eth_dev_ops = {
.get_eeprom = cxgbe_get_eeprom,
.set_eeprom = cxgbe_set_eeprom,
.get_reg = cxgbe_get_regs,
+ .rss_hash_update = cxgbe_dev_rss_hash_update,
+ .rss_hash_conf_get = cxgbe_dev_rss_hash_conf_get,
+ .mac_addr_set = cxgbe_mac_addr_set,
};
/*
@@ -1004,14 +1065,34 @@ static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->dev_ops = &cxgbe_eth_dev_ops;
eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
- /* for secondary processes, we don't initialise any further as primary
- * has already done this work.
+ /* for secondary processes, we attach to ethdevs allocated by primary
+ * and do minimal initialization.
*/
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ int i;
+
+ for (i = 1; i < MAX_NPORTS; i++) {
+ struct rte_eth_dev *rest_eth_dev;
+ char namei[RTE_ETH_NAME_MAX_LEN];
+
+ snprintf(namei, sizeof(namei), "%s_%d",
+ pci_dev->device.name, i);
+ rest_eth_dev = rte_eth_dev_attach_secondary(namei);
+ if (rest_eth_dev) {
+ rest_eth_dev->device = &pci_dev->device;
+ rest_eth_dev->dev_ops =
+ eth_dev->dev_ops;
+ rest_eth_dev->rx_pkt_burst =
+ eth_dev->rx_pkt_burst;
+ rest_eth_dev->tx_pkt_burst =
+ eth_dev->tx_pkt_burst;
+ rte_eth_dev_probing_finish(rest_eth_dev);
+ }
+ }
return 0;
-
- pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ }
snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
adapter = rte_zmalloc(name, sizeof(*adapter), 0);
@@ -1043,6 +1124,16 @@ out_free_adapter:
return err;
}
+static int eth_cxgbe_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adap = pi->adapter;
+
+ /* Free up other ports and all resources */
+ cxgbe_close(adap);
+ return 0;
+}
+
static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
@@ -1052,7 +1143,7 @@ static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
static int eth_cxgbe_pci_remove(struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_cxgbe_dev_uninit);
}
static struct rte_pci_driver rte_cxgbe_pmd = {
@@ -1065,3 +1156,6 @@ static struct rte_pci_driver rte_cxgbe_pmd = {
RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe,
+ CXGBE_DEVARG_KEEP_OVLAN "=<0|1> "
+ CXGBE_DEVARG_FORCE_LINK_UP "=<0|1> ");
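Note: the two device arguments registered here are parsed per PCI device. Assuming the usual DPDK devargs syntax for whitelisted PCI devices (not shown in this patch), they would be passed on the EAL command line as, for example, -w 02:00.0,keep_ovlan=1,force_link_up=1.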
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index 28db6c06..54eb23df 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#include <sys/queue.h>
@@ -57,9 +29,9 @@
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
-#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
+#include <rte_kvargs.h>
#include "common.h"
#include "t4_regs.h"
@@ -199,15 +171,16 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
static inline bool is_x_1g_port(const struct link_config *lc)
{
- return (lc->supported & FW_PORT_CAP_SPEED_1G) != 0;
+ return (lc->pcaps & FW_PORT_CAP32_SPEED_1G) != 0;
}
static inline bool is_x_10g_port(const struct link_config *lc)
{
unsigned int speeds, high_speeds;
- speeds = V_FW_PORT_CAP_SPEED(G_FW_PORT_CAP_SPEED(lc->supported));
- high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);
+ speeds = V_FW_PORT_CAP32_SPEED(G_FW_PORT_CAP32_SPEED(lc->pcaps));
+ high_speeds = speeds &
+ ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G);
return high_speeds != 0;
}
@@ -345,14 +318,17 @@ static void setup_memwin(struct adapter *adap)
MEMWIN_NIC));
}
-static int init_rss(struct adapter *adap)
+int init_rss(struct adapter *adap)
{
unsigned int i;
- int err;
- err = t4_init_rss_mode(adap, adap->mbox);
- if (err)
- return err;
+ if (is_pf4(adap)) {
+ int err;
+
+ err = t4_init_rss_mode(adap, adap->mbox);
+ if (err)
+ return err;
+ }
for_each_port(adap, i) {
struct port_info *pi = adap2pinfo(adap, i);
@@ -360,6 +336,8 @@ static int init_rss(struct adapter *adap)
pi->rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0);
if (!pi->rss)
return -ENOMEM;
+
+ pi->rss_hf = CXGBE_RSS_HF_ALL;
}
return 0;
}
@@ -367,7 +345,7 @@ static int init_rss(struct adapter *adap)
/**
* Dump basic information about the adapter.
*/
-static void print_adapter_info(struct adapter *adap)
+void print_adapter_info(struct adapter *adap)
{
/**
* Hardware/Firmware/etc. Version/Revision IDs.
@@ -375,27 +353,29 @@ static void print_adapter_info(struct adapter *adap)
t4_dump_version_info(adap);
}
-static void print_port_info(struct adapter *adap)
+void print_port_info(struct adapter *adap)
{
int i;
char buf[80];
struct rte_pci_addr *loc = &adap->pdev->addr;
for_each_port(adap, i) {
- const struct port_info *pi = &adap->port[i];
+ const struct port_info *pi = adap2pinfo(adap, i);
char *bufp = buf;
- if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
bufp += sprintf(bufp, "100M/");
- if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
bufp += sprintf(bufp, "1G/");
- if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
bufp += sprintf(bufp, "10G/");
- if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G)
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
bufp += sprintf(bufp, "25G/");
- if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
bufp += sprintf(bufp, "40G/");
- if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G)
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
+ bufp += sprintf(bufp, "50G/");
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
bufp += sprintf(bufp, "100G/");
if (bufp != buf)
--bufp;
@@ -412,6 +392,84 @@ static void print_port_info(struct adapter *adap)
}
}
+static int
+check_devargs_handler(__rte_unused const char *key, const char *value,
+ __rte_unused void *opaque)
+{
+ if (strcmp(value, "1"))
+ return -1;
+
+ return 0;
+}
+
+int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key)
+{
+ struct rte_kvargs *kvlist;
+
+ if (!devargs)
+ return 0;
+
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (!kvlist)
+ return 0;
+
+ if (!rte_kvargs_count(kvlist, key)) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+
+ if (rte_kvargs_process(kvlist, key,
+ check_devargs_handler, NULL) < 0) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+ rte_kvargs_free(kvlist);
+
+ return 1;
+}
+
+static void configure_vlan_types(struct adapter *adapter)
+{
+ struct rte_pci_device *pdev = adapter->pdev;
+ int i;
+
+ for_each_port(adapter, i) {
+ /* OVLAN Type 0x88a8 */
+ t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN0),
+ V_OVLAN_MASK(M_OVLAN_MASK) |
+ V_OVLAN_ETYPE(M_OVLAN_ETYPE),
+ V_OVLAN_MASK(M_OVLAN_MASK) |
+ V_OVLAN_ETYPE(0x88a8));
+ /* OVLAN Type 0x9100 */
+ t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN1),
+ V_OVLAN_MASK(M_OVLAN_MASK) |
+ V_OVLAN_ETYPE(M_OVLAN_ETYPE),
+ V_OVLAN_MASK(M_OVLAN_MASK) |
+ V_OVLAN_ETYPE(0x9100));
+ /* OVLAN Type 0x8100 */
+ t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN2),
+ V_OVLAN_MASK(M_OVLAN_MASK) |
+ V_OVLAN_ETYPE(M_OVLAN_ETYPE),
+ V_OVLAN_MASK(M_OVLAN_MASK) |
+ V_OVLAN_ETYPE(0x8100));
+
+ /* IVLAN 0X8100 */
+ t4_set_reg_field(adapter, MPS_PORT_RX_IVLAN(i),
+ V_IVLAN_ETYPE(M_IVLAN_ETYPE),
+ V_IVLAN_ETYPE(0x8100));
+
+ t4_set_reg_field(adapter, MPS_PORT_RX_CTL(i),
+ F_OVLAN_EN0 | F_OVLAN_EN1 |
+ F_OVLAN_EN2 | F_IVLAN_EN,
+ F_OVLAN_EN0 | F_OVLAN_EN1 |
+ F_OVLAN_EN2 | F_IVLAN_EN);
+ }
+
+ if (cxgbe_get_devargs(pdev->device.devargs, CXGBE_DEVARG_KEEP_OVLAN))
+ t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG,
+ V_RM_OVLAN(1), V_RM_OVLAN(0));
+}
+
static void configure_pcie_ext_tag(struct adapter *adapter)
{
u16 v;
@@ -828,6 +886,7 @@ static int adap_init0(struct adapter *adap)
t4_init_sge_params(adap);
t4_init_tp_params(adap);
configure_pcie_ext_tag(adap);
+ configure_vlan_types(adap);
adap->params.drv_memwin = MEMWIN_NIC;
adap->flags |= FW_OK;
@@ -860,7 +919,7 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id)
NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
};
- const struct port_info *pi = &adap->port[port_id];
+ const struct port_info *pi = adap2pinfo(adap, port_id);
if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
dev_info(adap, "Port%d: port module unplugged\n", pi->port_id);
@@ -881,6 +940,18 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id)
pi->port_id, pi->mod_type);
}
+inline bool force_linkup(struct adapter *adap)
+{
+ struct rte_pci_device *pdev = adap->pdev;
+
+ if (is_pf4(adap))
+ return false; /* force_linkup not required for pf driver */
+ if (!cxgbe_get_devargs(pdev->device.devargs,
+ CXGBE_DEVARG_FORCE_LINK_UP))
+ return false;
+ return true;
+}
+
/**
* link_start - enable a port
* @dev: the port to enable
@@ -912,7 +983,7 @@ int link_start(struct port_info *pi)
ret = 0;
}
}
- if (ret == 0)
+ if (ret == 0 && is_pf4(adapter))
ret = t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan,
&pi->link_cfg);
if (ret == 0) {
@@ -926,18 +997,78 @@ int link_start(struct port_info *pi)
ret = t4_enable_vi_params(adapter, adapter->mbox, pi->viid,
true, true, false);
}
+
+ if (ret == 0 && force_linkup(adapter))
+ pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP;
return ret;
}
/**
- * cxgb4_write_rss - write the RSS table for a given port
+ * cxgbe_write_rss_conf - write the RSS hash configuration for a given port
+ * @pi: the port
+ * @rss_hf: Hash configuration to apply
+ */
+int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf)
+{
+ struct adapter *adapter = pi->adapter;
+ const struct sge_eth_rxq *rxq;
+ u64 flags = 0;
+ u16 rss;
+ int err;
+
+ /* Should never be called before setting up sge eth rx queues */
+ if (!(adapter->flags & FULL_INIT_DONE)) {
+ dev_err(adapter, "%s No RXQs available on port %d\n",
+ __func__, pi->port_id);
+ return -EINVAL;
+ }
+
+ /* Don't allow unsupported hash functions */
+ if (rss_hf & ~CXGBE_RSS_HF_ALL)
+ return -EINVAL;
+
+ if (rss_hf & ETH_RSS_IPV4)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
+ F_FW_RSS_VI_CONFIG_CMD_UDPEN;
+
+ if (rss_hf & ETH_RSS_IPV6)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
+ F_FW_RSS_VI_CONFIG_CMD_UDPEN;
+
+ rxq = &adapter->sge.ethrxq[pi->first_qset];
+ rss = rxq[0].rspq.abs_id;
+
+ /* If Tunnel All Lookup isn't specified in the global RSS
+ * Configuration, then we need to specify a default Ingress
+ * Queue for any ingress packets which aren't hashed. We'll
+ * use our first ingress queue ...
+ */
+ err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
+ flags, rss);
+ return err;
+}
+
+/**
+ * cxgbe_write_rss - write the RSS table for a given port
* @pi: the port
* @queues: array of queue indices for RSS
*
* Sets up the portion of the HW RSS table for the port's VI to distribute
* packets to the Rx queues in @queues.
*/
-int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
+int cxgbe_write_rss(const struct port_info *pi, const u16 *queues)
{
u16 *rss;
int i, err;
@@ -958,20 +1089,6 @@ int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
pi->rss_size, rss, pi->rss_size);
- /*
- * If Tunnel All Lookup isn't specified in the global RSS
- * Configuration, then we need to specify a default Ingress
- * Queue for any ingress packets which aren't hashed. We'll
- * use our first ingress queue ...
- */
- if (!err)
- err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
- F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
- F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
- F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
- F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN |
- F_FW_RSS_VI_CONFIG_CMD_UDPEN,
- rss[0]);
rte_free(rss);
return err;
}
@@ -1001,7 +1118,11 @@ int setup_rss(struct port_info *pi)
for (j = 0; j < pi->rss_size; j++)
pi->rss[j] = j % pi->n_rx_qsets;
- err = cxgb4_write_rss(pi, pi->rss);
+ err = cxgbe_write_rss(pi, pi->rss);
+ if (err)
+ return err;
+
+ err = cxgbe_write_rss_conf(pi, pi->rss_hf);
if (err)
return err;
pi->flags |= PORT_RSS_DONE;
@@ -1016,7 +1137,8 @@ int setup_rss(struct port_info *pi)
static void enable_rx(struct adapter *adap, struct sge_rspq *q)
{
/* 0-increment GTS to start the timer and enable interrupts */
- t4_write_reg(adap, MYPF_REG(A_SGE_PF_GTS),
+ t4_write_reg(adap, is_pf4(adap) ? MYPF_REG(A_SGE_PF_GTS) :
+ T4VF_SGE_BASE_ADDR + A_SGE_VF_GTS,
V_SEINTARM(q->intr_params) |
V_INGRESSQID(q->cntxt_id));
}
@@ -1051,7 +1173,7 @@ static void fw_caps_to_speed_caps(enum fw_port_type port_type,
#define FW_CAPS_TO_SPEED(__fw_name) \
do { \
- if (fw_caps & FW_PORT_CAP_ ## __fw_name) \
+ if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
SET_SPEED(__fw_name); \
} while (0)
@@ -1106,6 +1228,7 @@ static void fw_caps_to_speed_caps(enum fw_port_type port_type,
case FW_PORT_TYPE_CR4_QSFP:
FW_CAPS_TO_SPEED(SPEED_25G);
FW_CAPS_TO_SPEED(SPEED_40G);
+ FW_CAPS_TO_SPEED(SPEED_50G);
FW_CAPS_TO_SPEED(SPEED_100G);
break;
@@ -1128,10 +1251,10 @@ void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps)
{
*speed_caps = 0;
- fw_caps_to_speed_caps(pi->port_type, pi->link_cfg.supported,
+ fw_caps_to_speed_caps(pi->port_type, pi->link_cfg.pcaps,
speed_caps);
- if (!(pi->link_cfg.supported & FW_PORT_CAP_ANEG))
+ if (!(pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG))
*speed_caps |= ETH_LINK_SPEED_FIXED;
}
@@ -1147,7 +1270,8 @@ int cxgbe_up(struct adapter *adap)
{
enable_rx(adap, &adap->sge.fw_evtq);
t4_sge_tx_monitor_start(adap);
- t4_intr_enable(adap);
+ if (is_pf4(adap))
+ t4_intr_enable(adap);
adap->flags |= FULL_INIT_DONE;
/* TODO: deadman watchdog ?? */
@@ -1168,7 +1292,7 @@ int cxgbe_down(struct port_info *pi)
return err;
}
- t4_reset_link_config(adapter, pi->port_id);
+ t4_reset_link_config(adapter, pi->pidx);
return 0;
}
@@ -1181,7 +1305,8 @@ void cxgbe_close(struct adapter *adapter)
int i;
if (adapter->flags & FULL_INIT_DONE) {
- t4_intr_disable(adapter);
+ if (is_pf4(adapter))
+ t4_intr_disable(adapter);
t4_sge_tx_monitor_stop(adapter);
t4_free_sge_resources(adapter);
for_each_port(adapter, i) {
@@ -1190,11 +1315,16 @@ void cxgbe_close(struct adapter *adapter)
t4_free_vi(adapter, adapter->mbox,
adapter->pf, 0, pi->viid);
rte_free(pi->eth_dev->data->mac_addrs);
+ /* Skip first port since it'll be freed by DPDK stack */
+ if (i) {
+ rte_free(pi->eth_dev->data->dev_private);
+ rte_eth_dev_release_port(pi->eth_dev);
+ }
}
adapter->flags &= ~FULL_INIT_DONE;
}
- if (adapter->flags & FW_OK)
+ if (is_pf4(adapter) && (adapter->flags & FW_OK))
t4_fw_bye(adapter, adapter->mbox);
}
@@ -1265,21 +1395,16 @@ int cxgbe_probe(struct adapter *adapter)
}
for_each_port(adapter, i) {
- char name[RTE_ETH_NAME_MAX_LEN];
- struct rte_eth_dev_data *data = NULL;
const unsigned int numa_node = rte_socket_id();
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_dev *eth_dev;
- pi = &adapter->port[i];
- pi->adapter = adapter;
- pi->xact_addr_filt = -1;
- pi->port_id = i;
-
- snprintf(name, sizeof(name), "cxgbe%d",
- adapter->eth_dev->data->port_id + i);
+ snprintf(name, sizeof(name), "%s_%d",
+ adapter->pdev->device.name, i);
if (i == 0) {
/* First port is already allocated by DPDK */
- pi->eth_dev = adapter->eth_dev;
+ eth_dev = adapter->eth_dev;
goto allocate_mac;
}
@@ -1289,21 +1414,26 @@ int cxgbe_probe(struct adapter *adapter)
*/
/* reserve an ethdev entry */
- pi->eth_dev = rte_eth_dev_allocate(name);
- if (!pi->eth_dev)
+ eth_dev = rte_eth_dev_allocate(name);
+ if (!eth_dev)
goto out_free;
- data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
- if (!data)
+ eth_dev->data->dev_private =
+ rte_zmalloc_socket(name, sizeof(struct port_info),
+ RTE_CACHE_LINE_SIZE, numa_node);
+ if (!eth_dev->data->dev_private)
goto out_free;
- data->port_id = adapter->eth_dev->data->port_id + i;
-
- pi->eth_dev->data = data;
-
allocate_mac:
+ pi = (struct port_info *)eth_dev->data->dev_private;
+ adapter->port[i] = pi;
+ pi->eth_dev = eth_dev;
+ pi->adapter = adapter;
+ pi->xact_addr_filt = -1;
+ pi->port_id = i;
+ pi->pidx = i;
+
pi->eth_dev->device = &adapter->pdev->device;
- pi->eth_dev->data->dev_private = pi;
pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops;
pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst;
pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst;
@@ -1318,6 +1448,11 @@ allocate_mac:
err = -1;
goto out_free;
}
+
+ if (i > 0) {
+ /* First port will be notified by upper layer */
+ rte_eth_dev_probing_finish(eth_dev);
+ }
}
if (adapter->flags & FW_OK) {
@@ -1349,8 +1484,11 @@ out_free:
/* Skip first port since it'll be de-allocated by DPDK */
if (i == 0)
continue;
- if (pi->eth_dev->data)
- rte_free(pi->eth_dev->data);
+ if (pi->eth_dev) {
+ if (pi->eth_dev->data->dev_private)
+ rte_free(pi->eth_dev->data->dev_private);
+ rte_eth_dev_release_port(pi->eth_dev);
+ }
}
if (adapter->flags & FW_OK)
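For context on the new cxgbe_write_rss_conf() path added above: the rss_hf
value it translates into FW_RSS_VI_CONFIG flags originates from the
application's port configuration (and defaults to CXGBE_RSS_HF_ALL in
init_rss()). A minimal application-side sketch, not part of this patch,
requesting a subset of the hash types the function accepts:

    #include <rte_ethdev.h>

    static int configure_rss(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
    {
            struct rte_eth_conf conf = {
                    .rxmode = { .mq_mode = ETH_MQ_RX_RSS },
                    .rx_adv_conf = {
                            .rss_conf = {
                                    /* subset of CXGBE_RSS_HF_ALL */
                                    .rss_hf = ETH_RSS_IPV4 |
                                              ETH_RSS_NONFRAG_IPV4_TCP |
                                              ETH_RSS_NONFRAG_IPV4_UDP,
                            },
                    },
            };

            /* The requested rss_hf is expected to end up in pi->rss_hf and is
             * pushed to hardware by setup_rss() -> cxgbe_write_rss_conf().
             */
            return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
    }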
diff --git a/drivers/net/cxgbe/cxgbe_pfvf.h b/drivers/net/cxgbe/cxgbe_pfvf.h
new file mode 100644
index 00000000..2bba9742
--- /dev/null
+++ b/drivers/net/cxgbe/cxgbe_pfvf.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _CXGBE_PFVF_H_
+#define _CXGBE_PFVF_H_
+
+void cxgbe_dev_rx_queue_release(void *q);
+void cxgbe_dev_tx_queue_release(void *q);
+void cxgbe_dev_stop(struct rte_eth_dev *eth_dev);
+void cxgbe_dev_close(struct rte_eth_dev *eth_dev);
+void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *device_info);
+void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev);
+void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev);
+void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev);
+void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev);
+int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr);
+int cxgbe_dev_configure(struct rte_eth_dev *eth_dev);
+int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
+ uint16_t tx_queue_id);
+int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
+ uint16_t tx_queue_id);
+int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id);
+int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id);
+int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu);
+int cxgbe_dev_start(struct rte_eth_dev *eth_dev);
+int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
+ int wait_to_complete);
+uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+const uint32_t *cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev);
+#endif /* _CXGBE_PFVF_H_ */
diff --git a/drivers/net/cxgbe/cxgbevf_ethdev.c b/drivers/net/cxgbe/cxgbevf_ethdev.c
new file mode 100644
index 00000000..a942ba6b
--- /dev/null
+++ b/drivers/net/cxgbe/cxgbevf_ethdev.c
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+
+#include "cxgbe.h"
+#include "cxgbe_pfvf.h"
+
+/*
+ * Macros needed to support the PCI Device ID Table ...
+ */
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
+ static const struct rte_pci_id cxgb4vf_pci_tbl[] = {
+#define CH_PCI_DEVICE_ID_FUNCTION 0x8
+
+#define PCI_VENDOR_ID_CHELSIO 0x1425
+
+#define CH_PCI_ID_TABLE_ENTRY(devid) \
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }
+
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
+ { .vendor_id = 0, } \
+ }
+
+/*
+ * ... and the PCI ID Table itself ...
+ */
+#include "t4_pci_id_tbl.h"
+
+/*
+ * Get port statistics.
+ */
+static int cxgbevf_dev_stats_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_stats *eth_stats)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ struct sge *s = &adapter->sge;
+ struct port_stats ps;
+ unsigned int i;
+
+ cxgbevf_stats_get(pi, &ps);
+
+ /* RX Stats */
+ eth_stats->ierrors = ps.rx_len_err;
+
+ /* TX Stats */
+ eth_stats->opackets = ps.tx_bcast_frames + ps.tx_mcast_frames +
+ ps.tx_ucast_frames;
+ eth_stats->oerrors = ps.tx_drop;
+
+ for (i = 0; i < pi->n_rx_qsets; i++) {
+ struct sge_eth_rxq *rxq =
+ &s->ethrxq[pi->first_qset + i];
+
+ eth_stats->q_ipackets[i] = rxq->stats.pkts;
+ eth_stats->q_ibytes[i] = rxq->stats.rx_bytes;
+ eth_stats->ipackets += eth_stats->q_ipackets[i];
+ eth_stats->ibytes += eth_stats->q_ibytes[i];
+ }
+
+ for (i = 0; i < pi->n_tx_qsets; i++) {
+ struct sge_eth_txq *txq =
+ &s->ethtxq[pi->first_qset + i];
+
+ eth_stats->q_opackets[i] = txq->stats.pkts;
+ eth_stats->q_obytes[i] = txq->stats.tx_bytes;
+ eth_stats->q_errors[i] = txq->stats.mapping_err;
+ }
+ return 0;
+}
+
+static const struct eth_dev_ops cxgbevf_eth_dev_ops = {
+ .dev_start = cxgbe_dev_start,
+ .dev_stop = cxgbe_dev_stop,
+ .dev_close = cxgbe_dev_close,
+ .promiscuous_enable = cxgbe_dev_promiscuous_enable,
+ .promiscuous_disable = cxgbe_dev_promiscuous_disable,
+ .allmulticast_enable = cxgbe_dev_allmulticast_enable,
+ .allmulticast_disable = cxgbe_dev_allmulticast_disable,
+ .dev_configure = cxgbe_dev_configure,
+ .dev_infos_get = cxgbe_dev_info_get,
+ .dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
+ .link_update = cxgbe_dev_link_update,
+ .mtu_set = cxgbe_dev_mtu_set,
+ .tx_queue_setup = cxgbe_dev_tx_queue_setup,
+ .tx_queue_start = cxgbe_dev_tx_queue_start,
+ .tx_queue_stop = cxgbe_dev_tx_queue_stop,
+ .tx_queue_release = cxgbe_dev_tx_queue_release,
+ .rx_queue_setup = cxgbe_dev_rx_queue_setup,
+ .rx_queue_start = cxgbe_dev_rx_queue_start,
+ .rx_queue_stop = cxgbe_dev_rx_queue_stop,
+ .rx_queue_release = cxgbe_dev_rx_queue_release,
+ .stats_get = cxgbevf_dev_stats_get,
+ .mac_addr_set = cxgbe_mac_addr_set,
+};
+
+/*
+ * Initialize driver
+ * It returns 0 on success.
+ */
+static int eth_cxgbevf_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct rte_pci_device *pci_dev;
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct adapter *adapter = NULL;
+ int err = 0;
+
+ CXGBE_FUNC_TRACE();
+
+ eth_dev->dev_ops = &cxgbevf_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
+ eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ /* for secondary processes, we attach to ethdevs allocated by primary
+ * and do minimal initialization.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ int i;
+
+ for (i = 1; i < MAX_NPORTS; i++) {
+ struct rte_eth_dev *rest_eth_dev;
+ char namei[RTE_ETH_NAME_MAX_LEN];
+
+ snprintf(namei, sizeof(namei), "%s_%d",
+ pci_dev->device.name, i);
+ rest_eth_dev = rte_eth_dev_attach_secondary(namei);
+ if (rest_eth_dev) {
+ rest_eth_dev->device = &pci_dev->device;
+ rest_eth_dev->dev_ops =
+ eth_dev->dev_ops;
+ rest_eth_dev->rx_pkt_burst =
+ eth_dev->rx_pkt_burst;
+ rest_eth_dev->tx_pkt_burst =
+ eth_dev->tx_pkt_burst;
+ rte_eth_dev_probing_finish(rest_eth_dev);
+ }
+ }
+ return 0;
+ }
+
+ snprintf(name, sizeof(name), "cxgbevfadapter%d",
+ eth_dev->data->port_id);
+ adapter = rte_zmalloc(name, sizeof(*adapter), 0);
+ if (!adapter)
+ return -1;
+
+ adapter->use_unpacked_mode = 1;
+ adapter->regs = (void *)pci_dev->mem_resource[0].addr;
+ if (!adapter->regs) {
+ dev_err(adapter, "%s: cannot map device registers\n", __func__);
+ err = -ENOMEM;
+ goto out_free_adapter;
+ }
+ adapter->pdev = pci_dev;
+ adapter->eth_dev = eth_dev;
+ pi->adapter = adapter;
+ err = cxgbevf_probe(adapter);
+ if (err) {
+ dev_err(adapter, "%s: cxgbevf probe failed with err %d\n",
+ __func__, err);
+ goto out_free_adapter;
+ }
+
+ return 0;
+
+out_free_adapter:
+ rte_free(adapter);
+ return err;
+}
+
+static int eth_cxgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct port_info),
+ eth_cxgbevf_dev_init);
+}
+
+static int eth_cxgbevf_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver rte_cxgbevf_pmd = {
+ .id_table = cxgb4vf_pci_tbl,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_cxgbevf_pci_probe,
+ .remove = eth_cxgbevf_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_cxgbevf, rte_cxgbevf_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_cxgbevf, cxgb4vf_pci_tbl);
+RTE_PMD_REGISTER_KMOD_DEP(net_cxgbevf, "* igb_uio | vfio-pci");
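The stats_get callback wired up above backs the generic statistics API for
net_cxgbevf ports. A short usage sketch (not part of the patch) from the
application side:

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_ethdev.h>

    static void print_vf_port_stats(uint16_t port_id)
    {
            struct rte_eth_stats stats;

            /* Dispatches to cxgbevf_dev_stats_get() for net_cxgbevf ports */
            if (rte_eth_stats_get(port_id, &stats) == 0)
                    printf("port %u: ipackets=%" PRIu64 " opackets=%" PRIu64 "\n",
                           port_id, stats.ipackets, stats.opackets);
    }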
diff --git a/drivers/net/cxgbe/cxgbevf_main.c b/drivers/net/cxgbe/cxgbevf_main.c
new file mode 100644
index 00000000..5b3fb539
--- /dev/null
+++ b/drivers/net/cxgbe/cxgbevf_main.c
@@ -0,0 +1,311 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_malloc.h>
+
+#include "common.h"
+#include "t4_regs.h"
+#include "t4_msg.h"
+#include "cxgbe.h"
+
+/*
+ * Figure out how many Ports and Queue Sets we can support. This depends on
+ * knowing our Virtual Function Resources and may be called a second time if
+ * we fall back from MSI-X to MSI Interrupt Mode.
+ */
+static void size_nports_qsets(struct adapter *adapter)
+{
+ struct vf_resources *vfres = &adapter->params.vfres;
+ unsigned int ethqsets, pmask_nports;
+
+ /*
+ * The number of "ports" which we support is equal to the number of
+ * Virtual Interfaces with which we've been provisioned.
+ */
+ adapter->params.nports = vfres->nvi;
+ if (adapter->params.nports > MAX_NPORTS) {
+ dev_warn(adapter->pdev_dev, "only using %d of %d maximum"
+ " allowed virtual interfaces\n", MAX_NPORTS,
+ adapter->params.nports);
+ adapter->params.nports = MAX_NPORTS;
+ }
+
+ /*
+ * We may have been provisioned with more VIs than the number of
+ * ports we're allowed to access (our Port Access Rights Mask).
+ * This is obviously a configuration conflict but we don't want to
+ * do anything silly just because of that.
+ */
+ pmask_nports = hweight32(adapter->params.vfres.pmask);
+ if (pmask_nports < adapter->params.nports) {
+ dev_warn(adapter->pdev_dev, "only using %d of %d provissioned"
+ " virtual interfaces; limited by Port Access Rights"
+ " mask %#x\n", pmask_nports, adapter->params.nports,
+ adapter->params.vfres.pmask);
+ adapter->params.nports = pmask_nports;
+ }
+
+ /*
+ * We need to reserve an Ingress Queue for the Asynchronous Firmware
+ * Event Queue.
+ *
+ * For each Queue Set, we'll need the ability to allocate two Egress
+ * Contexts -- one for the Ingress Queue Free List and one for the TX
+ * Ethernet Queue.
+ */
+ ethqsets = vfres->niqflint - 1;
+ if (vfres->nethctrl != ethqsets)
+ ethqsets = min(vfres->nethctrl, ethqsets);
+ if (vfres->neq < ethqsets * 2)
+ ethqsets = vfres->neq / 2;
+ if (ethqsets > MAX_ETH_QSETS)
+ ethqsets = MAX_ETH_QSETS;
+ adapter->sge.max_ethqsets = ethqsets;
+
+ if (adapter->sge.max_ethqsets < adapter->params.nports) {
+ dev_warn(adapter->pdev_dev, "only using %d of %d available"
+ " virtual interfaces (too few Queue Sets)\n",
+ adapter->sge.max_ethqsets, adapter->params.nports);
+ adapter->params.nports = adapter->sge.max_ethqsets;
+ }
+}
+
+void cxgbevf_stats_get(struct port_info *pi, struct port_stats *stats)
+{
+ t4vf_get_port_stats(pi->adapter, pi->pidx, stats);
+}
+
+static int adap_init0vf(struct adapter *adapter)
+{
+ u32 param, val = 0;
+ int err;
+
+ err = t4vf_fw_reset(adapter);
+ if (err < 0) {
+ dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
+ return err;
+ }
+
+ /*
+ * Grab basic operational parameters. These will predominantly have
+ * been set up by the Physical Function Driver or will be hard coded
+ * into the adapter. We just have to live with them ... Note that
+ * we _must_ get our VPD parameters before our SGE parameters because
+ * we need to know the adapter's core clock from the VPD in order to
+ * properly decode the SGE Timer Values.
+ */
+ err = t4vf_get_dev_params(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+ " device parameters: err=%d\n", err);
+ return err;
+ }
+
+ err = t4vf_get_vpd_params(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+ " VPD parameters: err=%d\n", err);
+ return err;
+ }
+
+ adapter->pf = t4vf_get_pf_from_vf(adapter);
+ err = t4vf_sge_init(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "error in sge init\n");
+ return err;
+ }
+
+ err = t4vf_get_rss_glb_config(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+ " RSS parameters: err=%d\n", err);
+ return err;
+ }
+ if (adapter->params.rss.mode !=
+ FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
+ dev_err(adapter->pdev_dev, "unable to operate with global RSS"
+ " mode %d\n", adapter->params.rss.mode);
+ return -EINVAL;
+ }
+
+ /* If we're running on newer firmware, let it know that we're
+ * prepared to deal with encapsulated CPL messages. Older
+ * firmware won't understand this and we'll just get
+ * unencapsulated messages ...
+ */
+ param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
+ val = 1;
+ t4vf_set_params(adapter, 1, &param, &val);
+
+ /*
+ * Grab our Virtual Interface resource allocation, extract the
+ * features that we're interested in and do a bit of sanity testing on
+ * what we discover.
+ */
+ err = t4vf_get_vfres(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to get virtual interface"
+ " resources: err=%d\n", err);
+ return err;
+ }
+
+ /*
+ * Check for various parameter sanity issues.
+ */
+ if (adapter->params.vfres.pmask == 0) {
+ dev_err(adapter->pdev_dev, "no port access configured\n"
+ "usable!\n");
+ return -EINVAL;
+ }
+ if (adapter->params.vfres.nvi == 0) {
+ dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
+ "usable!\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Initialize nports and max_ethqsets now that we have our Virtual
+ * Function Resources.
+ */
+ size_nports_qsets(adapter);
+ adapter->flags |= FW_OK;
+ return 0;
+}
+
+int cxgbevf_probe(struct adapter *adapter)
+{
+ struct port_info *pi;
+ unsigned int pmask;
+ int err = 0;
+ int i;
+
+ t4_os_lock_init(&adapter->mbox_lock);
+ TAILQ_INIT(&adapter->mbox_list);
+ err = t4vf_prep_adapter(adapter);
+ if (err)
+ return err;
+
+ if (!is_t4(adapter->params.chip)) {
+ adapter->bar2 = (void *)adapter->pdev->mem_resource[2].addr;
+ if (!adapter->bar2) {
+ dev_err(adapter, "cannot map device bar2 region\n");
+ err = -ENOMEM;
+ return err;
+ }
+ }
+
+ err = adap_init0vf(adapter);
+ if (err) {
+ dev_err(adapter, "%s: Adapter initialization failed, error %d\n",
+ __func__, err);
+ goto out_free;
+ }
+
+ pmask = adapter->params.vfres.pmask;
+ for_each_port(adapter, i) {
+ const unsigned int numa_node = rte_socket_id();
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_dev *eth_dev;
+ int port_id;
+
+ if (pmask == 0)
+ break;
+ port_id = ffs(pmask) - 1;
+ pmask &= ~(1 << port_id);
+
+ snprintf(name, sizeof(name), "%s_%d",
+ adapter->pdev->device.name, i);
+
+ if (i == 0) {
+ /* First port is already allocated by DPDK */
+ eth_dev = adapter->eth_dev;
+ goto allocate_mac;
+ }
+
+ /*
+ * now do all data allocation - for eth_dev structure,
+ * and internal (private) data for the remaining ports
+ */
+
+ /* reserve an ethdev entry */
+ eth_dev = rte_eth_dev_allocate(name);
+ if (!eth_dev) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+ eth_dev->data->dev_private =
+ rte_zmalloc_socket(name, sizeof(struct port_info),
+ RTE_CACHE_LINE_SIZE, numa_node);
+ if (!eth_dev->data->dev_private)
+ goto out_free;
+
+allocate_mac:
+ pi = (struct port_info *)eth_dev->data->dev_private;
+ adapter->port[i] = pi;
+ pi->eth_dev = eth_dev;
+ pi->adapter = adapter;
+ pi->xact_addr_filt = -1;
+ pi->port_id = port_id;
+ pi->pidx = i;
+
+ pi->eth_dev->device = &adapter->pdev->device;
+ pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops;
+ pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst;
+ pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst;
+
+ rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev);
+ pi->eth_dev->data->mac_addrs = rte_zmalloc(name,
+ ETHER_ADDR_LEN, 0);
+ if (!pi->eth_dev->data->mac_addrs) {
+ dev_err(adapter, "%s: Mem allocation failed for storing mac addr, aborting\n",
+ __func__);
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ if (i > 0) {
+ /* First port will be notified by upper layer */
+ rte_eth_dev_probing_finish(eth_dev);
+ }
+ }
+
+ if (adapter->flags & FW_OK) {
+ err = t4vf_port_init(adapter);
+ if (err) {
+ dev_err(adapter, "%s: t4_port_init failed with err %d\n",
+ __func__, err);
+ goto out_free;
+ }
+ }
+
+ cfg_queues(adapter->eth_dev);
+ print_adapter_info(adapter);
+ print_port_info(adapter);
+
+ err = init_rss(adapter);
+ if (err)
+ goto out_free;
+ return 0;
+
+out_free:
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ if (pi->viid != 0)
+ t4_free_vi(adapter, adapter->mbox, adapter->pf,
+ 0, pi->viid);
+ /* Skip first port since it'll be de-allocated by DPDK */
+ if (i == 0)
+ continue;
+ if (pi->eth_dev) {
+ if (pi->eth_dev->data->dev_private)
+ rte_free(pi->eth_dev->data->dev_private);
+ rte_eth_dev_release_port(pi->eth_dev);
+ }
+ }
+ return -err;
+}
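To make the sizing logic in size_nports_qsets() above concrete, a worked
example with made-up VF resource numbers (the real values come from the PF
and firmware provisioning):

    /* Hypothetical provisioning: nvi = 2, pmask = 0x3, niqflint = 9,
     * nethctrl = 8, neq = 16.
     *
     *   nports       = min(nvi, MAX_NPORTS)         = 2
     *   pmask_nports = hweight32(0x3)               = 2   (no further clamp)
     *   ethqsets     = niqflint - 1                 = 8   (one ingress queue
     *                                                      reserved for the
     *                                                      FW event queue)
     *   ethqsets     = min(nethctrl, ethqsets)      = 8
     *   ethqsets     = min(ethqsets, neq / 2)       = 8   (two egress contexts
     *                                                      per queue set)
     *   max_ethqsets = min(ethqsets, MAX_ETH_QSETS) = 8
     */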
diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index 3d5aa596..b5d3611d 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2015 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#include <sys/queue.h>
@@ -337,7 +309,11 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
* mechanism.
*/
if (unlikely(!q->bar2_addr)) {
- t4_write_reg_relaxed(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
+ u32 reg = is_pf4(adap) ? MYPF_REG(A_SGE_PF_KDOORBELL) :
+ T4VF_SGE_BASE_ADDR +
+ A_SGE_VF_KDOORBELL;
+
+ t4_write_reg_relaxed(adap, reg,
val | V_QID(q->cntxt_id));
} else {
writel_relaxed(val | V_QID(q->bar2_qid),
@@ -385,7 +361,8 @@ static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q,
struct rte_mbuf *buf_bulk[n];
int ret, i;
struct rte_pktmbuf_pool_private *mbp_priv;
- u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.jumbo_frame;
+ u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
/* Use jumbo mtu buffers if mbuf data room size can fit jumbo data. */
mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool);
@@ -570,12 +547,16 @@ static inline int is_eth_imm(const struct rte_mbuf *m)
/**
* calc_tx_flits - calculate the number of flits for a packet Tx WR
* @m: the packet
+ * @adap: adapter structure pointer
*
* Returns the number of flits needed for a Tx WR for the given Ethernet
* packet, including the needed WR and CPL headers.
*/
-static inline unsigned int calc_tx_flits(const struct rte_mbuf *m)
+static inline unsigned int calc_tx_flits(const struct rte_mbuf *m,
+ struct adapter *adap)
{
+ size_t wr_size = is_pf4(adap) ? sizeof(struct fw_eth_tx_pkt_wr) :
+ sizeof(struct fw_eth_tx_pkt_vm_wr);
unsigned int flits;
int hdrlen;
@@ -600,11 +581,10 @@ static inline unsigned int calc_tx_flits(const struct rte_mbuf *m)
*/
flits = sgl_len(m->nb_segs);
if (m->tso_segsz)
- flits += (sizeof(struct fw_eth_tx_pkt_wr) +
- sizeof(struct cpl_tx_pkt_lso_core) +
+ flits += (wr_size + sizeof(struct cpl_tx_pkt_lso_core) +
sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
else
- flits += (sizeof(struct fw_eth_tx_pkt_wr) +
+ flits += (wr_size +
sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
return flits;
}
@@ -848,14 +828,20 @@ static void tx_timer_cb(void *data)
static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
struct sge_eth_txq *txq)
{
- u32 wr_mid;
- struct sge_txq *q = &txq->q;
+ struct fw_eth_tx_pkts_vm_wr *vmwr;
+ const size_t fw_hdr_copy_len = (sizeof(vmwr->ethmacdst) +
+ sizeof(vmwr->ethmacsrc) +
+ sizeof(vmwr->ethtype) +
+ sizeof(vmwr->vlantci));
struct fw_eth_tx_pkts_wr *wr;
+ struct sge_txq *q = &txq->q;
unsigned int ndesc;
+ u32 wr_mid;
/* fill the pkts WR header */
wr = (void *)&q->desc[q->pidx];
wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
+ vmwr = (void *)&q->desc[q->pidx];
wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(q->coalesce.flits, 2));
ndesc = flits_to_desc(q->coalesce.flits);
@@ -863,12 +849,18 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
wr->plen = cpu_to_be16(q->coalesce.len);
wr->npkt = q->coalesce.idx;
wr->r3 = 0;
- wr->type = q->coalesce.type;
+ if (is_pf4(adap)) {
+ wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
+ wr->type = q->coalesce.type;
+ } else {
+ wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_VM_WR));
+ vmwr->r4 = 0;
+ memcpy((void *)vmwr->ethmacdst, (void *)q->coalesce.ethmacdst,
+ fw_hdr_copy_len);
+ }
/* zero out coalesce structure members */
- q->coalesce.idx = 0;
- q->coalesce.flits = 0;
- q->coalesce.len = 0;
+ memset((void *)&q->coalesce, 0, sizeof(struct eth_coalesce));
txq_advance(q, ndesc);
txq->stats.coal_wr++;
@@ -896,13 +888,27 @@ static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq,
unsigned int *nflits,
struct adapter *adap)
{
+ struct fw_eth_tx_pkts_vm_wr *wr;
+ const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
+ sizeof(wr->ethmacsrc) +
+ sizeof(wr->ethtype) +
+ sizeof(wr->vlantci));
struct sge_txq *q = &txq->q;
unsigned int flits, ndesc;
unsigned char type = 0;
- int credits;
+ int credits, wr_size;
/* use coal WR type 1 when no frags are present */
type = (mbuf->nb_segs == 1) ? 1 : 0;
+ if (!is_pf4(adap)) {
+ if (!type)
+ return 0;
+
+ if (q->coalesce.idx && memcmp((void *)q->coalesce.ethmacdst,
+ rte_pktmbuf_mtod(mbuf, void *),
+ fw_hdr_copy_len))
+ ship_tx_pkt_coalesce_wr(adap, txq);
+ }
if (unlikely(type != q->coalesce.type && q->coalesce.idx))
ship_tx_pkt_coalesce_wr(adap, txq);
@@ -948,16 +954,21 @@ static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq,
new:
/* start a new pkts WR, the WR header is not filled below */
- flits += sizeof(struct fw_eth_tx_pkts_wr) / sizeof(__be64);
+ wr_size = is_pf4(adap) ? sizeof(struct fw_eth_tx_pkts_wr) :
+ sizeof(struct fw_eth_tx_pkts_vm_wr);
+ flits += wr_size / sizeof(__be64);
ndesc = flits_to_desc(q->coalesce.flits + flits);
credits = txq_avail(q) - ndesc;
if (unlikely(credits < 0 || wraps_around(q, ndesc)))
return 0;
- q->coalesce.flits += 2;
+ q->coalesce.flits += wr_size / sizeof(__be64);
q->coalesce.type = type;
q->coalesce.ptr = (unsigned char *)&q->desc[q->pidx] +
- 2 * sizeof(__be64);
+ q->coalesce.flits * sizeof(__be64);
+ if (!is_pf4(adap))
+ memcpy((void *)q->coalesce.ethmacdst,
+ rte_pktmbuf_mtod(mbuf, void *), fw_hdr_copy_len);
return 1;
}
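A note on fw_hdr_copy_len used in the VF path above: it is the portion of the
Ethernet header that FW_ETH_TX_PKTS_VM_WR carries inline and, assuming the
usual field sizes, amounts to 6 + 6 + 2 + 2 = 16 bytes:

    /* ethmacdst[6] + ethmacsrc[6] + ethtype(2) + vlantci(2) = 16 bytes.
     * Packets whose leading 16 bytes differ cannot share a VM coalesce WR,
     * hence the memcmp() against q->coalesce.ethmacdst before coalescing.
     */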
@@ -987,6 +998,8 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
struct cpl_tx_pkt_core *cpl;
struct tx_sw_desc *sd;
unsigned int idx = q->coalesce.idx, len = mbuf->pkt_len;
+ unsigned int max_coal_pkt_num = is_pf4(adap) ? ETH_COALESCE_PKT_NUM :
+ ETH_COALESCE_VF_PKT_NUM;
#ifdef RTE_LIBRTE_CXGBE_TPUT
RTE_SET_USED(nb_pkts);
@@ -1030,9 +1043,12 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(mbuf->vlan_tci);
}
- cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
- V_TXPKT_INTF(pi->tx_chan) |
- V_TXPKT_PF(adap->pf));
+ cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT));
+ if (is_pf4(adap))
+ cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->tx_chan) |
+ V_TXPKT_PF(adap->pf));
+ else
+ cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->port_id));
cpl->pack = htons(0);
cpl->len = htons(len);
cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1061,7 +1077,7 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
sd->coalesce.idx = (idx & 1) + 1;
/* send the coalesced work request if max reached */
- if (++q->coalesce.idx == ETH_COALESCE_PKT_NUM
+ if (++q->coalesce.idx == max_coal_pkt_num
#ifndef RTE_LIBRTE_CXGBE_TPUT
|| q->coalesce.idx >= nb_pkts
#endif
@@ -1085,6 +1101,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
struct adapter *adap;
struct rte_mbuf *m = mbuf;
struct fw_eth_tx_pkt_wr *wr;
+ struct fw_eth_tx_pkt_vm_wr *vmwr;
struct cpl_tx_pkt_core *cpl;
struct tx_sw_desc *d;
dma_addr_t addr[m->nb_segs];
@@ -1095,7 +1112,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
u32 wr_mid;
u64 cntrl, *end;
bool v6;
- u32 max_pkt_len = txq->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ u32 max_pkt_len = txq->data->dev_conf.rxmode.max_rx_pkt_len;
/* Reject xmit if queue is stopped */
if (unlikely(txq->flags & EQ_STOPPED))
@@ -1115,7 +1132,7 @@ out_free:
(unlikely(m->pkt_len > max_pkt_len)))
goto out_free;
- pi = (struct port_info *)txq->eth_dev->data->dev_private;
+ pi = (struct port_info *)txq->data->dev_private;
adap = pi->adapter;
cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS;
@@ -1141,7 +1158,7 @@ out_free:
if (txq->q.coalesce.idx)
ship_tx_pkt_coalesce_wr(adap, txq);
- flits = calc_tx_flits(m);
+ flits = calc_tx_flits(m, adap);
ndesc = flits_to_desc(flits);
credits = txq_avail(&txq->q) - ndesc;
@@ -1163,31 +1180,55 @@ out_free:
}
wr = (void *)&txq->q.desc[txq->q.pidx];
+ vmwr = (void *)&txq->q.desc[txq->q.pidx];
wr->equiq_to_len16 = htonl(wr_mid);
- wr->r3 = rte_cpu_to_be_64(0);
- end = (u64 *)wr + flits;
+ if (is_pf4(adap)) {
+ wr->r3 = rte_cpu_to_be_64(0);
+ end = (u64 *)wr + flits;
+ } else {
+ const size_t fw_hdr_copy_len = (sizeof(vmwr->ethmacdst) +
+ sizeof(vmwr->ethmacsrc) +
+ sizeof(vmwr->ethtype) +
+ sizeof(vmwr->vlantci));
+
+ vmwr->r3[0] = rte_cpu_to_be_32(0);
+ vmwr->r3[1] = rte_cpu_to_be_32(0);
+ memcpy((void *)vmwr->ethmacdst, rte_pktmbuf_mtod(m, void *),
+ fw_hdr_copy_len);
+ end = (u64 *)vmwr + flits;
+ }
len = 0;
len += sizeof(*cpl);
/* Coalescing skipped and we send through normal path */
if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
- wr->op_immdlen = htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
+ wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ?
+ FW_ETH_TX_PKT_WR :
+ FW_ETH_TX_PKT_VM_WR) |
V_FW_WR_IMMDLEN(len));
- cpl = (void *)(wr + 1);
+ if (is_pf4(adap))
+ cpl = (void *)(wr + 1);
+ else
+ cpl = (void *)(vmwr + 1);
if (m->ol_flags & PKT_TX_IP_CKSUM) {
cntrl = hwcsum(adap->params.chip, m) |
F_TXPKT_IPCSUM_DIS;
txq->stats.tx_cso++;
}
} else {
- lso = (void *)(wr + 1);
+ if (is_pf4(adap))
+ lso = (void *)(wr + 1);
+ else
+ lso = (void *)(vmwr + 1);
v6 = (m->ol_flags & PKT_TX_IPV6) != 0;
l3hdr_len = m->l3_len;
l4hdr_len = m->l4_len;
eth_xtra_len = m->l2_len - ETHER_HDR_LEN;
len += sizeof(*lso);
- wr->op_immdlen = htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
+ wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ?
+ FW_ETH_TX_PKT_WR :
+ FW_ETH_TX_PKT_VM_WR) |
V_FW_WR_IMMDLEN(len));
lso->lso_ctrl = htonl(V_LSO_OPCODE(CPL_TX_PKT_LSO) |
F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
@@ -1221,9 +1262,14 @@ out_free:
cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->vlan_tci);
}
- cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
- V_TXPKT_INTF(pi->tx_chan) |
- V_TXPKT_PF(adap->pf));
+ cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT));
+ if (is_pf4(adap))
+ cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->tx_chan) |
+ V_TXPKT_PF(adap->pf));
+ else
+ cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->port_id) |
+ V_TXPKT_PF(0));
+
cpl->pack = htons(0);
cpl->len = htons(m->pkt_len);
cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1299,7 +1345,8 @@ static void *alloc_ring(size_t nelem, size_t elem_size,
* handle the maximum ring size is allocated in order to allow for
* resizing in later calls to the queue setup function.
*/
- tz = rte_memzone_reserve_aligned(z_name, len, socket_id, 0, 4096);
+ tz = rte_memzone_reserve_aligned(z_name, len, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, 4096);
if (!tz)
return NULL;
@@ -1468,6 +1515,7 @@ static int process_responses(struct sge_rspq *q, int budget,
rsp_type = G_RSPD_TYPE(rc->u.type_gen);
if (likely(rsp_type == X_RSPD_TYPE_FLBUF)) {
+ struct sge *s = &q->adapter->sge;
unsigned int stat_pidx;
int stat_pidx_diff;
@@ -1550,10 +1598,12 @@ static int process_responses(struct sge_rspq *q, int budget,
}
if (cpl->vlan_ex) {
- pkt->ol_flags |= PKT_RX_VLAN;
+ pkt->ol_flags |= PKT_RX_VLAN |
+ PKT_RX_VLAN_STRIPPED;
pkt->vlan_tci = ntohs(cpl->vlan);
}
+ rte_pktmbuf_adj(pkt, s->pktshift);
rxq->stats.pkts++;
rxq->stats.rx_bytes += pkt->pkt_len;
rx_pkts[budget - budget_left] = pkt;
@@ -1612,7 +1662,11 @@ int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
val = V_CIDXINC(cidx_inc) | V_SEINTARM(params);
if (unlikely(!q->bar2_addr)) {
- t4_write_reg(q->adapter, MYPF_REG(A_SGE_PF_GTS),
+ u32 reg = is_pf4(q->adapter) ? MYPF_REG(A_SGE_PF_GTS) :
+ T4VF_SGE_BASE_ADDR +
+ A_SGE_VF_GTS;
+
+ t4_write_reg(q->adapter, reg,
val | V_INGRESSQID((u32)q->cntxt_id));
} else {
writel(val | V_INGRESSQID(q->bar2_qid),
@@ -1689,6 +1743,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
char z_name[RTE_MEMZONE_NAMESIZE];
char z_name_sw[RTE_MEMZONE_NAMESIZE];
unsigned int nb_refill;
+ u8 pciechan;
/* Size needs to be multiple of 16, including status entry. */
iq->size = cxgbe_roundup(iq->size, 16);
@@ -1706,8 +1761,19 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
memset(&c, 0, sizeof(c));
c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
- F_FW_CMD_WRITE | F_FW_CMD_EXEC |
- V_FW_IQ_CMD_PFN(adap->pf) | V_FW_IQ_CMD_VFN(0));
+ F_FW_CMD_WRITE | F_FW_CMD_EXEC);
+
+ if (is_pf4(adap)) {
+ pciechan = cong > 0 ? cxgbe_ffs(cong) - 1 : pi->tx_chan;
+ c.op_to_vfn |= htonl(V_FW_IQ_CMD_PFN(adap->pf) |
+ V_FW_IQ_CMD_VFN(0));
+ if (cong >= 0)
+ c.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN |
+ F_FW_IQ_CMD_IQRO);
+ } else {
+ pciechan = pi->port_id;
+ }
+
c.alloc_to_len16 = htonl(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
(sizeof(c) / 16));
c.type_to_iqandstindex =
@@ -1719,16 +1785,12 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
V_FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
-intr_idx - 1));
c.iqdroprss_to_iqesize =
- htons(V_FW_IQ_CMD_IQPCIECH(cong > 0 ? cxgbe_ffs(cong) - 1 :
- pi->tx_chan) |
+ htons(V_FW_IQ_CMD_IQPCIECH(pciechan) |
F_FW_IQ_CMD_IQGTSMODE |
V_FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
V_FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
c.iqsize = htons(iq->size);
c.iqaddr = cpu_to_be64(iq->phys_addr);
- if (cong >= 0)
- c.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN |
- F_FW_IQ_CMD_IQRO);
if (fl) {
struct sge_eth_rxq *rxq = container_of(fl, struct sge_eth_rxq,
@@ -1768,7 +1830,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
0 : F_FW_IQ_CMD_FL0PACKEN) |
F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
F_FW_IQ_CMD_FL0PADEN);
- if (cong >= 0)
+ if (is_pf4(adap) && cong >= 0)
c.iqns_to_fl0congen |=
htonl(V_FW_IQ_CMD_FL0CNGCHMAP(cong) |
F_FW_IQ_CMD_FL0CONGCIF |
@@ -1789,7 +1851,10 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
c.fl0addr = cpu_to_be64(fl->addr);
}
- ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ if (is_pf4(adap))
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ else
+ ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);
if (ret)
goto err;
@@ -1806,7 +1871,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
iq->stat = (void *)&iq->desc[iq->size * 8];
iq->eth_dev = eth_dev;
iq->handler = hnd;
- iq->port_id = pi->port_id;
+ iq->port_id = pi->pidx;
iq->mb_pool = mp;
/* set offset to -1 to distinguish ingress queues without FL */
@@ -1846,7 +1911,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
* a lot easier to fix in one place ... For now we do something very
* simple (and hopefully less wrong).
*/
- if (!is_t4(adap->params.chip) && cong >= 0) {
+ if (is_pf4(adap) && !is_t4(adap->params.chip) && cong >= 0) {
u32 param, val;
int i;
@@ -1893,9 +1958,11 @@ err:
return ret;
}
-static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
+static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id,
+ unsigned int abs_id)
{
q->cntxt_id = id;
+ q->abs_id = abs_id;
q->bar2_addr = bar2_address(adap, q->cntxt_id, T4_BAR2_QTYPE_EGRESS,
&q->bar2_qid);
q->cidx = 0;
@@ -1943,6 +2010,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
char z_name[RTE_MEMZONE_NAMESIZE];
char z_name_sw[RTE_MEMZONE_NAMESIZE];
+ u8 pciechan;
/* Add status entries */
nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
@@ -1961,16 +2029,22 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
memset(&c, 0, sizeof(c));
c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
- F_FW_CMD_WRITE | F_FW_CMD_EXEC |
- V_FW_EQ_ETH_CMD_PFN(adap->pf) |
- V_FW_EQ_ETH_CMD_VFN(0));
+ F_FW_CMD_WRITE | F_FW_CMD_EXEC);
+ if (is_pf4(adap)) {
+ pciechan = pi->tx_chan;
+ c.op_to_vfn |= htonl(V_FW_EQ_ETH_CMD_PFN(adap->pf) |
+ V_FW_EQ_ETH_CMD_VFN(0));
+ } else {
+ pciechan = pi->port_id;
+ }
+
c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_ALLOC |
F_FW_EQ_ETH_CMD_EQSTART | (sizeof(c) / 16));
c.autoequiqe_to_viid = htonl(F_FW_EQ_ETH_CMD_AUTOEQUEQE |
V_FW_EQ_ETH_CMD_VIID(pi->viid));
c.fetchszm_to_iqid =
htonl(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
- V_FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
+ V_FW_EQ_ETH_CMD_PCIECHN(pciechan) |
F_FW_EQ_ETH_CMD_FETCHRO | V_FW_EQ_ETH_CMD_IQID(iqid));
c.dcaen_to_eqsize =
htonl(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
@@ -1978,7 +2052,10 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
V_FW_EQ_ETH_CMD_EQSIZE(nentries));
c.eqaddr = rte_cpu_to_be_64(txq->q.phys_addr);
- ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ if (is_pf4(adap))
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ else
+ ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);
if (ret) {
rte_free(txq->q.sdesc);
txq->q.sdesc = NULL;
@@ -1986,7 +2063,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
return ret;
}
- init_txq(adap, &txq->q, G_FW_EQ_ETH_CMD_EQID(ntohl(c.eqid_pkd)));
+ init_txq(adap, &txq->q, G_FW_EQ_ETH_CMD_EQID(ntohl(c.eqid_pkd)),
+ G_FW_EQ_ETH_CMD_PHYSEQID(ntohl(c.physeqid_pkd)));
txq->stats.tso = 0;
txq->stats.pkts = 0;
txq->stats.tx_cso = 0;
@@ -1997,6 +2075,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
txq->stats.mapping_err = 0;
txq->flags |= EQ_STOPPED;
txq->eth_dev = eth_dev;
+ txq->data = eth_dev->data;
t4_os_lock_init(&txq->txq_lock);
return 0;
}
@@ -2280,3 +2359,182 @@ int t4_sge_init(struct adapter *adap)
return 0;
}
+
+int t4vf_sge_init(struct adapter *adap)
+{
+ struct sge_params *sge_params = &adap->params.sge;
+ u32 sge_ingress_queues_per_page;
+ u32 sge_egress_queues_per_page;
+ u32 sge_control, sge_control2;
+ u32 fl_small_pg, fl_large_pg;
+ u32 sge_ingress_rx_threshold;
+ u32 sge_timer_value_0_and_1;
+ u32 sge_timer_value_2_and_3;
+ u32 sge_timer_value_4_and_5;
+ u32 sge_congestion_control;
+ struct sge *s = &adap->sge;
+ unsigned int s_hps, s_qpp;
+ u32 sge_host_page_size;
+ u32 params[7], vals[7];
+ int v;
+
+ /* query basic params from fw */
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL));
+ params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_HOST_PAGE_SIZE));
+ params[2] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE0));
+ params[3] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE1));
+ params[4] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_0_AND_1));
+ params[5] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_2_AND_3));
+ params[6] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_4_AND_5));
+ v = t4vf_query_params(adap, 7, params, vals);
+ if (v != FW_SUCCESS)
+ return v;
+
+ sge_control = vals[0];
+ sge_host_page_size = vals[1];
+ fl_small_pg = vals[2];
+ fl_large_pg = vals[3];
+ sge_timer_value_0_and_1 = vals[4];
+ sge_timer_value_2_and_3 = vals[5];
+ sge_timer_value_4_and_5 = vals[6];
+
+ /*
+ * Start by vetting the basic SGE parameters which have been set up by
+ * the Physical Function Driver.
+ */
+
+ /* We only bother using the Large Page logic if the Large Page Buffer
+ * is larger than our Page Size Buffer.
+ */
+ if (fl_large_pg <= fl_small_pg)
+ fl_large_pg = 0;
+
+ /* The Page Size Buffer must be exactly equal to our Page Size and the
+ * Large Page Size Buffer should be 0 (per above) or a power of 2.
+ */
+ if (fl_small_pg != CXGBE_PAGE_SIZE ||
+ (fl_large_pg & (fl_large_pg - 1)) != 0) {
+ dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
+ fl_small_pg, fl_large_pg);
+ return -EINVAL;
+ }
+
+ if ((sge_control & F_RXPKTCPLMODE) !=
+ V_RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
+ dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
+ return -EINVAL;
+ }
+
+
+ /* Grab ingress packing boundary from SGE_CONTROL2 */
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL2));
+ v = t4vf_query_params(adap, 1, params, vals);
+ if (v != FW_SUCCESS) {
+ dev_err(adapter, "Unable to get SGE Control2; "
+ "probably old firmware.\n");
+ return v;
+ }
+ sge_control2 = vals[0];
+
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_RX_THRESHOLD));
+ params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_CONM_CTRL));
+ v = t4vf_query_params(adap, 2, params, vals);
+ if (v != FW_SUCCESS)
+ return v;
+ sge_ingress_rx_threshold = vals[0];
+ sge_congestion_control = vals[1];
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_EGRESS_QUEUES_PER_PAGE_VF));
+ params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_QUEUES_PER_PAGE_VF));
+ v = t4vf_query_params(adap, 2, params, vals);
+ if (v != FW_SUCCESS) {
+ dev_warn(adap, "Unable to get VF SGE Queues/Page; "
+ "probably old firmware.\n");
+ return v;
+ }
+ sge_egress_queues_per_page = vals[0];
+ sge_ingress_queues_per_page = vals[1];
+
+ /*
+ * We need the Queues/Page for our VF. This is based on the
+ * PF from which we're instantiated and is indexed in the
+ * register we just read.
+ */
+ s_hps = (S_HOSTPAGESIZEPF0 +
+ (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adap->pf);
+ sge_params->hps =
+ ((sge_host_page_size >> s_hps) & M_HOSTPAGESIZEPF0);
+
+ s_qpp = (S_QUEUESPERPAGEPF0 +
+ (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adap->pf);
+ sge_params->eq_qpp =
+ ((sge_egress_queues_per_page >> s_qpp)
+ & M_QUEUESPERPAGEPF0);
+ sge_params->iq_qpp =
+ ((sge_ingress_queues_per_page >> s_qpp)
+ & M_QUEUESPERPAGEPF0);
+
+ /*
+ * Now translate the queried parameters into our internal forms.
+ */
+ if (fl_large_pg)
+ s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
+ s->stat_len = ((sge_control & F_EGRSTATUSPAGESIZE)
+ ? 128 : 64);
+ s->pktshift = G_PKTSHIFT(sge_control);
+ s->fl_align = t4vf_fl_pkt_align(adap, sge_control, sge_control2);
+
+ /*
+ * A FL with <= fl_starve_thres buffers is starving and a periodic
+ * timer will attempt to refill it. This needs to be larger than the
+ * SGE's Egress Congestion Threshold. If it isn't, then we can get
+ * stuck waiting for new packets while the SGE is waiting for us to
+ * give it more Free List entries. (Note that the SGE's Egress
+ * Congestion Threshold is in units of 2 Free List pointers.)
+ */
+ switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
+ case CHELSIO_T5:
+ s->fl_starve_thres =
+ G_EGRTHRESHOLDPACKING(sge_congestion_control);
+ break;
+ case CHELSIO_T6:
+ default:
+ s->fl_starve_thres =
+ G_T6_EGRTHRESHOLDPACKING(sge_congestion_control);
+ break;
+ }
+ s->fl_starve_thres = s->fl_starve_thres * 2 + 1;
+
+ /*
+ * Save RX interrupt holdoff timer values and counter
+ * threshold values from the SGE parameters.
+ */
+ s->timer_val[0] = core_ticks_to_us(adap,
+ G_TIMERVALUE0(sge_timer_value_0_and_1));
+ s->timer_val[1] = core_ticks_to_us(adap,
+ G_TIMERVALUE1(sge_timer_value_0_and_1));
+ s->timer_val[2] = core_ticks_to_us(adap,
+ G_TIMERVALUE2(sge_timer_value_2_and_3));
+ s->timer_val[3] = core_ticks_to_us(adap,
+ G_TIMERVALUE3(sge_timer_value_2_and_3));
+ s->timer_val[4] = core_ticks_to_us(adap,
+ G_TIMERVALUE4(sge_timer_value_4_and_5));
+ s->timer_val[5] = core_ticks_to_us(adap,
+ G_TIMERVALUE5(sge_timer_value_4_and_5));
+ s->counter_val[0] = G_THRESHOLD_0(sge_ingress_rx_threshold);
+ s->counter_val[1] = G_THRESHOLD_1(sge_ingress_rx_threshold);
+ s->counter_val[2] = G_THRESHOLD_2(sge_ingress_rx_threshold);
+ s->counter_val[3] = G_THRESHOLD_3(sge_ingress_rx_threshold);
+ return 0;
+}
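To make the queues-per-page extraction in t4vf_sge_init() concrete, a small
worked example with an assumed register value (the field offsets and masks
come from the S_/M_QUEUESPERPAGEPF* macros in the register headers, not from
this patch):

    /* Hypothetical: sge_egress_queues_per_page = 0x00002310, adap->pf = 1,
     * S_QUEUESPERPAGEPF0 = 0, S_QUEUESPERPAGEPF1 = 4, M_QUEUESPERPAGEPF0 = 0xf.
     *
     *   s_qpp  = 0 + (4 - 0) * 1         = 4
     *   eq_qpp = (0x00002310 >> 4) & 0xf = 0x1
     *
     * i.e. (assuming the field is a log2 encoding, as in the PF driver) this
     * VF's parent PF is allowed 1 << 1 = 2 egress queues per system page.
     */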
diff --git a/drivers/net/dpaa/Makefile b/drivers/net/dpaa/Makefile
index 9c2a5ea8..d7a0a50c 100644
--- a/drivers/net/dpaa/Makefile
+++ b/drivers/net/dpaa/Makefile
@@ -27,6 +27,9 @@ EXPORT_MAP := rte_pmd_dpaa_version.map
LIBABIVER := 1
+# depends on dpaa bus which uses experimental API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
# Interfaces with DPDK
SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_rxtx.c
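The new CFLAGS entry is needed because the dpaa bus exposes functions that are still tagged experimental; code that calls them has to opt in at build time. A hedged sketch of what such a declaration typically looks like on the exporting side (the function name is invented, and the exact attribute plumbing depends on the DPDK version):

    /* Sketch only: an experimental DPDK symbol. Callers normally have to build
     * with -DALLOW_EXPERIMENTAL_API (as the Makefile change above does) to use
     * it without a compile-time complaint. */
    #include <rte_compat.h>

    __rte_experimental
    int example_experimental_call(void);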
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 9b69ef45..d014a11a 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -45,14 +45,42 @@
#include <fsl_bman.h>
#include <fsl_fman.h>
+/* Supported Rx offloads */
+static uint64_t dev_rx_offloads_sup =
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+/* Rx offloads which cannot be disabled */
+static uint64_t dev_rx_offloads_nodis =
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_SCATTER;
+
+/* Supported Tx offloads */
+static uint64_t dev_tx_offloads_sup;
+
+/* Tx offloads which cannot be disabled */
+static uint64_t dev_tx_offloads_nodis =
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_MT_LOCKFREE |
+ DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
/* Keep track of whether QMAN and BMAN have been globally initialized */
static int is_global_init;
-/* At present we only allow up to 4 push mode queues - as each of this queue
- * need dedicated portal and we are short of portals.
+/* At present we only allow up to 4 push mode queues by default - as each of
+ * these queues needs a dedicated portal and we are short of portals.
*/
-#define DPAA_MAX_PUSH_MODE_QUEUE 4
+#define DPAA_MAX_PUSH_MODE_QUEUE 8
+#define DPAA_DEFAULT_PUSH_MODE_QUEUE 4
-static int dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
+static int dpaa_push_mode_max_queue = DPAA_DEFAULT_PUSH_MODE_QUEUE;
static int dpaa_push_queue_idx; /* Queue index which are in push mode*/
@@ -95,6 +123,9 @@ static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
static struct rte_dpaa_driver rte_dpaa_pmd;
+static void
+dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
+
static inline void
dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
{
@@ -122,9 +153,11 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
if (mtu < ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
return -EINVAL;
if (frame_size > ETHER_MAX_LEN)
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
@@ -134,13 +167,32 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
static int
-dpaa_eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
+dpaa_eth_dev_configure(struct rte_eth_dev *dev)
{
struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
+ uint64_t rx_offloads = eth_conf->rxmode.offloads;
+ uint64_t tx_offloads = eth_conf->txmode.offloads;
PMD_INIT_FUNC_TRACE();
- if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+ /* Rx offloads validation */
+ if (dev_rx_offloads_nodis & ~rx_offloads) {
+ DPAA_PMD_WARN(
+ "Rx offloads non configurable - requested 0x%" PRIx64
+ " ignored 0x%" PRIx64,
+ rx_offloads, dev_rx_offloads_nodis);
+ }
+
+ /* Tx offloads validation */
+ if (dev_tx_offloads_nodis & ~tx_offloads) {
+ DPAA_PMD_WARN(
+ "Tx offloads non configurable - requested 0x%" PRIx64
+ " ignored 0x%" PRIx64,
+ tx_offloads, dev_tx_offloads_nodis);
+ }
+
+ if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
DPAA_MAX_RX_PKT_LEN) {
fman_if_set_maxfrm(dpaa_intf->fif,
@@ -256,14 +308,12 @@ static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
dev_info->speed_capa = (ETH_LINK_SPEED_1G |
ETH_LINK_SPEED_10G);
- dev_info->rx_offload_capa =
- (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM);
- dev_info->tx_offload_capa =
- (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM);
+ dev_info->rx_offload_capa = dev_rx_offloads_sup |
+ dev_rx_offloads_nodis;
+ dev_info->tx_offload_capa = dev_tx_offloads_sup |
+ dev_tx_offloads_nodis;
+ dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE;
+ dev_info->default_txportconf.burst_size = DPAA_DEF_TX_BURST_SIZE;
}
static int dpaa_eth_link_update(struct rte_eth_dev *dev,
@@ -275,9 +325,9 @@ static int dpaa_eth_link_update(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
if (dpaa_intf->fif->mac_type == fman_mac_1g)
- link->link_speed = 1000;
+ link->link_speed = ETH_SPEED_NUM_1G;
else if (dpaa_intf->fif->mac_type == fman_mac_10g)
- link->link_speed = 10000;
+ link->link_speed = ETH_SPEED_NUM_10G;
else
DPAA_PMD_ERR("invalid link_speed: %s, %d",
dpaa_intf->name, dpaa_intf->fif->mac_type);
@@ -316,12 +366,12 @@ dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
uint64_t values[sizeof(struct dpaa_if_stats) / 8];
- if (xstats == NULL)
- return 0;
-
if (n < num)
return num;
+ if (xstats == NULL)
+ return 0;
+
fman_if_stats_get_all(dpaa_intf->fif, values,
sizeof(struct dpaa_if_stats) / 8);
@@ -335,10 +385,13 @@ dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
static int
dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
- __rte_unused unsigned int limit)
+ unsigned int limit)
{
unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
+ if (limit < stat_cnt)
+ return stat_cnt;
+
if (xstats_names != NULL)
for (i = 0; i < stat_cnt; i++)
snprintf(xstats_names[i].name,
@@ -366,7 +419,7 @@ dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
return 0;
fman_if_stats_get_all(dpaa_intf->fif, values_copy,
- sizeof(struct dpaa_if_stats));
+ sizeof(struct dpaa_if_stats) / 8);
for (i = 0; i < stat_cnt; i++)
values[i] =
@@ -813,7 +866,7 @@ dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
fman_if_clear_mac_addr(dpaa_intf->fif, index);
}
-static void
+static int
dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *addr)
{
@@ -825,6 +878,8 @@ dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, 0);
if (ret)
RTE_LOG(ERR, PMD, "error: Setting the MAC ADDR failed %d", ret);
+
+ return ret;
}
static struct eth_dev_ops dpaa_devops = {
@@ -1105,10 +1160,10 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
}
- /* Each device can not have more than DPAA_PCD_FQID_MULTIPLIER RX
+ /* Each device can not have more than DPAA_MAX_NUM_PCD_QUEUES RX
* queues.
*/
- if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_PCD_FQID_MULTIPLIER) {
+ if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) {
DPAA_PMD_ERR("Invalid number of RX queues\n");
return -EINVAL;
}
@@ -1317,6 +1372,7 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
if (!eth_dev)
return -ENOMEM;
+ rte_eth_dev_probing_finish(eth_dev);
return 0;
}
@@ -1366,8 +1422,10 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
/* Invoke PMD device initialization function */
diag = dpaa_dev_init(eth_dev);
- if (diag == 0)
+ if (diag == 0) {
+ rte_eth_dev_probing_finish(eth_dev);
return 0;
+ }
if (rte_eal_process_type() == RTE_PROC_PRIMARY)
rte_free(eth_dev->data->dev_private);
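dpaa_eth_dev_configure() now splits the offload capabilities into a configurable set and a set the hardware applies unconditionally, and it only warns when the application leaves one of the always-on bits out of its request: `nodis & ~requested` is non-zero exactly in that case. A standalone sketch of the bitmask logic, with illustrative bit values rather than the real DEV_RX_OFFLOAD_* flags:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Illustrative bits, not the DPDK offload values. */
            const uint64_t OFF_CKSUM   = 1ULL << 0;
            const uint64_t OFF_SCATTER = 1ULL << 1;
            const uint64_t OFF_JUMBO   = 1ULL << 2;

            uint64_t nodis     = OFF_CKSUM | OFF_SCATTER; /* always enabled by hardware */
            uint64_t requested = OFF_JUMBO | OFF_CKSUM;   /* what the application asked for */

            uint64_t missing = nodis & ~requested;        /* always-on bits left out of the request */
            if (missing)
                    printf("non-configurable offloads 0x%" PRIx64
                           " stay enabled despite not being requested\n", missing);
            return 0;
    }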
diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
index c051ae32..1897b9e4 100644
--- a/drivers/net/dpaa/dpaa_ethdev.h
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -51,6 +51,10 @@
/*Maximum number of slots available in TX ring*/
#define DPAA_TX_BURST_SIZE 7
+/* Default optimal burst size for RX and TX */
+#define DPAA_DEF_RX_BURST_SIZE 7
+#define DPAA_DEF_TX_BURST_SIZE DPAA_TX_BURST_SIZE
+
#ifndef VLAN_TAG_SIZE
#define VLAN_TAG_SIZE 4 /** < Vlan Header Length */
#endif
@@ -74,14 +78,10 @@
#define DPAA_DEBUG_FQ_TX_ERROR 1
#define DPAA_RSS_OFFLOAD_ALL ( \
- ETH_RSS_FRAG_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV4_SCTP | \
- ETH_RSS_FRAG_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_NONFRAG_IPV6_SCTP)
+ ETH_RSS_IP | \
+ ETH_RSS_UDP | \
+ ETH_RSS_TCP | \
+ ETH_RSS_SCTP)
#define DPAA_TX_CKSUM_OFFLOAD_MASK ( \
PKT_TX_IP_CKSUM | \
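The RSS capability advertised by the header now uses the coarse composite groups instead of enumerating every frag/non-frag protocol type; an application can pass the same composites straight into its port configuration. A hedged usage sketch (the queue counts and helper name are illustrative; error handling is left to the caller):

    #include <rte_ethdev.h>

    /* Sketch: request RSS over the composite groups the PMD advertises. */
    static int configure_rss(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
    {
            struct rte_eth_conf conf = { 0 };

            conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
            conf.rx_adv_conf.rss_conf.rss_hf =
                    ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP | ETH_RSS_SCTP;

            return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
    }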
diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
index 0dea8e79..1316d2ad 100644
--- a/drivers/net/dpaa/dpaa_rxtx.c
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -59,7 +59,7 @@
} while (0)
#if (defined RTE_LIBRTE_DPAA_DEBUG_DRIVER)
-void dpaa_display_frame(const struct qm_fd *fd)
+static void dpaa_display_frame(const struct qm_fd *fd)
{
int ii;
char *ptr;
@@ -90,11 +90,10 @@ static inline void dpaa_slow_parsing(struct rte_mbuf *m __rte_unused,
/*TBD:XXX: to be implemented*/
}
-static inline void dpaa_eth_packet_info(struct rte_mbuf *m,
- uint64_t fd_virt_addr)
+static inline void dpaa_eth_packet_info(struct rte_mbuf *m, void *fd_virt_addr)
{
struct annotations_t *annot = GET_ANNOTATIONS(fd_virt_addr);
- uint64_t prs = *((uint64_t *)(&annot->parse)) & DPAA_PARSE_MASK;
+ uint64_t prs = *((uintptr_t *)(&annot->parse)) & DPAA_PARSE_MASK;
DPAA_DP_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot);
@@ -351,7 +350,7 @@ dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
prev_seg = cur_seg;
}
- dpaa_eth_packet_info(first_seg, (uint64_t)vaddr);
+ dpaa_eth_packet_info(first_seg, vaddr);
rte_pktmbuf_free_seg(temp);
return first_seg;
@@ -394,7 +393,7 @@ dpaa_eth_fd_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
mbuf->ol_flags = 0;
mbuf->next = NULL;
rte_mbuf_refcnt_set(mbuf, 1);
- dpaa_eth_packet_info(mbuf, (uint64_t)mbuf->buf_addr);
+ dpaa_eth_packet_info(mbuf, mbuf->buf_addr);
return mbuf;
}
@@ -455,7 +454,7 @@ dpaa_rx_cb(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,
mbuf->ol_flags = 0;
mbuf->next = NULL;
rte_mbuf_refcnt_set(mbuf, 1);
- dpaa_eth_packet_info(mbuf, (uint64_t)mbuf->buf_addr);
+ dpaa_eth_packet_info(mbuf, mbuf->buf_addr);
}
}
@@ -593,7 +592,7 @@ uint16_t dpaa_eth_queue_rx(void *q,
static void *dpaa_get_pktbuf(struct dpaa_bp_info *bp_info)
{
int ret;
- uint64_t buf = 0;
+ size_t buf = 0;
struct bm_buffer bufs;
ret = bman_acquire(bp_info->bp, &bufs, 1, 0);
@@ -602,10 +601,10 @@ static void *dpaa_get_pktbuf(struct dpaa_bp_info *bp_info)
return (void *)buf;
}
- DPAA_DP_LOG(DEBUG, "got buffer 0x%lx from pool %d",
+ DPAA_DP_LOG(DEBUG, "got buffer 0x%" PRIx64 " from pool %d",
(uint64_t)bufs.addr, bufs.bpid);
- buf = (uint64_t)DPAA_MEMPOOL_PTOV(bp_info, bufs.addr)
+ buf = (size_t)DPAA_MEMPOOL_PTOV(bp_info, bufs.addr)
- bp_info->meta_data_size;
if (!buf)
goto out;
@@ -826,6 +825,8 @@ tx_on_external_pool(struct qman_fq *txq, struct rte_mbuf *mbuf,
}
DPAA_MBUF_TO_CONTIG_FD(dmable_mbuf, fd_arr, dpaa_intf->bp_info->bpid);
+ if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK)
+ dpaa_unsegmented_checksum(mbuf, fd_arr);
return 0;
}
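The casts in dpaa_rxtx.c move from uint64_t to size_t (and uintptr_t for the parse word) so that converting between buffer addresses and integers neither truncates nor triggers cast-size warnings on 32-bit builds, where a pointer is narrower than uint64_t. A small standalone illustration of the round-trip, nothing DPAA-specific:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int value = 42;
            void *p = &value;

            /* uintptr_t always matches the native pointer width, so the
             * round-trip is safe on both 32-bit and 64-bit targets. */
            uintptr_t bits = (uintptr_t)p;
            int *back = (int *)bits;

            printf("pointer width = %zu bytes, value = %d\n", sizeof(void *), *back);
            return 0;
    }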
diff --git a/drivers/net/dpaa/meson.build b/drivers/net/dpaa/meson.build
new file mode 100644
index 00000000..62dec7b0
--- /dev/null
+++ b/drivers/net/dpaa/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+deps += ['mempool_dpaa']
+
+sources = files('dpaa_ethdev.c',
+ 'dpaa_rxtx.c')
+
+allow_experimental_apis = true
+
+install_headers('rte_pmd_dpaa.h')
diff --git a/drivers/net/dpaa/rte_pmd_dpaa_version.map b/drivers/net/dpaa/rte_pmd_dpaa_version.map
index 3b937b10..c7ad4030 100644
--- a/drivers/net/dpaa/rte_pmd_dpaa_version.map
+++ b/drivers/net/dpaa/rte_pmd_dpaa_version.map
@@ -9,6 +9,4 @@ EXPERIMENTAL {
dpaa_eth_eventq_attach;
dpaa_eth_eventq_detach;
rte_pmd_dpaa_set_tx_loopback;
-
- local: *;
-} DPDK_17.11;
+};
diff --git a/drivers/net/dpaa2/Makefile b/drivers/net/dpaa2/Makefile
index 5a93a0b9..9b0b1433 100644
--- a/drivers/net/dpaa2/Makefile
+++ b/drivers/net/dpaa2/Makefile
@@ -10,14 +10,8 @@ include $(RTE_SDK)/mk/rte.vars.mk
#
LIB = librte_pmd_dpaa2.a
-ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT),y)
-CFLAGS += -O0 -g
-CFLAGS += "-Wno-error"
-else
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
-endif
-
CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2
CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc
CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
@@ -25,7 +19,6 @@ CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc
CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/portal
CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa2
-CFLAGS += -I$(RTE_SDK)/drivers/event/dpaa2
CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
# versioning export map
@@ -34,6 +27,9 @@ EXPORT_MAP := rte_pmd_dpaa2_version.map
# library version
LIBABIVER := 1
+# depends on fslmc bus which uses experimental API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += base/dpaa2_hw_dpni.c
SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_ethdev.c
diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
index b93376de..713a41bf 100644
--- a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
+++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
@@ -17,7 +17,7 @@
#include <rte_kvargs.h>
#include <rte_dev.h>
-#include <fslmc_logs.h>
+#include <dpaa2_pmd_logs.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
@@ -42,7 +42,7 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
p_params = rte_malloc(
NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
if (!p_params) {
- PMD_INIT_LOG(ERR, "Memory unavailable");
+ DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
return -ENOMEM;
}
memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
@@ -50,8 +50,8 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
ret = dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg);
if (ret) {
- PMD_INIT_LOG(ERR, "given rss_hf (%lx) not supported",
- req_dist_set);
+ DPAA2_PMD_ERR("Given RSS Hash (%" PRIx64 ") not supported",
+ req_dist_set);
rte_free(p_params);
return ret;
}
@@ -61,7 +61,7 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
if (ret) {
- PMD_INIT_LOG(ERR, "Unable to prepare extract parameters");
+ DPAA2_PMD_ERR("Unable to prepare extract parameters");
rte_free(p_params);
return ret;
}
@@ -70,7 +70,7 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
&tc_cfg);
rte_free(p_params);
if (ret) {
- PMD_INIT_LOG(ERR,
+ DPAA2_PMD_ERR(
"Setting distribution for Rx failed with err: %d",
ret);
return ret;
@@ -93,7 +93,7 @@ int dpaa2_remove_flow_dist(
p_params = rte_malloc(
NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
if (!p_params) {
- PMD_INIT_LOG(ERR, "Memory unavailable");
+ DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
return -ENOMEM;
}
memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
@@ -105,7 +105,7 @@ int dpaa2_remove_flow_dist(
ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
if (ret) {
- PMD_INIT_LOG(ERR, "Unable to prepare extract parameters");
+ DPAA2_PMD_ERR("Unable to prepare extract parameters");
rte_free(p_params);
return ret;
}
@@ -114,8 +114,8 @@ int dpaa2_remove_flow_dist(
&tc_cfg);
rte_free(p_params);
if (ret)
- PMD_INIT_LOG(ERR,
- "Setting distribution for Rx failed with err:%d",
+ DPAA2_PMD_ERR(
+ "Setting distribution for Rx failed with err: %d",
ret);
return ret;
}
@@ -256,7 +256,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
break;
default:
- PMD_INIT_LOG(WARNING,
+ DPAA2_PMD_WARN(
"Unsupported flow dist option %x",
dist_field);
return -EINVAL;
@@ -307,7 +307,7 @@ dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
retcode = dpni_set_buffer_layout(dpni, CMD_PRI_LOW, priv->token,
DPNI_QUEUE_RX, &layout);
if (retcode) {
- PMD_INIT_LOG(ERR, "Err(%d) in setting rx buffer layout\n",
+ DPAA2_PMD_ERR("Error configuring buffer pool Rx layout (%d)",
retcode);
return retcode;
}
@@ -322,9 +322,9 @@ dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
retcode = dpni_set_pools(dpni, CMD_PRI_LOW, priv->token, &bpool_cfg);
if (retcode != 0) {
- PMD_INIT_LOG(ERR, "Error in attaching the buffer pool list"
- " bpid = %d Error code = %d\n",
- bpool_cfg.pools[0].dpbp_id, retcode);
+ DPAA2_PMD_ERR("Error configuring buffer pool on interface."
+ " bpid = %d error code = %d",
+ bpool_cfg.pools[0].dpbp_id, retcode);
return retcode;
}
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 09a11d65..9297725d 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -18,7 +18,7 @@
#include <rte_dev.h>
#include <rte_fslmc.h>
-#include <fslmc_logs.h>
+#include "dpaa2_pmd_logs.h"
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
@@ -27,6 +27,36 @@
#include "dpaa2_ethdev.h"
#include <fsl_qbman_debug.h>
+/* Supported Rx offloads */
+static uint64_t dev_rx_offloads_sup =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+/* Rx offloads which cannot be disabled */
+static uint64_t dev_rx_offloads_nodis =
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_SCATTER;
+
+/* Supported Tx offloads */
+static uint64_t dev_tx_offloads_sup =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+/* Tx offloads which cannot be disabled */
+static uint64_t dev_tx_offloads_nodis =
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_MT_LOCKFREE |
+ DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
struct rte_dpaa2_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
uint8_t page_id; /* dpni statistics page id */
@@ -57,57 +87,7 @@ static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
-/**
- * Atomically reads the link status information from global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-dpaa2_dev_atomic_read_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = link;
- struct rte_eth_link *src = &dev->data->dev_link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
-/**
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-dpaa2_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &dev->data->dev_link;
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
+int dpaa2_logtype_pmd;
static int
dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
@@ -119,7 +99,7 @@ dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ DPAA2_PMD_ERR("dpni is NULL");
return -1;
}
@@ -131,8 +111,8 @@ dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
priv->token, vlan_id);
if (ret < 0)
- PMD_DRV_LOG(ERR, "ret = %d Unable to add/rem vlan %d hwid =%d",
- ret, vlan_id, priv->hw_id);
+ DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid =%d",
+ ret, vlan_id, priv->hw_id);
return ret;
}
@@ -149,25 +129,25 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
if (mask & ETH_VLAN_FILTER_MASK) {
/* VLAN Filter not available */
if (!priv->max_vlan_filters) {
- RTE_LOG(INFO, PMD, "VLAN filter not available\n");
+ DPAA2_PMD_INFO("VLAN filter not available");
goto next_mask;
}
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER)
ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
priv->token, true);
else
ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
priv->token, false);
if (ret < 0)
- RTE_LOG(ERR, PMD, "Unable to set vlan filter = %d\n",
- ret);
+ DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
}
next_mask:
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_extend)
- RTE_LOG(INFO, PMD,
- "VLAN extend offload not supported\n");
+ if (dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND)
+ DPAA2_PMD_INFO("VLAN extend offload not supported");
}
return 0;
@@ -187,10 +167,10 @@ dpaa2_fw_version_get(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
- RTE_LOG(WARNING, PMD, "\tmc_get_soc_version failed\n");
+ DPAA2_PMD_WARN("\tmc_get_soc_version failed");
if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
- RTE_LOG(WARNING, PMD, "\tmc_get_version failed\n");
+ DPAA2_PMD_WARN("\tmc_get_version failed");
ret = snprintf(fw_version, fw_size,
"%x-%d.%d.%d",
@@ -220,20 +200,18 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
- dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
- dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ dev_info->rx_offload_capa = dev_rx_offloads_sup |
+ dev_rx_offloads_nodis;
+ dev_info->tx_offload_capa = dev_tx_offloads_sup |
+ dev_tx_offloads_nodis;
dev_info->speed_capa = ETH_LINK_SPEED_1G |
ETH_LINK_SPEED_2_5G |
ETH_LINK_SPEED_10G;
+
+ dev_info->max_hash_mac_addrs = 0;
+ dev_info->max_vfs = 0;
+ dev_info->max_vmdq_pools = ETH_16_POOLS;
+ dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
}
static int
@@ -253,7 +231,7 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
RTE_CACHE_LINE_SIZE);
if (!mc_q) {
- PMD_INIT_LOG(ERR, "malloc failed for rx/tx queues\n");
+ DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues");
return -1;
}
@@ -320,18 +298,39 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct fsl_mc_io *dpni = priv->hw;
struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
- int rx_ip_csum_offload = false;
+ uint64_t rx_offloads = eth_conf->rxmode.offloads;
+ uint64_t tx_offloads = eth_conf->txmode.offloads;
+ int rx_l3_csum_offload = false;
+ int rx_l4_csum_offload = false;
+ int tx_l3_csum_offload = false;
+ int tx_l4_csum_offload = false;
int ret;
PMD_INIT_FUNC_TRACE();
- if (eth_conf->rxmode.jumbo_frame == 1) {
+ /* Rx offloads validation */
+ if (dev_rx_offloads_nodis & ~rx_offloads) {
+ DPAA2_PMD_WARN(
+ "Rx offloads non configurable - requested 0x%" PRIx64
+ " ignored 0x%" PRIx64,
+ rx_offloads, dev_rx_offloads_nodis);
+ }
+
+ /* Tx offloads validation */
+ if (dev_tx_offloads_nodis & ~tx_offloads) {
+ DPAA2_PMD_WARN(
+ "Tx offloads non configurable - requested 0x%" PRIx64
+ " ignored 0x%" PRIx64,
+ tx_offloads, dev_tx_offloads_nodis);
+ }
+
+ if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
priv->token, eth_conf->rxmode.max_rx_pkt_len);
if (ret) {
- PMD_INIT_LOG(ERR,
- "unable to set mtu. check config\n");
+ DPAA2_PMD_ERR(
+ "Unable to set mtu. check config");
return ret;
}
} else {
@@ -343,40 +342,52 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
ret = dpaa2_setup_flow_dist(dev,
eth_conf->rx_adv_conf.rss_conf.rss_hf);
if (ret) {
- PMD_INIT_LOG(ERR, "unable to set flow distribution."
- "please check queue config\n");
+ DPAA2_PMD_ERR("Unable to set flow distribution."
+ "Check queue config");
return ret;
}
}
- if (eth_conf->rxmode.hw_ip_checksum)
- rx_ip_csum_offload = true;
+ if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+ rx_l3_csum_offload = true;
+
+ if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
+ (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM))
+ rx_l4_csum_offload = true;
ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
- DPNI_OFF_RX_L3_CSUM, rx_ip_csum_offload);
+ DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload);
if (ret) {
- PMD_INIT_LOG(ERR, "Error to set RX l3 csum:Error = %d\n", ret);
+ DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret);
return ret;
}
ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
- DPNI_OFF_RX_L4_CSUM, rx_ip_csum_offload);
+ DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload);
if (ret) {
- PMD_INIT_LOG(ERR, "Error to get RX l4 csum:Error = %d\n", ret);
+ DPAA2_PMD_ERR("Error to get RX l4 csum:Error = %d", ret);
return ret;
}
+ if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+ tx_l3_csum_offload = true;
+
+ if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
+ (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
+ (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
+ tx_l4_csum_offload = true;
+
ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
- DPNI_OFF_TX_L3_CSUM, true);
+ DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload);
if (ret) {
- PMD_INIT_LOG(ERR, "Error to set TX l3 csum:Error = %d\n", ret);
+ DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret);
return ret;
}
ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
- DPNI_OFF_TX_L4_CSUM, true);
+ DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload);
if (ret) {
- PMD_INIT_LOG(ERR, "Error to get TX l4 csum:Error = %d\n", ret);
+ DPAA2_PMD_ERR("Error to get TX l4 csum:Error = %d", ret);
return ret;
}
@@ -390,14 +401,12 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
DPNI_FLCTYPE_HASH, true);
if (ret) {
- PMD_INIT_LOG(ERR, "Error setting FLCTYPE: Err = %d\n",
- ret);
+ DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret);
return ret;
}
}
- if (eth_conf->rxmode.hw_vlan_filter)
- dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+ dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
/* update the current status */
dpaa2_dev_link_update(dev, 0);
@@ -427,8 +436,8 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
- PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, pool = %p, conf =%p",
- dev, rx_queue_id, mb_pool, rx_conf);
+ DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
+ dev, rx_queue_id, mb_pool, rx_conf);
if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
bpid = mempool_to_bpid(mb_pool);
@@ -445,7 +454,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
memset(&cfg, 0, sizeof(struct dpni_queue));
options = options | DPNI_QUEUE_OPT_USER_CTX;
- cfg.user_context = (uint64_t)(dpaa2_q);
+ cfg.user_context = (size_t)(dpaa2_q);
/*if ls2088 or rev2 device, enable the stashing */
@@ -467,7 +476,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
dpaa2_q->tc_index, flow_id, options, &cfg);
if (ret) {
- PMD_INIT_LOG(ERR, "Error in setting the rx flow: = %d\n", ret);
+ DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret);
return -1;
}
@@ -479,14 +488,14 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
taildrop.threshold = CONG_THRESHOLD_RX_Q;
taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
taildrop.oal = CONG_RX_OAL;
- PMD_DRV_LOG(DEBUG, "Enabling Early Drop on queue = %d",
- rx_queue_id);
+ DPAA2_PMD_DEBUG("Enabling Early Drop on queue = %d",
+ rx_queue_id);
ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
DPNI_CP_QUEUE, DPNI_QUEUE_RX,
dpaa2_q->tc_index, flow_id, &taildrop);
if (ret) {
- PMD_INIT_LOG(ERR, "Error in setting the rx flow"
- " err : = %d\n", ret);
+ DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
+ ret);
return -1;
}
}
@@ -529,9 +538,9 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
tc_id, flow_id, options, &tx_flow_cfg);
if (ret) {
- PMD_INIT_LOG(ERR, "Error in setting the tx flow: "
- "tc_id=%d, flow =%d ErrorCode = %x\n",
- tc_id, flow_id, -ret);
+ DPAA2_PMD_ERR("Error in setting the tx flow: "
+ "tc_id=%d, flow=%d err=%d",
+ tc_id, flow_id, ret);
return -1;
}
@@ -543,8 +552,8 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
priv->token,
DPNI_CONF_DISABLE);
if (ret) {
- PMD_INIT_LOG(ERR, "Error in set tx conf mode settings"
- " ErrorCode = %x", ret);
+ DPAA2_PMD_ERR("Error in set tx conf mode settings: "
+ "err=%d", ret);
return -1;
}
}
@@ -560,7 +569,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
*/
cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
cong_notif_cfg.message_ctx = 0;
- cong_notif_cfg.message_iova = (uint64_t)dpaa2_q->cscn;
+ cong_notif_cfg.message_iova = (size_t)dpaa2_q->cscn;
cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
cong_notif_cfg.notification_mode =
DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
@@ -573,9 +582,9 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
tc_id,
&cong_notif_cfg);
if (ret) {
- PMD_INIT_LOG(ERR,
- "Error in setting tx congestion notification: = %d",
- -ret);
+ DPAA2_PMD_ERR(
+ "Error in setting tx congestion notification: "
+ "err=%d", ret);
return -ret;
}
}
@@ -610,7 +619,7 @@ dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret) {
- RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+ DPAA2_PMD_ERR("Failure in affining portal");
return -EINVAL;
}
}
@@ -620,8 +629,8 @@ dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
frame_cnt = qbman_fq_state_frame_count(&state);
- RTE_LOG(DEBUG, PMD, "RX frame count for q(%d) is %u\n",
- rx_queue_id, frame_cnt);
+ DPAA2_PMD_DEBUG("RX frame count for q(%d) is %u",
+ rx_queue_id, frame_cnt);
}
return frame_cnt;
}
@@ -670,14 +679,14 @@ dpaa2_interrupt_handler(void *param)
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL");
+ DPAA2_PMD_ERR("dpni is NULL");
return;
}
ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
irq_index, &status);
if (unlikely(ret)) {
- RTE_LOG(ERR, PMD, "Can't get irq status (err %d)", ret);
+ DPAA2_PMD_ERR("Can't get irq status (err %d)", ret);
clear = 0xffffffff;
goto out;
}
@@ -693,7 +702,7 @@ out:
ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
irq_index, clear);
if (unlikely(ret))
- RTE_LOG(ERR, PMD, "Can't clear irq status (err %d)", ret);
+ DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret);
}
static int
@@ -710,16 +719,16 @@ dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
irq_index, mask);
if (err < 0) {
- PMD_INIT_LOG(ERR, "Error: dpni_set_irq_mask():%d (%s)", err,
- strerror(-err));
+ DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err,
+ strerror(-err));
return err;
}
err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
irq_index, enable);
if (err < 0)
- PMD_INIT_LOG(ERR, "Error: dpni_set_irq_enable():%d (%s)", err,
- strerror(-err));
+ DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err,
+ strerror(-err));
return err;
}
@@ -747,8 +756,8 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
if (ret) {
- PMD_INIT_LOG(ERR, "Failure %d in enabling dpni %d device\n",
- ret, priv->hw_id);
+ DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d",
+ priv->hw_id, ret);
return ret;
}
@@ -758,7 +767,7 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
DPNI_QUEUE_TX, &qdid);
if (ret) {
- PMD_INIT_LOG(ERR, "Error to get qdid:ErrorCode = %d\n", ret);
+ DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret);
return ret;
}
priv->qdid = qdid;
@@ -769,8 +778,8 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
DPNI_QUEUE_RX, dpaa2_q->tc_index,
dpaa2_q->flow_id, &cfg, &qid);
if (ret) {
- PMD_INIT_LOG(ERR, "Error to get flow "
- "information Error code = %d\n", ret);
+ DPAA2_PMD_ERR("Error in getting flow information: "
+ "err=%d", ret);
return ret;
}
dpaa2_q->fqid = qid.fqid;
@@ -785,8 +794,8 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
priv->token, &err_cfg);
if (ret) {
- PMD_INIT_LOG(ERR, "Error to dpni_set_errors_behavior:"
- "code = %d\n", ret);
+ DPAA2_PMD_ERR("Error to dpni_set_errors_behavior: code = %d",
+ ret);
return ret;
}
@@ -845,14 +854,14 @@ dpaa2_dev_stop(struct rte_eth_dev *dev)
ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
if (ret) {
- PMD_INIT_LOG(ERR, "Failure (ret %d) in disabling dpni %d dev\n",
- ret, priv->hw_id);
+ DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev",
+ ret, priv->hw_id);
return;
}
/* clear the recorded link status */
memset(&link, 0, sizeof(link));
- dpaa2_dev_atomic_write_link_status(dev, &link);
+ rte_eth_linkstatus_set(dev, &link);
}
static void
@@ -878,13 +887,12 @@ dpaa2_dev_close(struct rte_eth_dev *dev)
/* Clean the device first */
ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
if (ret) {
- PMD_INIT_LOG(ERR, "Failure cleaning dpni device with"
- " error code %d\n", ret);
+ DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret);
return;
}
memset(&link, 0, sizeof(link));
- dpaa2_dev_atomic_write_link_status(dev, &link);
+ rte_eth_linkstatus_set(dev, &link);
}
static void
@@ -898,17 +906,17 @@ dpaa2_dev_promiscuous_enable(
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ DPAA2_PMD_ERR("dpni is NULL");
return;
}
ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
if (ret < 0)
- RTE_LOG(ERR, PMD, "Unable to enable U promisc mode %d\n", ret);
+ DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret);
ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
if (ret < 0)
- RTE_LOG(ERR, PMD, "Unable to enable M promisc mode %d\n", ret);
+ DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret);
}
static void
@@ -922,21 +930,20 @@ dpaa2_dev_promiscuous_disable(
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ DPAA2_PMD_ERR("dpni is NULL");
return;
}
ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
if (ret < 0)
- RTE_LOG(ERR, PMD, "Unable to disable U promisc mode %d\n", ret);
+ DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret);
if (dev->data->all_multicast == 0) {
ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
priv->token, false);
if (ret < 0)
- RTE_LOG(ERR, PMD,
- "Unable to disable M promisc mode %d\n",
- ret);
+ DPAA2_PMD_ERR("Unable to disable M promisc mode %d",
+ ret);
}
}
@@ -951,13 +958,13 @@ dpaa2_dev_allmulticast_enable(
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ DPAA2_PMD_ERR("dpni is NULL");
return;
}
ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
if (ret < 0)
- RTE_LOG(ERR, PMD, "Unable to enable multicast mode %d\n", ret);
+ DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret);
}
static void
@@ -970,7 +977,7 @@ dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ DPAA2_PMD_ERR("dpni is NULL");
return;
}
@@ -980,7 +987,7 @@ dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
if (ret < 0)
- RTE_LOG(ERR, PMD, "Unable to disable multicast mode %d\n", ret);
+ DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret);
}
static int
@@ -995,7 +1002,7 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ DPAA2_PMD_ERR("dpni is NULL");
return -EINVAL;
}
@@ -1004,9 +1011,11 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return -EINVAL;
if (frame_size > ETHER_MAX_LEN)
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
@@ -1016,10 +1025,10 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
frame_size);
if (ret) {
- PMD_DRV_LOG(ERR, "setting the max frame length failed");
+ DPAA2_PMD_ERR("Setting the max frame length failed");
return -1;
}
- PMD_DRV_LOG(INFO, "MTU is configured %d for the device", mtu);
+ DPAA2_PMD_INFO("MTU configured for the device: %d", mtu);
return 0;
}
@@ -1036,15 +1045,15 @@ dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ DPAA2_PMD_ERR("dpni is NULL");
return -1;
}
ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
priv->token, addr->addr_bytes);
if (ret)
- RTE_LOG(ERR, PMD,
- "error: Adding the MAC ADDR failed: err = %d\n", ret);
+ DPAA2_PMD_ERR(
+ "error: Adding the MAC ADDR failed: err = %d", ret);
return 0;
}
@@ -1063,18 +1072,18 @@ dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
macaddr = &data->mac_addrs[index];
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ DPAA2_PMD_ERR("dpni is NULL");
return;
}
ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
priv->token, macaddr->addr_bytes);
if (ret)
- RTE_LOG(ERR, PMD,
- "error: Removing the MAC ADDR failed: err = %d\n", ret);
+ DPAA2_PMD_ERR(
+ "error: Removing the MAC ADDR failed: err = %d", ret);
}
-static void
+static int
dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *addr)
{
@@ -1085,17 +1094,20 @@ dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL\n");
- return;
+ DPAA2_PMD_ERR("dpni is NULL");
+ return -EINVAL;
}
ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
priv->token, addr->addr_bytes);
if (ret)
- RTE_LOG(ERR, PMD,
- "error: Setting the MAC ADDR failed %d\n", ret);
+ DPAA2_PMD_ERR(
+ "error: Setting the MAC ADDR failed %d", ret);
+
+ return ret;
}
+
static
int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats)
@@ -1111,12 +1123,12 @@ int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
if (!dpni) {
- RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ DPAA2_PMD_ERR("dpni is NULL");
return -EINVAL;
}
if (!stats) {
- RTE_LOG(ERR, PMD, "stats is NULL\n");
+ DPAA2_PMD_ERR("stats is NULL");
return -EINVAL;
}
@@ -1155,7 +1167,7 @@ int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
return 0;
err:
- RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode);
+ DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
return retcode;
};
@@ -1169,12 +1181,12 @@ dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
union dpni_statistics value[3] = {};
unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);
- if (xstats == NULL)
- return 0;
-
if (n < num)
return num;
+ if (xstats == NULL)
+ return 0;
+
/* Get Counters from page_0*/
retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
0, 0, &value[0]);
@@ -1200,17 +1212,20 @@ dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
}
return i;
err:
- RTE_LOG(ERR, PMD, "Error in obtaining extended stats (%d)\n", retcode);
+ DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode);
return retcode;
}
static int
dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
- __rte_unused unsigned int limit)
+ unsigned int limit)
{
unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
+ if (limit < stat_cnt)
+ return stat_cnt;
+
if (xstats_names != NULL)
for (i = 0; i < stat_cnt; i++)
snprintf(xstats_names[i].name,
@@ -1269,7 +1284,7 @@ dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
for (i = 0; i < n; i++) {
if (ids[i] >= stat_cnt) {
- PMD_INIT_LOG(ERR, "id value isn't valid");
+ DPAA2_PMD_ERR("xstats id value isn't valid");
return -1;
}
values[i] = values_copy[ids[i]];
@@ -1294,7 +1309,7 @@ dpaa2_xstats_get_names_by_id(
for (i = 0; i < limit; i++) {
if (ids[i] >= stat_cnt) {
- PMD_INIT_LOG(ERR, "id value isn't valid");
+ DPAA2_PMD_ERR("xstats id value isn't valid");
return -1;
}
strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
@@ -1312,7 +1327,7 @@ dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ DPAA2_PMD_ERR("dpni is NULL");
return;
}
@@ -1323,7 +1338,7 @@ dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
return;
error:
- RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode);
+ DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
return;
};
@@ -1335,24 +1350,17 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
int ret;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
- struct rte_eth_link link, old;
+ struct rte_eth_link link;
struct dpni_link_state state = {0};
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ DPAA2_PMD_ERR("dpni is NULL");
return 0;
}
- memset(&old, 0, sizeof(old));
- dpaa2_dev_atomic_read_link_status(dev, &old);
ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
if (ret < 0) {
- RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);
- return -1;
- }
-
- if ((old.link_status == state.up) && (old.link_speed == state.rate)) {
- RTE_LOG(DEBUG, PMD, "No change in status\n");
+ DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret);
return -1;
}
@@ -1365,13 +1373,14 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
else
link.link_duplex = ETH_LINK_FULL_DUPLEX;
- dpaa2_dev_atomic_write_link_status(dev, &link);
-
- if (link.link_status)
- PMD_DRV_LOG(INFO, "Port %d Link is Up\n", dev->data->port_id);
+ ret = rte_eth_linkstatus_set(dev, &link);
+ if (ret == -1)
+ DPAA2_PMD_DEBUG("No change in status");
else
- PMD_DRV_LOG(INFO, "Port %d Link is Down", dev->data->port_id);
- return 0;
+ DPAA2_PMD_INFO("Port %d Link is %s\n", dev->data->port_id,
+ link.link_status ? "Up" : "Down");
+
+ return ret;
}
/**
@@ -1391,7 +1400,7 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
dpni = (struct fsl_mc_io *)priv->hw;
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "DPNI is NULL\n");
+ DPAA2_PMD_ERR("dpni is NULL");
return ret;
}
@@ -1399,7 +1408,7 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
if (ret) {
/* Unable to obtain dpni status; Not continuing */
- PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret);
+ DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
return -EINVAL;
}
@@ -1407,13 +1416,13 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
if (!en) {
ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
if (ret) {
- PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret);
+ DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
return -EINVAL;
}
}
ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
if (ret < 0) {
- RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);
+ DPAA2_PMD_ERR("Unable to get link state (%d)", ret);
return -1;
}
@@ -1422,10 +1431,9 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
dev->data->dev_link.link_status = state.up;
if (state.up)
- PMD_DRV_LOG(INFO, "Port %d Link is set as UP",
- dev->data->port_id);
+ DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id);
else
- PMD_DRV_LOG(INFO, "Port %d Link is DOWN", dev->data->port_id);
+ DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id);
return ret;
}
@@ -1448,7 +1456,7 @@ dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
dpni = (struct fsl_mc_io *)priv->hw;
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "Device has not yet been configured\n");
+ DPAA2_PMD_ERR("Device has not yet been configured");
return ret;
}
@@ -1461,12 +1469,12 @@ dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
do {
ret = dpni_disable(dpni, 0, priv->token);
if (ret) {
- PMD_DRV_LOG(ERR, "dpni disable failed (%d)", ret);
+ DPAA2_PMD_ERR("dpni disable failed (%d)", ret);
return ret;
}
ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
if (ret) {
- PMD_DRV_LOG(ERR, "dpni_is_enabled failed (%d)", ret);
+ DPAA2_PMD_ERR("dpni enable check failed (%d)", ret);
return ret;
}
if (dpni_enabled)
@@ -1475,12 +1483,12 @@ dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
} while (dpni_enabled && --retries);
if (!retries) {
- PMD_DRV_LOG(WARNING, "Retry count exceeded disabling DPNI\n");
+ DPAA2_PMD_WARN("Retry count exceeded disabling dpni");
/* todo- we may have to manually cleanup queues.
*/
} else {
- PMD_DRV_LOG(INFO, "Port %d Link DOWN successful",
- dev->data->port_id);
+ DPAA2_PMD_INFO("Port %d Link DOWN successful",
+ dev->data->port_id);
}
dev->data->dev_link.link_status = 0;
@@ -1502,13 +1510,13 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
dpni = (struct fsl_mc_io *)priv->hw;
if (dpni == NULL || fc_conf == NULL) {
- RTE_LOG(ERR, PMD, "device not configured\n");
+ DPAA2_PMD_ERR("device not configured");
return ret;
}
ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
if (ret) {
- RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);
+ DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret);
return ret;
}
@@ -1558,7 +1566,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
dpni = (struct fsl_mc_io *)priv->hw;
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ DPAA2_PMD_ERR("dpni is NULL");
return ret;
}
@@ -1568,7 +1576,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
*/
ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
if (ret) {
- RTE_LOG(ERR, PMD, "Unable to get link state (err=%d)\n", ret);
+ DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret);
return -1;
}
@@ -1613,16 +1621,15 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
break;
default:
- RTE_LOG(ERR, PMD, "Incorrect Flow control flag (%d)\n",
- fc_conf->mode);
+ DPAA2_PMD_ERR("Incorrect Flow control flag (%d)",
+ fc_conf->mode);
return -1;
}
ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
if (ret)
- RTE_LOG(ERR, PMD,
- "Unable to set Link configuration (err=%d)\n",
- ret);
+ DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)",
+ ret);
/* Enable link */
dpaa2_dev_set_link_up(dev);
@@ -1643,13 +1650,13 @@ dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
if (rss_conf->rss_hf) {
ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf);
if (ret) {
- PMD_INIT_LOG(ERR, "unable to set flow dist");
+ DPAA2_PMD_ERR("Unable to set flow dist");
return ret;
}
} else {
ret = dpaa2_remove_flow_dist(dev, 0);
if (ret) {
- PMD_INIT_LOG(ERR, "unable to remove flow dist");
+ DPAA2_PMD_ERR("Unable to remove flow dist");
return ret;
}
}
@@ -1702,12 +1709,12 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
}
options |= DPNI_QUEUE_OPT_USER_CTX;
- cfg.user_context = (uint64_t)(dpaa2_ethq);
+ cfg.user_context = (size_t)(dpaa2_ethq);
ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
dpaa2_ethq->tc_index, flow_id, options, &cfg);
if (ret) {
- RTE_LOG(ERR, PMD, "Error in dpni_set_queue: ret: %d\n", ret);
+ DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
return ret;
}
@@ -1734,7 +1741,7 @@ int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
dpaa2_ethq->tc_index, flow_id, options, &cfg);
if (ret)
- RTE_LOG(ERR, PMD, "Error in dpni_set_queue: ret: %d\n", ret);
+ DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
return ret;
}
@@ -1801,15 +1808,15 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
if (!dpni_dev) {
- PMD_INIT_LOG(ERR, "malloc failed for dpni device\n");
+ DPAA2_PMD_ERR("Memory allocation failed for dpni device");
return -1;
}
dpni_dev->regs = rte_mcp_ptr_list[0];
ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
if (ret) {
- PMD_INIT_LOG(ERR,
- "Failure in opening dpni@%d with err code %d\n",
+ DPAA2_PMD_ERR(
+ "Failure in opening dpni@%d with err code %d",
hw_id, ret);
rte_free(dpni_dev);
return -1;
@@ -1818,16 +1825,15 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
/* Clean the device first */
ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
if (ret) {
- PMD_INIT_LOG(ERR,
- "Failure cleaning dpni@%d with err code %d\n",
- hw_id, ret);
+ DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d",
+ hw_id, ret);
goto init_err;
}
ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
if (ret) {
- PMD_INIT_LOG(ERR,
- "Failure in get dpni@%d attribute, err code %d\n",
+ DPAA2_PMD_ERR(
+ "Failure in get dpni@%d attribute, err code %d",
hw_id, ret);
goto init_err;
}
@@ -1843,8 +1849,9 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
/* Using number of TX queues as number of TX TCs */
priv->nb_tx_queues = attr.num_tx_tcs;
- PMD_DRV_LOG(DEBUG, "RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d",
- priv->num_rx_tc, priv->nb_rx_queues, priv->nb_tx_queues);
+ DPAA2_PMD_DEBUG("RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d",
+ priv->num_rx_tc, priv->nb_rx_queues,
+ priv->nb_tx_queues);
priv->hw = dpni_dev;
priv->hw_id = hw_id;
@@ -1856,7 +1863,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
/* Allocate memory for hardware structure for queues */
ret = dpaa2_alloc_rx_tx_queues(eth_dev);
if (ret) {
- PMD_INIT_LOG(ERR, "dpaa2_alloc_rx_tx_queuesFailed\n");
+ DPAA2_PMD_ERR("Queue allocation Failed");
goto init_err;
}
@@ -1864,9 +1871,9 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->data->mac_addrs = rte_zmalloc("dpni",
ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
if (eth_dev->data->mac_addrs == NULL) {
- PMD_INIT_LOG(ERR,
+ DPAA2_PMD_ERR(
"Failed to allocate %d bytes needed to store MAC addresses",
- ETHER_ADDR_LEN * attr.mac_filter_entries);
+ ETHER_ADDR_LEN * attr.mac_filter_entries);
ret = -ENOMEM;
goto init_err;
}
@@ -1875,7 +1882,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
priv->token,
(uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes));
if (ret) {
- PMD_INIT_LOG(ERR, "DPNI get mac address failed:Err Code = %d\n",
+ DPAA2_PMD_ERR("DPNI get mac address failed:Err Code = %d",
ret);
goto init_err;
}
@@ -1887,8 +1894,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
DPNI_QUEUE_TX, &layout);
if (ret) {
- PMD_INIT_LOG(ERR, "Error (%d) in setting tx buffer layout",
- ret);
+ DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret);
goto init_err;
}
@@ -1899,7 +1905,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
DPNI_QUEUE_TX_CONFIRM, &layout);
if (ret) {
- PMD_INIT_LOG(ERR, "Error (%d) in setting tx-conf buffer layout",
+ DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout",
ret);
goto init_err;
}
@@ -1908,7 +1914,6 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
eth_dev->tx_pkt_burst = dpaa2_dev_tx;
- rte_fslmc_vfio_dmamap();
RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
return 0;
@@ -1931,7 +1936,7 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
return 0;
if (!dpni) {
- PMD_INIT_LOG(WARNING, "Already closed or not started");
+ DPAA2_PMD_WARN("Already closed or not started");
return -1;
}
@@ -1958,8 +1963,8 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
/* Close the device at underlying layer*/
ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
if (ret) {
- PMD_INIT_LOG(ERR,
- "Failure closing dpni device with err code %d\n",
+ DPAA2_PMD_ERR(
+ "Failure closing dpni device with err code %d",
ret);
}
@@ -1971,7 +1976,7 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;
- RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
+ DPAA2_PMD_INFO("%s: netdev deleted", eth_dev->data->name);
return 0;
}
@@ -1991,8 +1996,8 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
sizeof(struct dpaa2_dev_priv),
RTE_CACHE_LINE_SIZE);
if (eth_dev->data->dev_private == NULL) {
- PMD_INIT_LOG(CRIT, "Cannot allocate memzone for"
- " private port data\n");
+ DPAA2_PMD_CRIT(
+ "Unable to allocate memory for private data");
rte_eth_dev_release_port(eth_dev);
return -ENOMEM;
}
@@ -2013,8 +2018,10 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
/* Invoke PMD device initialization function */
diag = dpaa2_dev_init(eth_dev);
- if (diag == 0)
+ if (diag == 0) {
+ rte_eth_dev_probing_finish(eth_dev);
return 0;
+ }
if (rte_eal_process_type() == RTE_PROC_PRIMARY)
rte_free(eth_dev->data->dev_private);
@@ -2045,3 +2052,12 @@ static struct rte_dpaa2_driver rte_dpaa2_pmd = {
};
RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);
+
+RTE_INIT(dpaa2_pmd_init_log);
+static void
+dpaa2_pmd_init_log(void)
+{
+ dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2");
+ if (dpaa2_logtype_pmd >= 0)
+ rte_log_set_level(dpaa2_logtype_pmd, RTE_LOG_NOTICE);
+}
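dpaa2_dev_link_update() above drops the driver's private cmpset wrappers in favour of the generic rte_eth_linkstatus_set() helper, which updates dev->data->dev_link atomically and reports whether the recorded status actually changed (the driver treats -1 as "no change"). A condensed sketch of a link_update callback built on that helper; the speed and duplex values are placeholders rather than something read from real hardware:

    #include <string.h>
    #include <rte_ethdev_driver.h>

    /* Sketch of an eth_link_update_t callback; the hardware query is faked. */
    static int
    example_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
    {
            struct rte_eth_link link;

            memset(&link, 0, sizeof(link));
            link.link_status = ETH_LINK_UP;          /* would come from the MAC/PHY */
            link.link_speed  = ETH_SPEED_NUM_10G;
            link.link_duplex = ETH_LINK_FULL_DUPLEX;

            /* Returns -1 when the stored link status was already identical. */
            return rte_eth_linkstatus_set(dev, &link);
    }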
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index ba0856f3..bd69f523 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -50,6 +50,12 @@
/* Disable RX tail drop, default is enable */
#define DPAA2_RX_TAILDROP_OFF 0x04
+#define DPAA2_RSS_OFFLOAD_ALL ( \
+ ETH_RSS_IP | \
+ ETH_RSS_UDP | \
+ ETH_RSS_TCP | \
+ ETH_RSS_SCTP)
+
/* LX2 FRC Parsed values (Little Endian) */
#define DPAA2_PKT_TYPE_ETHER 0x0060
#define DPAA2_PKT_TYPE_IPV4 0x0000
diff --git a/drivers/net/dpaa2/dpaa2_pmd_logs.h b/drivers/net/dpaa2/dpaa2_pmd_logs.h
new file mode 100644
index 00000000..98a48968
--- /dev/null
+++ b/drivers/net/dpaa2/dpaa2_pmd_logs.h
@@ -0,0 +1,41 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 NXP
+ */
+
+#ifndef _DPAA2_PMD_LOGS_H_
+#define _DPAA2_PMD_LOGS_H_
+
+extern int dpaa2_logtype_pmd;
+
+#define DPAA2_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_logtype_pmd, "dpaa2_net: " \
+ fmt "\n", ##args)
+
+#define DPAA2_PMD_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_logtype_pmd, "dpaa2_net: %s(): "\
+ fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() DPAA2_PMD_LOG(DEBUG, " >>")
+
+#define DPAA2_PMD_CRIT(fmt, args...) \
+ DPAA2_PMD_LOG(CRIT, fmt, ## args)
+#define DPAA2_PMD_INFO(fmt, args...) \
+ DPAA2_PMD_LOG(INFO, fmt, ## args)
+#define DPAA2_PMD_ERR(fmt, args...) \
+ DPAA2_PMD_LOG(ERR, fmt, ## args)
+#define DPAA2_PMD_WARN(fmt, args...) \
+ DPAA2_PMD_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, toggled out at compile time if level lower than current level */
+#define DPAA2_PMD_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#define DPAA2_PMD_DP_DEBUG(fmt, args...) \
+ DPAA2_PMD_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_PMD_DP_INFO(fmt, args...) \
+ DPAA2_PMD_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_PMD_DP_WARN(fmt, args...) \
+ DPAA2_PMD_DP_LOG(WARNING, fmt, ## args)
+
+#endif /* _DPAA2_PMD_LOGS_H_ */
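With this header in place, control-path messages go through the dynamically registered dpaa2_logtype_pmd (registered via RTE_INIT in dpaa2_ethdev.c), while the _DP_ variants route through RTE_LOG_DP and are compiled out below the build-time datapath log level. A brief usage sketch; the function and the error condition are made up:

    /* Assumes dpaa2_pmd_logs.h and the registered dpaa2_logtype_pmd are available. */
    #include "dpaa2_pmd_logs.h"

    static int example_queue_setup(int queue_id)
    {
            PMD_INIT_FUNC_TRACE();

            if (queue_id < 0) {
                    DPAA2_PMD_ERR("invalid queue id %d", queue_id);
                    return -1;
            }

            /* Datapath message: dropped at compile time below the DP log level. */
            DPAA2_PMD_DP_DEBUG("queue %d configured\n", queue_id);
            return 0;
    }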
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 183293c1..dac086d6 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -16,13 +16,12 @@
#include <rte_dev.h>
#include <rte_fslmc.h>
-#include <fslmc_logs.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
-#include <dpaa2_eventdev.h>
+#include "dpaa2_pmd_logs.h"
#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"
@@ -37,7 +36,7 @@
static inline void __attribute__((hot))
dpaa2_dev_rx_parse_frc(struct rte_mbuf *m, uint16_t frc)
{
- PMD_RX_LOG(DEBUG, "frc = 0x%x ", frc);
+ DPAA2_PMD_DP_DEBUG("frc = 0x%x\t", frc);
m->packet_type = RTE_PTYPE_UNKNOWN;
switch (frc) {
@@ -104,13 +103,12 @@ dpaa2_dev_rx_parse_frc(struct rte_mbuf *m, uint16_t frc)
}
static inline uint32_t __attribute__((hot))
-dpaa2_dev_rx_parse_slow(uint64_t hw_annot_addr)
+dpaa2_dev_rx_parse_slow(struct dpaa2_annot_hdr *annotation)
{
uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
- struct dpaa2_annot_hdr *annotation =
- (struct dpaa2_annot_hdr *)hw_annot_addr;
- PMD_RX_LOG(DEBUG, "annotation = 0x%lx ", annotation->word4);
+ DPAA2_PMD_DP_DEBUG("(slow parse) Annotation = 0x%" PRIx64 "\t",
+ annotation->word4);
if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
pkt_type = RTE_PTYPE_L2_ETHER_ARP;
goto parse_done;
@@ -167,12 +165,13 @@ parse_done:
}
static inline uint32_t __attribute__((hot))
-dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, uint64_t hw_annot_addr)
+dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
{
struct dpaa2_annot_hdr *annotation =
(struct dpaa2_annot_hdr *)hw_annot_addr;
- PMD_RX_LOG(DEBUG, "annotation = 0x%lx ", annotation->word4);
+ DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
+ annotation->word4);
/* Check offloads first */
if (BIT_ISSET_AT_POS(annotation->word3,
@@ -203,29 +202,27 @@ dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, uint64_t hw_annot_addr)
return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
RTE_PTYPE_L4_UDP;
default:
- PMD_RX_LOG(DEBUG, "Slow parse the parsing results\n");
break;
}
- return dpaa2_dev_rx_parse_slow(hw_annot_addr);
+ return dpaa2_dev_rx_parse_slow(annotation);
}
static inline struct rte_mbuf *__attribute__((hot))
eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
{
struct qbman_sge *sgt, *sge;
- dma_addr_t sg_addr;
+ size_t sg_addr, fd_addr;
int i = 0;
- uint64_t fd_addr;
struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;
- fd_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+ fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
/* Get Scatter gather table address */
sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));
sge = &sgt[i++];
- sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));
+ sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));
/* First Scatter gather entry */
first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
@@ -243,14 +240,14 @@ eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
DPAA2_GET_FD_FRC_PARSE_SUM(fd));
else
first_seg->packet_type = dpaa2_dev_rx_parse(first_seg,
- (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
- + DPAA2_FD_PTA_SIZE);
+ (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
+ + DPAA2_FD_PTA_SIZE));
rte_mbuf_refcnt_set(first_seg, 1);
cur_seg = first_seg;
while (!DPAA2_SG_IS_FINAL(sge)) {
sge = &sgt[i++];
- sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(
+ sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
DPAA2_GET_FLE_ADDR(sge));
next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
@@ -299,11 +296,11 @@ eth_fd_to_mbuf(const struct qbman_fd *fd)
dpaa2_dev_rx_parse_frc(mbuf, DPAA2_GET_FD_FRC_PARSE_SUM(fd));
else
mbuf->packet_type = dpaa2_dev_rx_parse(mbuf,
- (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
- + DPAA2_FD_PTA_SIZE);
+ (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
+ + DPAA2_FD_PTA_SIZE));
- PMD_RX_LOG(DEBUG, "to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
- "fd_off=%d fd =%lx, meta = %d bpid =%d, len=%d\n",
+ DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
+ "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
mbuf, mbuf->buf_addr, mbuf->data_off,
DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
@@ -320,15 +317,9 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
struct qbman_sge *sgt, *sge = NULL;
int i;
- if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
- int ret = rte_vlan_insert(&mbuf);
- if (ret)
- return ret;
- }
-
temp = rte_pktmbuf_alloc(mbuf->pool);
if (temp == NULL) {
- PMD_TX_LOG(ERR, "No memory to allocate S/G table");
+ DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
return -ENOMEM;
}
@@ -340,7 +331,7 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
/*Set Scatter gather table and Scatter gather entries*/
sgt = (struct qbman_sge *)(
- (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
+ (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
+ DPAA2_GET_FD_OFFSET(fd));
for (i = 0; i < mbuf->nb_segs; i++) {
@@ -392,17 +383,10 @@ static void __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
struct qbman_fd *fd, uint16_t bpid)
{
- if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
- if (rte_vlan_insert(&mbuf)) {
- rte_pktmbuf_free(mbuf);
- return;
- }
- }
-
DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);
- PMD_TX_LOG(DEBUG, "mbuf =%p, mbuf->buf_addr =%p, off = %d,"
- "fd_off=%d fd =%lx, meta = %d bpid =%d, len=%d\n",
+ DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
+ "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
mbuf, mbuf->buf_addr, mbuf->data_off,
DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
@@ -431,15 +415,9 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
struct rte_mbuf *m;
void *mb = NULL;
- if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
- int ret = rte_vlan_insert(&mbuf);
- if (ret)
- return ret;
- }
-
if (rte_dpaa2_mbuf_alloc_bulk(
rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
- PMD_TX_LOG(WARNING, "Unable to allocated DPAA2 buffer");
+ DPAA2_PMD_DP_DEBUG("Unable to allocated DPAA2 buffer\n");
return -1;
}
m = (struct rte_mbuf *)mb;
@@ -455,17 +433,18 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);
- PMD_TX_LOG(DEBUG, " mbuf %p BMAN buf addr %p",
- (void *)mbuf, mbuf->buf_addr);
-
- PMD_TX_LOG(DEBUG, " fdaddr =%lx bpid =%d meta =%d off =%d, len =%d",
- DPAA2_GET_FD_ADDR(fd),
+ DPAA2_PMD_DP_DEBUG(
+ "mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
+ " meta: %d, off: %d, len: %d\n",
+ (void *)mbuf,
+ mbuf->buf_addr,
+ DPAA2_GET_FD_ADDR(fd),
DPAA2_GET_FD_BPID(fd),
rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
DPAA2_GET_FD_OFFSET(fd),
DPAA2_GET_FD_LEN(fd));
- return 0;
+ return 0;
}
uint16_t
@@ -483,14 +462,15 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
struct rte_eth_dev *dev = dpaa2_q->dev;
- if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
- ret = dpaa2_affine_qbman_swp();
+ if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
+ ret = dpaa2_affine_qbman_ethrx_swp();
if (ret) {
- RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+ DPAA2_PMD_ERR("Failure in affining portal");
return 0;
}
}
- swp = DPAA2_PER_LCORE_PORTAL;
+ swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
+
if (unlikely(!q_storage->active_dqs)) {
q_storage->toggle = 0;
dq_storage = q_storage->dq_storage[q_storage->toggle];
@@ -501,30 +481,32 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
q_storage->last_num_pkts);
qbman_pull_desc_set_fq(&pulldesc, fqid);
qbman_pull_desc_set_storage(&pulldesc, dq_storage,
- (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
- if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
+ (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+ if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
while (!qbman_check_command_complete(
- get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
+ get_swp_active_dqs(
+ DPAA2_PER_LCORE_ETHRX_DPIO->index)))
;
- clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
+ clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
}
while (1) {
if (qbman_swp_pull(swp, &pulldesc)) {
- PMD_RX_LOG(WARNING, "VDQ command is not issued."
- "QBMAN is busy\n");
+ DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
+ " QBMAN is busy (1)\n");
/* Portal was busy, try again */
continue;
}
break;
}
q_storage->active_dqs = dq_storage;
- q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
- set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);
+ q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
+ set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
+ dq_storage);
}
dq_storage = q_storage->active_dqs;
- rte_prefetch0((void *)((uint64_t)(dq_storage)));
- rte_prefetch0((void *)((uint64_t)(dq_storage + 1)));
+ rte_prefetch0((void *)(size_t)(dq_storage));
+ rte_prefetch0((void *)(size_t)(dq_storage + 1));
/* Prepare next pull descriptor. This will give space for the
* prefething done on DQRR entries
@@ -535,7 +517,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
qbman_pull_desc_set_numframes(&pulldesc, DPAA2_DQRR_RING_SIZE);
qbman_pull_desc_set_fq(&pulldesc, fqid);
qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
- (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
+ (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
/* Check if the previous issued command is completed.
* Also seems like the SWP is shared between the Ethernet Driver
@@ -554,7 +536,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
*/
while (!qbman_check_new_result(dq_storage))
;
- rte_prefetch0((void *)((uint64_t)(dq_storage + 2)));
+ rte_prefetch0((void *)((size_t)(dq_storage + 2)));
/* Check whether Last Pull command is Expired and
* setting Condition for Loop termination
*/
@@ -569,7 +551,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
next_fd = qbman_result_DQ_fd(dq_storage + 1);
/* Prefetch Annotation address for the parse results */
- rte_prefetch0((void *)(DPAA2_GET_FD_ADDR(next_fd)
+ rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(next_fd)
+ DPAA2_FD_PTA_SIZE + 16));
if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
@@ -578,31 +560,31 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
bufs[num_rx] = eth_fd_to_mbuf(fd);
bufs[num_rx]->port = dev->data->port_id;
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
rte_vlan_strip(bufs[num_rx]);
dq_storage++;
num_rx++;
} while (pending);
- if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
+ if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
while (!qbman_check_command_complete(
- get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
+ get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
;
- clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
+ clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
}
/* issue a volatile dequeue command for next pull */
while (1) {
if (qbman_swp_pull(swp, &pulldesc)) {
- PMD_RX_LOG(WARNING, "VDQ command is not issued."
- "QBMAN is busy\n");
+ DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
+ "QBMAN is busy (2)\n");
continue;
}
break;
}
q_storage->active_dqs = dq_storage1;
- q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
- set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
+ q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
+ set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
dpaa2_q->rx_pkts += num_rx;
@@ -616,7 +598,7 @@ dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
struct dpaa2_queue *rxq,
struct rte_event *ev)
{
- rte_prefetch0((void *)(DPAA2_GET_FD_ADDR(fd) +
+ rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
DPAA2_FD_PTA_SIZE + 16));
ev->flow_id = rxq->ev.flow_id;
@@ -641,7 +623,7 @@ dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
{
uint8_t dqrr_index;
- rte_prefetch0((void *)(DPAA2_GET_FD_ADDR(fd) +
+ rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
DPAA2_FD_PTA_SIZE + 16));
ev->flow_id = rxq->ev.flow_id;
@@ -686,13 +668,13 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret) {
- RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+ DPAA2_PMD_ERR("Failure in affining portal");
return 0;
}
}
swp = DPAA2_PER_LCORE_PORTAL;
- PMD_TX_LOG(DEBUG, "===> dev =%p, fqid =%d", dev, dpaa2_q->fqid);
+ DPAA2_PMD_DP_DEBUG("===> dev =%p, fqid =%d\n", dev, dpaa2_q->fqid);
/*Prepare enqueue descriptor*/
qbman_eq_desc_clear(&eqdesc);
@@ -726,7 +708,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
fd_arr[loop].simple.frc = 0;
DPAA2_RESET_FD_CTRL((&fd_arr[loop]));
- DPAA2_SET_FD_FLC((&fd_arr[loop]), NULL);
+ DPAA2_SET_FD_FLC((&fd_arr[loop]), (size_t)NULL);
if (likely(RTE_MBUF_DIRECT(*bufs))) {
mp = (*bufs)->pool;
/* Check the basic scenario and set
@@ -736,8 +718,10 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
priv->bp_list->dpaa2_ops_index &&
(*bufs)->nb_segs == 1 &&
rte_mbuf_refcnt_read((*bufs)) == 1)) {
- if (unlikely((*bufs)->ol_flags
- & PKT_TX_VLAN_PKT)) {
+ if (unlikely(((*bufs)->ol_flags
+ & PKT_TX_VLAN_PKT) ||
+ (dev->data->dev_conf.txmode.offloads
+ & DEV_TX_OFFLOAD_VLAN_INSERT))) {
ret = rte_vlan_insert(bufs);
if (ret)
goto send_n_return;
@@ -753,19 +737,26 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
}
/* Not a hw_pkt pool allocated frame */
if (unlikely(!mp || !priv->bp_list)) {
- PMD_TX_LOG(ERR, "err: no bpool attached");
+ DPAA2_PMD_ERR("Err: No buffer pool attached");
goto send_n_return;
}
+ if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
+ (dev->data->dev_conf.txmode.offloads
+ & DEV_TX_OFFLOAD_VLAN_INSERT))) {
+ int ret = rte_vlan_insert(bufs);
+ if (ret)
+ goto send_n_return;
+ }
if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
- PMD_TX_LOG(ERR, "non hw offload bufffer ");
+ DPAA2_PMD_WARN("Non DPAA2 buffer pool");
/* alloc should be from the default buffer pool
* attached to this interface
*/
bpid = priv->bp_list->buf_pool.bpid;
if (unlikely((*bufs)->nb_segs > 1)) {
- PMD_TX_LOG(ERR, "S/G support not added"
+ DPAA2_PMD_ERR("S/G support not added"
" for non hw offload buffer");
goto send_n_return;
}
diff --git a/drivers/net/dpaa2/meson.build b/drivers/net/dpaa2/meson.build
new file mode 100644
index 00000000..213f0d72
--- /dev/null
+++ b/drivers/net/dpaa2/meson.build
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['mempool_dpaa2']
+sources = files('base/dpaa2_hw_dpni.c',
+ 'dpaa2_ethdev.c',
+ 'dpaa2_rxtx.c',
+ 'mc/dpkg.c',
+ 'mc/dpni.c')
+
+includes += include_directories('base', 'mc')
+
+# depends on fslmc bus which uses experimental API
+allow_experimental_apis = true
diff --git a/drivers/net/e1000/Makefile b/drivers/net/e1000/Makefile
index ba81a1f4..9c87e883 100644
--- a/drivers/net/e1000/Makefile
+++ b/drivers/net/e1000/Makefile
@@ -22,7 +22,8 @@ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
#
# CFLAGS for icc
#
-CFLAGS_BASE_DRIVER = -wd177 -wd181 -wd188 -wd869 -wd2259
+CFLAGS_BASE_DRIVER = -diag-disable 177 -diag-disable 181
+CFLAGS_BASE_DRIVER += -diag-disable 869 -diag-disable 2259
else
#
# CFLAGS for gcc/clang
@@ -61,6 +62,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82575.c
SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_i210.c
SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_api.c
SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_ich8lan.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_logs.c
SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_mac.c
SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_manage.c
SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_mbx.c
diff --git a/drivers/net/e1000/base/e1000_82575.c b/drivers/net/e1000/base/e1000_82575.c
index 15c7dd84..da1a9a70 100644
--- a/drivers/net/e1000/base/e1000_82575.c
+++ b/drivers/net/e1000/base/e1000_82575.c
@@ -312,6 +312,9 @@ STATIC s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580;
phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
break;
+ case BCM54616_E_PHY_ID:
+ phy->type = e1000_phy_none;
+ break;
default:
ret_val = -E1000_ERR_PHY;
goto out;
@@ -1607,6 +1610,8 @@ STATIC s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
case e1000_phy_82580:
ret_val = e1000_copper_link_setup_82577(hw);
break;
+ case e1000_phy_none:
+ break;
default:
ret_val = -E1000_ERR_PHY;
break;
diff --git a/drivers/net/e1000/base/e1000_defines.h b/drivers/net/e1000/base/e1000_defines.h
index dbc2bbbe..e2101c17 100644
--- a/drivers/net/e1000/base/e1000_defines.h
+++ b/drivers/net/e1000/base/e1000_defines.h
@@ -1274,6 +1274,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I350_I_PHY_ID 0x015403B0
#define I210_I_PHY_ID 0x01410C00
#define IGP04E1000_E_PHY_ID 0x02A80391
+#define BCM54616_E_PHY_ID 0x03625D10
#define M88_VENDOR 0x0141
/* M88E1000 Specific Registers */
diff --git a/drivers/net/e1000/base/e1000_phy.h b/drivers/net/e1000/base/e1000_phy.h
index 3e45a9ef..2cd0e14b 100644
--- a/drivers/net/e1000/base/e1000_phy.h
+++ b/drivers/net/e1000/base/e1000_phy.h
@@ -330,4 +330,12 @@ struct sfp_e1000_flags {
#define E1000_SFF_VENDOR_OUI_AVAGO 0x00176A00
#define E1000_SFF_VENDOR_OUI_INTEL 0x001B2100
+/* EEPROM byte offsets */
+#define IGB_SFF_8472_SWAP 0x5C
+#define IGB_SFF_8472_COMP 0x5E
+
+/* Bitmasks */
+#define IGB_SFF_ADDRESSING_MODE 0x4
+#define IGB_SFF_8472_UNSUP 0x00
+
#endif
diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 23b089c8..902001f3 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -4,6 +4,10 @@
#ifndef _E1000_ETHDEV_H_
#define _E1000_ETHDEV_H_
+
+#include <stdint.h>
+
+#include <rte_flow.h>
#include <rte_time.h>
#include <rte_pci.h>
@@ -27,6 +31,7 @@
#define E1000_CTRL_EXT_EXTEND_VLAN (1<<26) /* EXTENDED VLAN */
#define IGB_VFTA_SIZE 128
+#define IGB_HKEY_MAX_INDEX 10
#define IGB_MAX_RX_QUEUE_NUM 8
#define IGB_MAX_RX_QUEUE_NUM_82576 16
@@ -229,8 +234,8 @@ struct igb_ethertype_filter {
};
struct igb_rte_flow_rss_conf {
- struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
- uint16_t num; /**< Number of entries in queue[]. */
+ struct rte_flow_action_rss conf; /**< RSS parameters. */
+ uint8_t key[IGB_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */
uint16_t queue[IGB_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. */
};
@@ -357,6 +362,9 @@ void eth_igb_rx_queue_release(void *rxq);
void igb_dev_clear_queues(struct rte_eth_dev *dev);
void igb_dev_free_queues(struct rte_eth_dev *dev);
+uint64_t igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
+uint64_t igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
+
int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
@@ -370,6 +378,9 @@ int eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset);
int eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset);
int eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset);
+uint64_t igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
+uint64_t igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
+
int eth_igb_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
@@ -417,6 +428,8 @@ void igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
void igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_txq_info *qinfo);
+uint32_t em_get_max_pktlen(struct rte_eth_dev *dev);
+
/*
* RX/TX EM function prototypes
*/
@@ -426,6 +439,9 @@ void eth_em_rx_queue_release(void *rxq);
void em_dev_clear_queues(struct rte_eth_dev *dev);
void em_dev_free_queues(struct rte_eth_dev *dev);
+uint64_t em_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
+uint64_t em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
+
int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
@@ -439,6 +455,9 @@ int eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset);
int eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset);
int eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset);
+uint64_t em_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
+uint64_t em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
+
int eth_em_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
@@ -487,6 +506,10 @@ int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
struct rte_eth_flex_filter *filter,
bool add);
+int igb_rss_conf_init(struct igb_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in);
+int igb_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with);
int igb_config_rss_filter(struct rte_eth_dev *dev,
struct igb_rte_flow_rss_conf *conf,
bool add);
diff --git a/drivers/net/e1000/e1000_logs.c b/drivers/net/e1000/e1000_logs.c
new file mode 100644
index 00000000..22173939
--- /dev/null
+++ b/drivers/net/e1000/e1000_logs.c
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include "e1000_logs.h"
+
+/* declared as extern in e1000_logs.h */
+int e1000_logtype_init;
+int e1000_logtype_driver;
+
+/* avoids double registering of logs if EM and IGB drivers are in use */
+static int e1000_log_initialized;
+
+void
+e1000_igb_init_log(void)
+{
+ if (!e1000_log_initialized) {
+ e1000_logtype_init = rte_log_register("pmd.net.e1000.init");
+ if (e1000_logtype_init >= 0)
+ rte_log_set_level(e1000_logtype_init, RTE_LOG_NOTICE);
+ e1000_logtype_driver = rte_log_register("pmd.net.e1000.driver");
+ if (e1000_logtype_driver >= 0)
+ rte_log_set_level(e1000_logtype_driver, RTE_LOG_NOTICE);
+ e1000_log_initialized = 1;
+ }
+}
diff --git a/drivers/net/e1000/e1000_logs.h b/drivers/net/e1000/e1000_logs.h
index 50348e9e..69d3d311 100644
--- a/drivers/net/e1000/e1000_logs.h
+++ b/drivers/net/e1000/e1000_logs.h
@@ -5,6 +5,8 @@
#ifndef _E1000_LOGS_H_
#define _E1000_LOGS_H_
+#include <rte_log.h>
+
extern int e1000_logtype_init;
#define PMD_INIT_LOG(level, fmt, args...) \
rte_log(RTE_LOG_ ## level, e1000_logtype_init, \
@@ -41,4 +43,8 @@ extern int e1000_logtype_driver;
#define PMD_DRV_LOG(level, fmt, args...) \
PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+/* log init function shared by e1000 and igb drivers */
+void e1000_igb_init_log(void);
+
#endif /* _E1000_LOGS_H_ */
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index 242375ff..7039dc10 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -11,7 +11,6 @@
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
-#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
@@ -20,7 +19,6 @@
#include <rte_ethdev_pci.h>
#include <rte_memory.h>
#include <rte_eal.h>
-#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_dev.h>
@@ -94,6 +92,8 @@ static int em_get_rx_buffer_size(struct e1000_hw *hw);
static int eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, uint32_t pool);
static void eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index);
+static int eth_em_default_mac_addr_set(struct rte_eth_dev *dev,
+ struct ether_addr *addr);
static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
struct ether_addr *mc_addr_set,
@@ -105,9 +105,6 @@ static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
static enum e1000_fc_mode em_fc_setting = e1000_fc_full;
-int e1000_logtype_init;
-int e1000_logtype_driver;
-
/*
* The set of PCI devices this driver supports
*/
@@ -190,6 +187,7 @@ static const struct eth_dev_ops eth_em_ops = {
.dev_led_off = eth_em_led_off,
.flow_ctrl_get = eth_em_flow_ctrl_get,
.flow_ctrl_set = eth_em_flow_ctrl_set,
+ .mac_addr_set = eth_em_default_mac_addr_set,
.mac_addr_add = eth_em_rar_set,
.mac_addr_remove = eth_em_rar_clear,
.set_mc_addr_list = eth_em_set_mc_addr_list,
@@ -197,57 +195,6 @@ static const struct eth_dev_ops eth_em_ops = {
.txq_info_get = em_txq_info_get,
};
-/**
- * Atomically reads the link status information from global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-rte_em_dev_atomic_read_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = link;
- struct rte_eth_link *src = &(dev->data->dev_link);
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
-/**
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-rte_em_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &(dev->data->dev_link);
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
/**
* eth_em_dev_is_ich8 - Check for ICH8 device
@@ -506,6 +453,7 @@ eth_em_configure(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
+
PMD_INIT_FUNC_TRACE();
return 0;
@@ -802,7 +750,7 @@ eth_em_stop(struct rte_eth_dev *dev)
/* clear the recorded link status */
memset(&link, 0, sizeof(link));
- rte_em_dev_atomic_write_link_status(dev, &link);
+ rte_eth_linkstatus_set(dev, &link);
if (!rte_intr_allow_others(intr_handle))
/* resume to the default handler */
@@ -1069,9 +1017,11 @@ eth_em_rx_queue_intr_disable(struct rte_eth_dev *dev, __rte_unused uint16_t queu
return 0;
}
-static uint32_t
-em_get_max_pktlen(const struct e1000_hw *hw)
+uint32_t
+em_get_max_pktlen(struct rte_eth_dev *dev)
{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
@@ -1100,20 +1050,9 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
- dev_info->max_rx_pktlen = em_get_max_pktlen(hw);
+ dev_info->max_rx_pktlen = em_get_max_pktlen(dev);
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
- dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
- dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM;
/*
* Starting with 631xESB hw supports 2 TX/RX queues per port.
@@ -1135,6 +1074,13 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_queues = 1;
dev_info->max_tx_queues = 1;
+ dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa(dev);
+ dev_info->rx_offload_capa = em_get_rx_port_offloads_capa(dev) |
+ dev_info->rx_queue_offload_capa;
+ dev_info->tx_queue_offload_capa = em_get_tx_queue_offloads_capa(dev);
+ dev_info->tx_offload_capa = em_get_tx_port_offloads_capa(dev) |
+ dev_info->tx_queue_offload_capa;
+
dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
.nb_max = E1000_MAX_RING_DESC,
.nb_min = E1000_MIN_RING_DESC,
@@ -1152,6 +1098,12 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
ETH_LINK_SPEED_1G;
+
+ /* Preferred queue parameters */
+ dev_info->default_rxportconf.nb_queues = 1;
+ dev_info->default_txportconf.nb_queues = 1;
+ dev_info->default_txportconf.ring_size = 256;
+ dev_info->default_rxportconf.ring_size = 256;
}
/* return 0 means link status changed, -1 means not changed */
@@ -1160,7 +1112,7 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_eth_link link, old;
+ struct rte_eth_link link;
int link_check, count;
link_check = 0;
@@ -1195,8 +1147,6 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
rte_delay_ms(EM_LINK_UPDATE_CHECK_INTERVAL);
}
memset(&link, 0, sizeof(link));
- rte_em_dev_atomic_read_link_status(dev, &link);
- old = link;
/* Now we check if a transition has happened */
if (link_check && (link.link_status == ETH_LINK_DOWN)) {
@@ -1210,19 +1160,13 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
ETH_LINK_SPEED_FIXED);
} else if (!link_check && (link.link_status == ETH_LINK_UP)) {
- link.link_speed = 0;
+ link.link_speed = ETH_SPEED_NUM_NONE;
link.link_duplex = ETH_LINK_HALF_DUPLEX;
link.link_status = ETH_LINK_DOWN;
link.link_autoneg = ETH_LINK_FIXED;
}
- rte_em_dev_atomic_write_link_status(dev, &link);
- /* not changed */
- if (old.link_status == link.link_status)
- return -1;
-
- /* changed */
- return 0;
+ return rte_eth_linkstatus_set(dev, &link);
}
/*
@@ -1460,15 +1404,18 @@ em_vlan_hw_strip_enable(struct rte_eth_dev *dev)
static int
eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
+ struct rte_eth_rxmode *rxmode;
+
+ rxmode = &dev->data->dev_conf.rxmode;
if(mask & ETH_VLAN_STRIP_MASK){
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
em_vlan_hw_strip_enable(dev);
else
em_vlan_hw_strip_disable(dev);
}
if(mask & ETH_VLAN_FILTER_MASK){
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
em_vlan_hw_filter_enable(dev);
else
em_vlan_hw_filter_disable(dev);
@@ -1631,8 +1578,8 @@ eth_em_interrupt_action(struct rte_eth_dev *dev,
if (ret < 0)
return 0;
- memset(&link, 0, sizeof(link));
- rte_em_dev_atomic_read_link_status(dev, &link);
+ rte_eth_linkstatus_get(dev, &link);
+
if (link.link_status) {
PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s",
dev->data->port_id, link.link_speed,
@@ -1810,6 +1757,15 @@ eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index)
}
static int
+eth_em_default_mac_addr_set(struct rte_eth_dev *dev,
+ struct ether_addr *addr)
+{
+ eth_em_rar_clear(dev, 0);
+
+ return eth_em_rar_set(dev, (void *)addr, 0, 0);
+}
+
+static int
eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
struct rte_eth_dev_info dev_info;
@@ -1835,10 +1791,12 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
/* switch to jumbo mode if needed */
if (frame_size > ETHER_MAX_LEN) {
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
rctl |= E1000_RCTL_LPE;
} else {
- dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
rctl &= ~E1000_RCTL_LPE;
}
E1000_WRITE_REG(hw, E1000_RCTL, rctl);
@@ -1864,14 +1822,10 @@ RTE_PMD_REGISTER_PCI(net_e1000_em, rte_em_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_em, pci_id_em_map);
RTE_PMD_REGISTER_KMOD_DEP(net_e1000_em, "* igb_uio | uio_pci_generic | vfio-pci");
-RTE_INIT(e1000_init_log);
+/* see e1000_logs.c */
+RTE_INIT(igb_init_log);
static void
-e1000_init_log(void)
+igb_init_log(void)
{
- e1000_logtype_init = rte_log_register("pmd.net.e1000.init");
- if (e1000_logtype_init >= 0)
- rte_log_set_level(e1000_logtype_init, RTE_LOG_NOTICE);
- e1000_logtype_driver = rte_log_register("pmd.net.e1000.driver");
- if (e1000_logtype_driver >= 0)
- rte_log_set_level(e1000_logtype_driver, RTE_LOG_NOTICE);
+ e1000_igb_init_log();
}
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 02fae100..a6b3e92a 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -85,6 +85,7 @@ struct em_rx_queue {
struct em_rx_entry *sw_ring; /**< address of RX software ring. */
struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
+ uint64_t offloads; /**< Offloads of DEV_RX_OFFLOAD_* */
uint16_t nb_rx_desc; /**< number of RX descriptors. */
uint16_t rx_tail; /**< current value of RDT register. */
uint16_t nb_rx_hold; /**< number of held free RX desc. */
@@ -163,6 +164,7 @@ struct em_tx_queue {
uint8_t wthresh; /**< Write-back threshold register. */
struct em_ctx_info ctx_cache;
/**< Hardware context history.*/
+ uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */
};
#if 1
@@ -1151,6 +1153,36 @@ em_reset_tx_queue(struct em_tx_queue *txq)
memset((void*)&txq->ctx_cache, 0, sizeof (txq->ctx_cache));
}
+uint64_t
+em_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+ uint64_t tx_offload_capa;
+
+ RTE_SET_USED(dev);
+ tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM;
+
+ return tx_offload_capa;
+}
+
+uint64_t
+em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+ uint64_t tx_queue_offload_capa;
+
+ /*
+ * As only one Tx queue can be used, let the per-queue offloading
+ * capability be the same as the per-port offloading capability
+ * for convenience.
+ */
+ tx_queue_offload_capa = em_get_tx_port_offloads_capa(dev);
+
+ return tx_queue_offload_capa;
+}
+
int
eth_em_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1163,9 +1195,12 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
struct e1000_hw *hw;
uint32_t tsize;
uint16_t tx_rs_thresh, tx_free_thresh;
+ uint64_t offloads;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
/*
* Validate number of transmit descriptors.
* It must not exceed hardware maximum, and must be multiple
@@ -1269,6 +1304,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
em_reset_tx_queue(txq);
dev->data->tx_queues[queue_idx] = txq;
+ txq->offloads = offloads;
return 0;
}
@@ -1313,6 +1349,43 @@ em_reset_rx_queue(struct em_rx_queue *rxq)
rxq->pkt_last_seg = NULL;
}
+uint64_t
+em_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+ uint64_t rx_offload_capa;
+ uint32_t max_rx_pktlen;
+
+ max_rx_pktlen = em_get_max_pktlen(dev);
+
+ rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_SCATTER;
+ if (max_rx_pktlen > ETHER_MAX_LEN)
+ rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ return rx_offload_capa;
+}
+
+uint64_t
+em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+ uint64_t rx_queue_offload_capa;
+
+ /*
+ * As only one Rx queue can be used, let the per-queue offloading
+ * capability be the same as the per-port offloading capability
+ * for convenience.
+ */
+ rx_queue_offload_capa = em_get_rx_port_offloads_capa(dev);
+
+ return rx_queue_offload_capa;
+}
+
int
eth_em_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1325,9 +1398,12 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
struct em_rx_queue *rxq;
struct e1000_hw *hw;
uint32_t rsize;
+ uint64_t offloads;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
/*
* Validate number of receive descriptors.
* It must not exceed hardware maximum, and must be multiple
@@ -1382,8 +1458,8 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
rxq->queue_id = queue_idx;
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
- 0 : ETHER_CRC_LEN);
+ rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
@@ -1395,6 +1471,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
dev->data->rx_queues[queue_idx] = rxq;
em_reset_rx_queue(rxq);
+ rxq->offloads = offloads;
return 0;
}
@@ -1646,6 +1723,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
{
struct e1000_hw *hw;
struct em_rx_queue *rxq;
+ struct rte_eth_rxmode *rxmode;
uint32_t rctl;
uint32_t rfctl;
uint32_t rxcsum;
@@ -1654,6 +1732,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
int ret;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ rxmode = &dev->data->dev_conf.rxmode;
/*
* Make sure receives are disabled while setting
@@ -1714,8 +1793,8 @@ eth_em_rx_init(struct rte_eth_dev *dev)
* call to configure
*/
rxq->crc_len =
- (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
- 0 : ETHER_CRC_LEN);
+ (uint8_t)(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CRC_STRIP ? 0 : ETHER_CRC_LEN);
bus_addr = rxq->rx_ring_phys_addr;
E1000_WRITE_REG(hw, E1000_RDLEN(i),
@@ -1745,7 +1824,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
* to avoid splitting packets that don't fit into
* one buffer.
*/
- if (dev->data->dev_conf.rxmode.jumbo_frame ||
+ if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ||
rctl_bsize < ETHER_MAX_LEN) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
@@ -1755,7 +1834,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
}
}
- if (dev->data->dev_conf.rxmode.enable_scatter) {
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->rx_pkt_burst = eth_em_recv_scattered_pkts;
@@ -1768,7 +1847,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
*/
rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
- if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
rxcsum |= E1000_RXCSUM_IPOFL;
else
rxcsum &= ~E1000_RXCSUM_IPOFL;
@@ -1780,21 +1859,21 @@ eth_em_rx_init(struct rte_eth_dev *dev)
if ((hw->mac.type == e1000_ich9lan ||
hw->mac.type == e1000_pch2lan ||
hw->mac.type == e1000_ich10lan) &&
- dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+ rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13));
}
if (hw->mac.type == e1000_pch2lan) {
- if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
e1000_lv_jumbo_workaround_ich8lan(hw, TRUE);
else
e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
}
/* Setup the Receive Control Register. */
- if (dev->data->dev_conf.rxmode.hw_strip_crc)
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)
rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
else
rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
@@ -1814,7 +1893,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
/*
* Configure support of jumbo frames, if any.
*/
- if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
rctl |= E1000_RCTL_LPE;
else
rctl &= ~E1000_RCTL_LPE;
@@ -1894,6 +1973,7 @@ em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->scattered_rx = dev->data->scattered_rx;
qinfo->nb_desc = rxq->nb_rx_desc;
qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.offloads = rxq->offloads;
}
void
@@ -1911,4 +1991,5 @@ em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.tx_thresh.wthresh = txq->wthresh;
qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+ qinfo->conf.offloads = txq->offloads;
}
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index 3c5138de..edc7be31 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -20,7 +20,6 @@
#include <rte_ethdev_pci.h>
#include <rte_memory.h>
#include <rte_eal.h>
-#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_dev.h>
@@ -42,8 +41,6 @@
#define IGB_DEFAULT_TX_HTHRESH 1
#define IGB_DEFAULT_TX_WTHRESH ((hw->mac.type == e1000_82576) ? 1 : 16)
-#define IGB_HKEY_MAX_INDEX 10
-
/* Bit shift and mask */
#define IGB_4_BIT_WIDTH (CHAR_BIT / 2)
#define IGB_4_BIT_MASK RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
@@ -146,7 +143,7 @@ static int eth_igb_rar_set(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
uint32_t index, uint32_t pool);
static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
-static void eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
+static int eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *addr);
static void igbvf_intr_disable(struct e1000_hw *hw);
@@ -171,7 +168,7 @@ static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
-static void igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
+static int igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *addr);
static int igbvf_get_reg_length(struct rte_eth_dev *dev);
static int igbvf_get_regs(struct rte_eth_dev *dev,
@@ -224,6 +221,10 @@ static int eth_igb_get_eeprom(struct rte_eth_dev *dev,
struct rte_dev_eeprom_info *eeprom);
static int eth_igb_set_eeprom(struct rte_eth_dev *dev,
struct rte_dev_eeprom_info *eeprom);
+static int eth_igb_get_module_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo);
+static int eth_igb_get_module_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info);
static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
struct ether_addr *mc_addr_set,
uint32_t nb_mc_addr);
@@ -403,6 +404,8 @@ static const struct eth_dev_ops eth_igb_ops = {
.get_eeprom_length = eth_igb_get_eeprom_length,
.get_eeprom = eth_igb_get_eeprom,
.set_eeprom = eth_igb_set_eeprom,
+ .get_module_info = eth_igb_get_module_info,
+ .get_module_eeprom = eth_igb_get_module_eeprom,
.timesync_adjust_time = igb_timesync_adjust_time,
.timesync_read_time = igb_timesync_read_time,
.timesync_write_time = igb_timesync_write_time,
@@ -522,57 +525,6 @@ static const struct rte_igb_xstats_name_off rte_igbvf_stats_strings[] = {
#define IGBVF_NB_XSTATS (sizeof(rte_igbvf_stats_strings) / \
sizeof(rte_igbvf_stats_strings[0]))
-/**
- * Atomically reads the link status information from global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-rte_igb_dev_atomic_read_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = link;
- struct rte_eth_link *src = &(dev->data->dev_link);
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
-/**
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &(dev->data->dev_link);
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
static inline void
igb_intr_enable(struct rte_eth_dev *dev)
@@ -1559,7 +1511,7 @@ eth_igb_stop(struct rte_eth_dev *dev)
/* clear the recorded link status */
memset(&link, 0, sizeof(link));
- rte_igb_dev_atomic_write_link_status(dev, &link);
+ rte_eth_linkstatus_set(dev, &link);
if (!rte_intr_allow_others(intr_handle))
/* resume to the default handler */
@@ -1635,7 +1587,7 @@ eth_igb_close(struct rte_eth_dev *dev)
}
memset(&link, 0, sizeof(link));
- rte_igb_dev_atomic_write_link_status(dev, &link);
+ rte_eth_linkstatus_set(dev, &link);
}
static int
@@ -2196,22 +2148,15 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
- dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
- dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
+ dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
+ dev_info->rx_queue_offload_capa;
+ dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
+ dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
+ dev_info->tx_queue_offload_capa;
switch (hw->mac.type) {
case e1000_82575:
@@ -2274,6 +2219,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2282,7 +2228,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
.hthresh = IGB_DEFAULT_TX_HTHRESH,
.wthresh = IGB_DEFAULT_TX_WTHRESH,
},
- .txq_flags = 0,
+ .offloads = 0,
};
dev_info->rx_desc_lim = rx_desc_lim;
@@ -2325,14 +2271,9 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
@@ -2353,6 +2294,13 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
break;
}
+ dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
+ dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
+ dev_info->rx_queue_offload_capa;
+ dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
+ dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
+ dev_info->tx_queue_offload_capa;
+
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
.pthresh = IGB_DEFAULT_RX_PTHRESH,
@@ -2361,6 +2309,7 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2369,7 +2318,7 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
.hthresh = IGB_DEFAULT_TX_HTHRESH,
.wthresh = IGB_DEFAULT_TX_WTHRESH,
},
- .txq_flags = 0,
+ .offloads = 0,
};
dev_info->rx_desc_lim = rx_desc_lim;
@@ -2382,7 +2331,7 @@ eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_eth_link link, old;
+ struct rte_eth_link link;
int link_check, count;
link_check = 0;
@@ -2423,8 +2372,6 @@ eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL);
}
memset(&link, 0, sizeof(link));
- rte_igb_dev_atomic_read_link_status(dev, &link);
- old = link;
/* Now we check if a transition has happened */
if (link_check) {
@@ -2443,14 +2390,8 @@ eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
link.link_status = ETH_LINK_DOWN;
link.link_autoneg = ETH_LINK_FIXED;
}
- rte_igb_dev_atomic_write_link_status(dev, &link);
- /* not changed */
- if (old.link_status == link.link_status)
- return -1;
-
- /* changed */
- return 0;
+ return rte_eth_linkstatus_set(dev, &link);
}
/*
@@ -2704,7 +2645,7 @@ igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
/* Update maximum packet length */
- if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
E1000_WRITE_REG(hw, E1000_RLPML,
dev->data->dev_conf.rxmode.max_rx_pkt_len +
VLAN_TAG_SIZE);
@@ -2723,7 +2664,7 @@ igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
/* Update maximum packet length */
- if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
E1000_WRITE_REG(hw, E1000_RLPML,
dev->data->dev_conf.rxmode.max_rx_pkt_len +
2 * VLAN_TAG_SIZE);
@@ -2732,22 +2673,25 @@ igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
static int
eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
+ struct rte_eth_rxmode *rxmode;
+
+ rxmode = &dev->data->dev_conf.rxmode;
if(mask & ETH_VLAN_STRIP_MASK){
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
igb_vlan_hw_strip_enable(dev);
else
igb_vlan_hw_strip_disable(dev);
}
if(mask & ETH_VLAN_FILTER_MASK){
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
igb_vlan_hw_filter_enable(dev);
else
igb_vlan_hw_filter_disable(dev);
}
if(mask & ETH_VLAN_EXTEND_MASK){
- if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
igb_vlan_hw_extend_enable(dev);
else
igb_vlan_hw_extend_disable(dev);
@@ -2887,8 +2831,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev,
if (ret < 0)
return 0;
- memset(&link, 0, sizeof(link));
- rte_igb_dev_atomic_read_link_status(dev, &link);
+ rte_eth_linkstatus_get(dev, &link);
if (link.link_status) {
PMD_INIT_LOG(INFO,
" Port %d: Link Up - speed %u Mbps - %s",
@@ -3146,13 +3089,14 @@ eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
e1000_rar_set(hw, addr, index);
}
-static void
+static int
eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *addr)
{
eth_igb_rar_clear(dev, 0);
-
eth_igb_rar_set(dev, (void *)addr, 0, 0);
+
+ return 0;
}
/*
* Virtual Function operations
@@ -3250,14 +3194,14 @@ igbvf_dev_configure(struct rte_eth_dev *dev)
* Keep the persistent behavior the same as Host PF
*/
#ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
- if (!conf->rxmode.hw_strip_crc) {
+ if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
- conf->rxmode.hw_strip_crc = 1;
+ conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
#else
- if (conf->rxmode.hw_strip_crc) {
+ if (conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
- conf->rxmode.hw_strip_crc = 0;
+ conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
}
#endif
@@ -3504,7 +3448,7 @@ igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
return 0;
}
-static void
+static int
igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr)
{
struct e1000_hw *hw =
@@ -3512,6 +3456,7 @@ igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr)
/* index is not used by rar_set() */
hw->mac.ops.rar_set(hw, (void *)addr, 0);
+ return 0;
}
@@ -4499,10 +4444,12 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
/* switch to jumbo mode if needed */
if (frame_size > ETHER_MAX_LEN) {
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
rctl |= E1000_RCTL_LPE;
} else {
- dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
rctl &= ~E1000_RCTL_LPE;
}
E1000_WRITE_REG(hw, E1000_RCTL, rctl);
@@ -5384,6 +5331,86 @@ eth_igb_set_eeprom(struct rte_eth_dev *dev,
}
static int
+eth_igb_get_module_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ uint32_t status = 0;
+ uint16_t sff8472_rev, addr_mode;
+ bool page_swap = false;
+
+ if (hw->phy.media_type == e1000_media_type_copper ||
+ hw->phy.media_type == e1000_media_type_unknown)
+ return -EOPNOTSUPP;
+
+ /* Check whether we support SFF-8472 or not */
+ status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev);
+ if (status)
+ return -EIO;
+
+ /* addressing mode is not supported */
+ status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode);
+ if (status)
+ return -EIO;
+
+ /* addressing mode is not supported */
+ if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) {
+ PMD_DRV_LOG(ERR,
+ "Address change required to access page 0xA2, "
+ "but not supported. Please report the module "
+ "type to the driver maintainers.\n");
+ page_swap = true;
+ }
+
+ if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) {
+ /* We have an SFP, but it does not support SFF-8472 */
+ modinfo->type = RTE_ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
+ } else {
+ /* We have an SFP which supports a revision of SFF-8472 */
+ modinfo->type = RTE_ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
+ }
+
+ return 0;
+}
+
+static int
+eth_igb_get_module_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ uint32_t status = 0;
+ uint16_t dataword[RTE_ETH_MODULE_SFF_8472_LEN / 2 + 1];
+ u16 first_word, last_word;
+ int i = 0;
+
+ if (info->length == 0)
+ return -EINVAL;
+
+ first_word = info->offset >> 1;
+ last_word = (info->offset + info->length - 1) >> 1;
+
+ /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
+ for (i = 0; i < last_word - first_word + 1; i++) {
+ status = e1000_read_phy_reg_i2c(hw, (first_word + i) * 2,
+ &dataword[i]);
+ if (status) {
+ /* Error occurred while reading module */
+ return -EIO;
+ }
+
+ dataword[i] = rte_be_to_cpu_16(dataword[i]);
+ }
+
+ memcpy(info->data, (u8 *)dataword + (info->offset & 1), info->length);
+
+ return 0;
+}
+
+static int
eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct e1000_hw *hw =
@@ -5631,7 +5658,7 @@ igb_rss_filter_restore(struct rte_eth_dev *dev)
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- if (filter_info->rss_info.num)
+ if (filter_info->rss_info.conf.queue_num)
igb_config_rss_filter(dev, &filter_info->rss_info, TRUE);
}
@@ -5654,3 +5681,11 @@ RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb, "* igb_uio | uio_pci_generic | vfio-pci
RTE_PMD_REGISTER_PCI(net_e1000_igb_vf, rte_igbvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb_vf, pci_id_igbvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb_vf, "* igb_uio | vfio-pci");
+
+/* see e1000_logs.c */
+RTE_INIT(e1000_init_log);
+static void
+e1000_init_log(void)
+{
+ e1000_igb_init_log();
+}
diff --git a/drivers/net/e1000/igb_flow.c b/drivers/net/e1000/igb_flow.c
index a1427596..07385291 100644
--- a/drivers/net/e1000/igb_flow.c
+++ b/drivers/net/e1000/igb_flow.c
@@ -175,7 +175,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+ ipv4_mask = item->mask;
/**
* Only support src & dst addresses, protocol,
* others should be masked.
@@ -198,7 +198,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
filter->src_ip_mask = ipv4_mask->hdr.src_addr;
filter->proto_mask = ipv4_mask->hdr.next_proto_id;
- ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+ ipv4_spec = item->spec;
filter->dst_ip = ipv4_spec->hdr.dst_addr;
filter->src_ip = ipv4_spec->hdr.src_addr;
filter->proto = ipv4_spec->hdr.next_proto_id;
@@ -228,7 +228,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
/* get the TCP/UDP/SCTP info */
if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
if (item->spec && item->mask) {
- tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ tcp_mask = item->mask;
/**
* Only support src & dst ports, tcp flags,
@@ -263,14 +263,14 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ tcp_spec = item->spec;
filter->dst_port = tcp_spec->hdr.dst_port;
filter->src_port = tcp_spec->hdr.src_port;
filter->tcp_flags = tcp_spec->hdr.tcp_flags;
}
} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
if (item->spec && item->mask) {
- udp_mask = (const struct rte_flow_item_udp *)item->mask;
+ udp_mask = item->mask;
/**
* Only support src & dst ports,
@@ -289,14 +289,13 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
filter->dst_port_mask = udp_mask->hdr.dst_port;
filter->src_port_mask = udp_mask->hdr.src_port;
- udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ udp_spec = item->spec;
filter->dst_port = udp_spec->hdr.dst_port;
filter->src_port = udp_spec->hdr.src_port;
}
} else {
if (item->spec && item->mask) {
- sctp_mask = (const struct rte_flow_item_sctp *)
- item->mask;
+ sctp_mask = item->mask;
/**
* Only support src & dst ports,
@@ -380,6 +379,15 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
if (attr->priority > 0xFFFF) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
@@ -533,8 +541,8 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- eth_spec = (const struct rte_flow_item_eth *)item->spec;
- eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ eth_spec = item->spec;
+ eth_mask = item->mask;
/* Mask bits of source MAC address must be full of 0.
* Mask bits of destination MAC address must be full
@@ -625,6 +633,14 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
}
/* Not supported */
+ if (attr->transfer) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
if (attr->priority) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
@@ -848,8 +864,8 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
- tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ tcp_spec = item->spec;
+ tcp_mask = item->mask;
if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
tcp_mask->hdr.src_port ||
tcp_mask->hdr.dst_port ||
@@ -924,6 +940,15 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
/* Support 2 priorities, the lowest or highest. */
if (!attr->priority) {
filter->hig_pri = 0;
@@ -1065,8 +1090,8 @@ item_loop:
return -rte_errno;
}
- raw_spec = (const struct rte_flow_item_raw *)item->spec;
- raw_mask = (const struct rte_flow_item_raw *)item->mask;
+ raw_spec = item->spec;
+ raw_mask = item->mask;
if (!raw_mask->length ||
!raw_mask->relative) {
@@ -1212,6 +1237,15 @@ item_loop:
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
if (attr->priority > 0xFFFF) {
memset(filter, 0, sizeof(struct rte_eth_flex_filter));
rte_flow_error_set(error, EINVAL,
@@ -1293,7 +1327,7 @@ igb_parse_rss_filter(struct rte_eth_dev *dev,
rss = (const struct rte_flow_action_rss *)act->conf;
- if (!rss || !rss->num) {
+ if (!rss || !rss->queue_num) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
@@ -1301,7 +1335,7 @@ igb_parse_rss_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
- for (n = 0; n < rss->num; n++) {
+ for (n = 0; n < rss->queue_num; n++) {
if (rss->queue[n] >= dev->data->nb_rx_queues) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -1311,14 +1345,26 @@ igb_parse_rss_filter(struct rte_eth_dev *dev,
}
}
- if (rss->rss_conf)
- rss_conf->rss_conf = *rss->rss_conf;
- else
- rss_conf->rss_conf.rss_hf = IGB_RSS_OFFLOAD_ALL;
-
- for (n = 0; n < rss->num; ++n)
- rss_conf->queue[n] = rss->queue[n];
- rss_conf->num = rss->num;
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "non-default RSS hash functions are not supported");
+ if (rss->level)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "a nonzero RSS encapsulation level is not supported");
+ if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS hash key must be exactly 40 bytes");
+ if (rss->queue_num > RTE_DIM(rss_conf->queue))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "too many queues for RSS context");
+ if (igb_rss_conf_init(rss_conf, rss))
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS context initialization failure");
/* check if the next not void item is END */
index++;
@@ -1350,6 +1396,15 @@ igb_parse_rss_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
if (attr->priority > 0xFFFF) {
memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
rte_flow_error_set(error, EINVAL,
@@ -1519,9 +1574,8 @@ igb_flow_create(struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR, "failed to allocate memory");
goto out;
}
- rte_memcpy(&rss_filter_ptr->filter_info,
- &rss_conf,
- sizeof(struct igb_rte_flow_rss_conf));
+ igb_rss_conf_init(&rss_filter_ptr->filter_info,
+ &rss_conf.conf);
TAILQ_INSERT_TAIL(&igb_filter_rss_list,
rss_filter_ptr, entries);
flow->rule = rss_filter_ptr;
@@ -1758,7 +1812,7 @@ igb_clear_rss_filter(struct rte_eth_dev *dev)
struct e1000_filter_info *filter =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- if (filter->rss_info.num)
+ if (filter->rss_info.conf.queue_num)
igb_config_rss_filter(dev, &filter->rss_info, FALSE);
}
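
The igb_flow.c hunks above follow the 18.05 rework of struct rte_flow_action_rss: func/level/types/key_len/queue_num replace the old rss_conf/num pair, and the driver now rejects non-default hash functions, encapsulation levels and key lengths other than 40 bytes. A hedged application-side sketch of an RSS action these checks would accept (queue list and key contents are examples, not taken from the patch):

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

static const uint16_t rss_queues[] = { 0, 1 };
static const uint8_t rss_key[40] = { 0 };       /* 40-byte Toeplitz key */

static const struct rte_flow_action_rss rss_action = {
    .func = RTE_ETH_HASH_FUNCTION_DEFAULT,      /* only DEFAULT accepted */
    .level = 0,                                 /* outermost headers only */
    .types = ETH_RSS_IP,
    .key_len = sizeof(rss_key),                 /* must be exactly 40 */
    .queue_num = RTE_DIM(rss_queues),
    .key = rss_key,
    .queue = rss_queues,
};

static const struct rte_flow_action rss_actions[] = {
    { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss_action },
    { .type = RTE_FLOW_ACTION_TYPE_END },
};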
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 2f371672..5f729f27 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -107,6 +107,7 @@ struct igb_rx_queue {
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
uint32_t flags; /**< RX flags. */
+ uint64_t offloads; /**< offloads of DEV_RX_OFFLOAD_* */
};
/**
@@ -180,6 +181,7 @@ struct igb_tx_queue {
/**< Start context position for transmit queue. */
struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
/**< Hardware context history.*/
+ uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */
};
#if 1
@@ -1447,6 +1449,33 @@ igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
igb_reset_tx_queue_stat(txq);
}
+uint64_t
+igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+ uint64_t rx_offload_capa;
+
+ RTE_SET_USED(dev);
+ rx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS;
+
+ return rx_offload_capa;
+}
+
+uint64_t
+igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+ uint64_t rx_queue_offload_capa;
+
+ rx_queue_offload_capa = igb_get_tx_port_offloads_capa(dev);
+
+ return rx_queue_offload_capa;
+}
+
int
eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1458,6 +1487,9 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
struct igb_tx_queue *txq;
struct e1000_hw *hw;
uint32_t size;
+ uint64_t offloads;
+
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1542,6 +1574,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
dev->tx_pkt_burst = eth_igb_xmit_pkts;
dev->tx_pkt_prepare = &eth_igb_prep_pkts;
dev->data->tx_queues[queue_idx] = txq;
+ txq->offloads = offloads;
return 0;
}
@@ -1593,6 +1626,45 @@ igb_reset_rx_queue(struct igb_rx_queue *rxq)
rxq->pkt_last_seg = NULL;
}
+uint64_t
+igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+ uint64_t rx_offload_capa;
+
+ RTE_SET_USED(dev);
+ rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_SCATTER;
+
+ return rx_offload_capa;
+}
+
+uint64_t
+igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t rx_queue_offload_capa;
+
+ switch (hw->mac.type) {
+ case e1000_vfadapt_i350:
+ /*
+ * As only one Rx queue can be used, let the per-queue offload
+ * capability be the same as the per-port capability for
+ * convenience.
+ */
+ rx_queue_offload_capa = igb_get_rx_port_offloads_capa(dev);
+ break;
+ default:
+ rx_queue_offload_capa = 0;
+ }
+ return rx_queue_offload_capa;
+}
+
int
eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1605,6 +1677,9 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
struct igb_rx_queue *rxq;
struct e1000_hw *hw;
unsigned int size;
+ uint64_t offloads;
+
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1630,6 +1705,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE);
if (rxq == NULL)
return -ENOMEM;
+ rxq->offloads = offloads;
rxq->mb_pool = mp;
rxq->nb_rx_desc = nb_desc;
rxq->pthresh = rx_conf->rx_thresh.pthresh;
@@ -1644,8 +1720,8 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
- ETHER_CRC_LEN);
+ rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
/*
* Allocate RX ring hardware descriptors. A memzone large enough to
@@ -2227,6 +2303,7 @@ igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
int
eth_igb_rx_init(struct rte_eth_dev *dev)
{
+ struct rte_eth_rxmode *rxmode;
struct e1000_hw *hw;
struct igb_rx_queue *rxq;
uint32_t rctl;
@@ -2247,10 +2324,12 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
rctl = E1000_READ_REG(hw, E1000_RCTL);
E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+ rxmode = &dev->data->dev_conf.rxmode;
+
/*
* Configure support of jumbo frames, if any.
*/
- if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
rctl |= E1000_RCTL_LPE;
/*
@@ -2292,9 +2371,8 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
* Reset crc_len in case it was changed after queue setup by a
* call to configure
*/
- rxq->crc_len =
- (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
- 0 : ETHER_CRC_LEN);
+ rxq->crc_len = (uint8_t)(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CRC_STRIP ? 0 : ETHER_CRC_LEN);
bus_addr = rxq->rx_ring_phys_addr;
E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
@@ -2362,7 +2440,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
}
- if (dev->data->dev_conf.rxmode.enable_scatter) {
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
@@ -2406,16 +2484,24 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
rxcsum |= E1000_RXCSUM_PCSD;
/* Enable both L3/L4 rx checksum offload */
- if (dev->data->dev_conf.rxmode.hw_ip_checksum)
- rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
- E1000_RXCSUM_CRCOFL);
+ if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+ rxcsum |= E1000_RXCSUM_IPOFL;
+ else
+ rxcsum &= ~E1000_RXCSUM_IPOFL;
+ if (rxmode->offloads &
+ (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+ rxcsum |= E1000_RXCSUM_TUOFL;
else
- rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
- E1000_RXCSUM_CRCOFL);
+ rxcsum &= ~E1000_RXCSUM_TUOFL;
+ if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ rxcsum |= E1000_RXCSUM_CRCOFL;
+ else
+ rxcsum &= ~E1000_RXCSUM_CRCOFL;
+
E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
/* Setup the Receive Control Register. */
- if (dev->data->dev_conf.rxmode.hw_strip_crc) {
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
/* set STRCRC bit in all queues */
@@ -2654,7 +2740,7 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
}
- if (dev->data->dev_conf.rxmode.enable_scatter) {
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
@@ -2741,6 +2827,7 @@ igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
qinfo->conf.rx_drop_en = rxq->drop_en;
+ qinfo->conf.offloads = rxq->offloads;
}
void
@@ -2756,6 +2843,41 @@ igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.tx_thresh.pthresh = txq->pthresh;
qinfo->conf.tx_thresh.hthresh = txq->hthresh;
qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+ qinfo->conf.offloads = txq->offloads;
+}
+
+int
+igb_rss_conf_init(struct igb_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in)
+{
+ if (in->key_len > RTE_DIM(out->key) ||
+ in->queue_num > RTE_DIM(out->queue))
+ return -EINVAL;
+ out->conf = (struct rte_flow_action_rss){
+ .func = in->func,
+ .level = in->level,
+ .types = in->types,
+ .key_len = in->key_len,
+ .queue_num = in->queue_num,
+ .key = memcpy(out->key, in->key, in->key_len),
+ .queue = memcpy(out->queue, in->queue,
+ sizeof(*in->queue) * in->queue_num),
+ };
+ return 0;
+}
+
+int
+igb_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with)
+{
+ return (comp->func == with->func &&
+ comp->level == with->level &&
+ comp->types == with->types &&
+ comp->key_len == with->key_len &&
+ comp->queue_num == with->queue_num &&
+ !memcmp(comp->key, with->key, with->key_len) &&
+ !memcmp(comp->queue, with->queue,
+ sizeof(*with->queue) * with->queue_num));
}
int
@@ -2764,7 +2886,12 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
{
uint32_t shift;
uint16_t i, j;
- struct rte_eth_rss_conf rss_conf = conf->rss_conf;
+ struct rte_eth_rss_conf rss_conf = {
+ .rss_key = conf->conf.key_len ?
+ (void *)(uintptr_t)conf->conf.key : NULL,
+ .rss_key_len = conf->conf.key_len,
+ .rss_hf = conf->conf.types,
+ };
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -2772,8 +2899,8 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (!add) {
- if (memcmp(conf, &filter_info->rss_info,
- sizeof(struct igb_rte_flow_rss_conf)) == 0) {
+ if (igb_action_rss_same(&filter_info->rss_info.conf,
+ &conf->conf)) {
igb_rss_disable(dev);
memset(&filter_info->rss_info, 0,
sizeof(struct igb_rte_flow_rss_conf));
@@ -2782,7 +2909,7 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
return -EINVAL;
}
- if (filter_info->rss_info.num)
+ if (filter_info->rss_info.conf.queue_num)
return -EINVAL;
/* Fill in redirection table. */
@@ -2794,9 +2921,9 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
} reta;
uint8_t q_idx;
- q_idx = conf->queue[j];
- if (j == conf->num)
+ if (j == conf->conf.queue_num)
j = 0;
+ q_idx = conf->conf.queue[j];
reta.bytes[i & 3] = (uint8_t)(q_idx << shift);
if ((i & 3) == 3)
E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
@@ -2813,8 +2940,8 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
rss_conf.rss_key = rss_intel_key; /* Default hash key */
igb_hw_rss_hash_set(hw, &rss_conf);
- rte_memcpy(&filter_info->rss_info,
- conf, sizeof(struct igb_rte_flow_rss_conf));
+ if (igb_rss_conf_init(&filter_info->rss_info, &conf->conf))
+ return -EINVAL;
return 0;
}
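
The queue-setup changes above implement the new offload model: the offloads recorded per queue are the per-queue request OR'ed with the port-wide rxmode/txmode offloads, and capabilities are reported through the new *_offloads_capa helpers. A small caller-side sketch under that model (port id, ring size and flag choices are examples only):

#include <rte_ethdev.h>
#include <rte_lcore.h>

static int
configure_rx(uint16_t port_id, struct rte_mempool *mp)
{
    struct rte_eth_conf port_conf = { 0 };
    struct rte_eth_rxconf rxq_conf = { 0 };
    int ret;

    /* port-wide Rx offloads, applied to every queue */
    port_conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM;
    ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
    if (ret != 0)
        return ret;

    /* queue-only addition; the PMD stores rxq->offloads = queue | port */
    rxq_conf.offloads = DEV_RX_OFFLOAD_SCATTER;
    return rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
                                  &rxq_conf, mp);
}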
diff --git a/drivers/net/e1000/meson.build b/drivers/net/e1000/meson.build
index 3a1bf5af..cf456995 100644
--- a/drivers/net/e1000/meson.build
+++ b/drivers/net/e1000/meson.build
@@ -5,6 +5,7 @@ subdir('base')
objs = [base_objs]
sources = files(
+ 'e1000_logs.c',
'em_ethdev.c',
'em_rxtx.c',
'igb_ethdev.c',
diff --git a/drivers/net/ena/Makefile b/drivers/net/ena/Makefile
index f9bfe053..43339f3b 100644
--- a/drivers/net/ena/Makefile
+++ b/drivers/net/ena/Makefile
@@ -43,6 +43,9 @@ INCLUDES :=-I$(SRCDIR) -I$(SRCDIR)/base/ena_defs -I$(SRCDIR)/base
EXPORT_MAP := rte_pmd_ena_version.map
LIBABIVER := 1
+# rte_fbarray is not yet part of stable API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
VPATH += $(SRCDIR)/base
#
# all source are stored in SRCS-y
diff --git a/drivers/net/ena/base/ena_plat_dpdk.h b/drivers/net/ena/base/ena_plat_dpdk.h
index 8cba319e..93345199 100644
--- a/drivers/net/ena/base/ena_plat_dpdk.h
+++ b/drivers/net/ena/base/ena_plat_dpdk.h
@@ -188,7 +188,8 @@ typedef uint64_t dma_addr_t;
ENA_TOUCH(dmadev); ENA_TOUCH(handle); \
snprintf(z_name, sizeof(z_name), \
"ena_alloc_%d", ena_alloc_cnt++); \
- mz = rte_memzone_reserve(z_name, size, SOCKET_ID_ANY, 0); \
+ mz = rte_memzone_reserve(z_name, size, SOCKET_ID_ANY, \
+ RTE_MEMZONE_IOVA_CONTIG); \
memset(mz->addr, 0, size); \
virt = mz->addr; \
phys = mz->iova; \
@@ -206,7 +207,8 @@ typedef uint64_t dma_addr_t;
ENA_TOUCH(dmadev); ENA_TOUCH(dev_node); \
snprintf(z_name, sizeof(z_name), \
"ena_alloc_%d", ena_alloc_cnt++); \
- mz = rte_memzone_reserve(z_name, size, node, 0); \
+ mz = rte_memzone_reserve(z_name, size, node, \
+ RTE_MEMZONE_IOVA_CONTIG); \
memset(mz->addr, 0, size); \
virt = mz->addr; \
phys = mz->iova; \
@@ -219,7 +221,8 @@ typedef uint64_t dma_addr_t;
ENA_TOUCH(dmadev); ENA_TOUCH(dev_node); \
snprintf(z_name, sizeof(z_name), \
"ena_alloc_%d", ena_alloc_cnt++); \
- mz = rte_memzone_reserve(z_name, size, node, 0); \
+ mz = rte_memzone_reserve(z_name, size, node, \
+ RTE_MEMZONE_IOVA_CONTIG); \
memset(mz->addr, 0, size); \
virt = mz->addr; \
} while (0)
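
A minimal sketch of the allocation contract the change above relies on: after the 18.05 memory rework a memzone is only guaranteed IOVA-contiguous when RTE_MEMZONE_IOVA_CONTIG is requested, which DMA'd control structures need (the helper name below is illustrative):

#include <rte_memzone.h>

static void *
alloc_dma_mem(const char *name, size_t size, rte_iova_t *iova)
{
    const struct rte_memzone *mz;

    mz = rte_memzone_reserve(name, size, SOCKET_ID_ANY,
                             RTE_MEMZONE_IOVA_CONTIG);
    if (mz == NULL)
        return NULL;
    *iova = mz->iova;    /* bus address usable for device DMA */
    return mz->addr;
}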
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 34b2a8d7..c595cc7e 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -238,10 +238,6 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size);
static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
-static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
- uint64_t offloads);
-static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
- uint64_t offloads);
static const struct eth_dev_ops ena_dev_ops = {
.dev_configure = ena_dev_configure,
@@ -264,11 +260,15 @@ static const struct eth_dev_ops ena_dev_ops = {
static inline int ena_cpu_to_node(int cpu)
{
struct rte_config *config = rte_eal_get_configuration();
+ struct rte_fbarray *arr = &config->mem_config->memzones;
+ const struct rte_memzone *mz;
- if (likely(cpu < RTE_MAX_MEMZONE))
- return config->mem_config->memzone[cpu].socket_id;
+ if (unlikely(cpu >= RTE_MAX_MEMZONE))
+ return NUMA_NO_NODE;
- return NUMA_NO_NODE;
+ mz = rte_fbarray_get(arr, cpu);
+
+ return mz->socket_id;
}
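
Standalone sketch of the lookup pattern ena_cpu_to_node() now uses: with the 18.05 memory rework, memzones live in an rte_fbarray inside the shared mem_config instead of a fixed array, which is also why the ena Makefile above adds -DALLOW_EXPERIMENTAL_API. The NULL check here is added for illustration:

#include <rte_config.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_fbarray.h>
#include <rte_memzone.h>

static int
memzone_socket_by_index(unsigned int idx)
{
    struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
    const struct rte_memzone *mz;

    if (idx >= RTE_MAX_MEMZONE)
        return -1;                          /* out of range */
    mz = rte_fbarray_get(&mcfg->memzones, idx);
    return mz != NULL ? mz->socket_id : -1;
}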
static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
@@ -724,7 +724,7 @@ static int ena_link_update(struct rte_eth_dev *dev,
{
struct rte_eth_link *link = &dev->data->dev_link;
- link->link_status = 1;
+ link->link_status = ETH_LINK_UP;
link->link_speed = ETH_SPEED_NUM_10G;
link->link_duplex = ETH_LINK_FULL_DUPLEX;
@@ -1001,12 +1001,6 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
return -EINVAL;
}
- if (tx_conf->txq_flags == ETH_TXQ_FLAGS_IGNORE &&
- !ena_are_tx_queue_offloads_allowed(adapter, tx_conf->offloads)) {
- RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
- return -EINVAL;
- }
-
ena_qid = ENA_IO_TXQ_IDX(queue_idx);
ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
@@ -1061,7 +1055,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
for (i = 0; i < txq->ring_size; i++)
txq->empty_tx_reqs[i] = i;
- txq->offloads = tx_conf->offloads;
+ txq->offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
/* Store pointer to this queue in upper layer */
txq->configured = 1;
@@ -1074,7 +1068,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
__rte_unused unsigned int socket_id,
- const struct rte_eth_rxconf *rx_conf,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
struct ena_com_create_io_ctx ctx =
@@ -1110,11 +1104,6 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
return -EINVAL;
}
- if (!ena_are_rx_queue_offloads_allowed(adapter, rx_conf->offloads)) {
- RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
- return -EINVAL;
- }
-
ena_qid = ENA_IO_RXQ_IDX(queue_idx);
ctx.qid = ena_qid;
@@ -1418,22 +1407,6 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
{
struct ena_adapter *adapter =
(struct ena_adapter *)(dev->data->dev_private);
- uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
- uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
-
- if ((tx_offloads & adapter->tx_supported_offloads) != tx_offloads) {
- RTE_LOG(ERR, PMD, "Some Tx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
- tx_offloads, adapter->tx_supported_offloads);
- return -ENOTSUP;
- }
-
- if ((rx_offloads & adapter->rx_supported_offloads) != rx_offloads) {
- RTE_LOG(ERR, PMD, "Some Rx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
- rx_offloads, adapter->rx_supported_offloads);
- return -ENOTSUP;
- }
if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
@@ -1455,8 +1428,8 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
break;
}
- adapter->tx_selected_offloads = tx_offloads;
- adapter->rx_selected_offloads = rx_offloads;
+ adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
+ adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
return 0;
}
@@ -1485,32 +1458,6 @@ static void ena_init_rings(struct ena_adapter *adapter)
}
}
-static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
- uint64_t offloads)
-{
- uint64_t port_offloads = adapter->tx_selected_offloads;
-
- /* Check if port supports all requested offloads.
- * True if all offloads selected for queue are set for port.
- */
- if ((offloads & port_offloads) != offloads)
- return false;
- return true;
-}
-
-static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
- uint64_t offloads)
-{
- uint64_t port_offloads = adapter->rx_selected_offloads;
-
- /* Check if port supports all requested offloads.
- * True if all offloads selected for queue are set for port.
- */
- if ((offloads & port_offloads) != offloads)
- return false;
- return true;
-}
-
static void ena_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
@@ -1527,8 +1474,6 @@ static void ena_infos_get(struct rte_eth_dev *dev,
ena_dev = &adapter->ena_dev;
ena_assert_msg(ena_dev != NULL, "Uninitialized device");
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-
dev_info->speed_capa =
ETH_LINK_SPEED_1G |
ETH_LINK_SPEED_2_5G |
diff --git a/drivers/net/enic/base/vnic_dev.c b/drivers/net/enic/base/vnic_dev.c
index 05b595eb..8483f76f 100644
--- a/drivers/net/enic/base/vnic_dev.c
+++ b/drivers/net/enic/base/vnic_dev.c
@@ -10,6 +10,7 @@
#include "vnic_dev.h"
#include "vnic_resource.h"
#include "vnic_devcmd.h"
+#include "vnic_nic.h"
#include "vnic_stats.h"
@@ -484,7 +485,7 @@ int vnic_dev_capable_adv_filters(struct vnic_dev *vdev)
* Return true in filter_tags if supported
*/
int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
- u8 *filter_tags)
+ u8 *filter_actions)
{
u64 args[4];
int err;
@@ -492,14 +493,10 @@ int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
err = vnic_dev_advanced_filters_cap(vdev, args, 4);
- /* determine if filter tags are available */
- if (err)
- *filter_tags = 0;
- if ((args[2] == FILTER_CAP_MODE_V1) &&
- (args[3] & FILTER_ACTION_FILTER_ID_FLAG))
- *filter_tags = 1;
- else
- *filter_tags = 0;
+ /* determine supported filter actions */
+ *filter_actions = FILTER_ACTION_RQ_STEERING_FLAG; /* always available */
+ if (args[2] == FILTER_CAP_MODE_V1)
+ *filter_actions = args[3];
if (err || ((args[0] == 1) && (args[1] == 0))) {
/* Adv filter Command not supported or adv filters available but
@@ -587,17 +584,9 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
u64 a0, a1;
int wait = 1000;
- static u32 instance;
- char name[NAME_MAX];
- if (!vdev->stats) {
- snprintf((char *)name, sizeof(name),
- "vnic_stats-%u", instance++);
- vdev->stats = vdev->alloc_consistent(vdev->priv,
- sizeof(struct vnic_stats), &vdev->stats_pa, (u8 *)name);
- if (!vdev->stats)
- return -ENOMEM;
- }
+ if (!vdev->stats)
+ return -ENOMEM;
*stats = vdev->stats;
a0 = vdev->stats_pa;
@@ -922,6 +911,18 @@ u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
return vdev->intr_coal_timer_info.max_usec;
}
+int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev)
+{
+ char name[NAME_MAX];
+ static u32 instance;
+
+ snprintf((char *)name, sizeof(name), "vnic_stats-%u", instance++);
+ vdev->stats = vdev->alloc_consistent(vdev->priv,
+ sizeof(struct vnic_stats),
+ &vdev->stats_pa, (u8 *)name);
+ return vdev->stats == NULL ? -ENOMEM : 0;
+}
+
void vnic_dev_unregister(struct vnic_dev *vdev)
{
if (vdev) {
@@ -1044,3 +1045,36 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
return ret;
}
+
+int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config)
+{
+ u64 a0 = overlay;
+ u64 a1 = config;
+ int wait = 1000;
+
+ return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait);
+}
+
+int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
+ u16 vxlan_udp_port_number)
+{
+ u64 a1 = vxlan_udp_port_number;
+ u64 a0 = overlay;
+ int wait = 1000;
+
+ return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait);
+}
+
+int vnic_dev_capable_vxlan(struct vnic_dev *vdev)
+{
+ u64 a0 = VIC_FEATURE_VXLAN;
+ u64 a1 = 0;
+ int wait = 1000;
+ int ret;
+
+ ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait);
+ /* 1 if the NIC can do VXLAN for both IPv4 and IPv6 with multiple WQs */
+ return ret == 0 &&
+ (a1 & (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ)) ==
+ (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ);
+}
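
A hedged sketch of how a driver could combine the new helpers above; the overlay id and config values are placeholders, not the real devcmd constants (those live in vnic_devcmd.h):

#include "vnic_dev.h"

static int
enable_vxlan_offload_if_supported(struct vnic_dev *vdev)
{
    u8 vxlan_overlay = 1;   /* placeholder for the VxLAN overlay id */
    u8 enable = 1;          /* placeholder for the "enable" config value */

    if (!vnic_dev_capable_vxlan(vdev))
        return 0;           /* NIC lacks VxLAN for IPv4+IPv6 with multi-WQ */
    return vnic_dev_overlay_offload_ctrl(vdev, vxlan_overlay, enable);
}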
diff --git a/drivers/net/enic/base/vnic_dev.h b/drivers/net/enic/base/vnic_dev.h
index 8c099206..3c908430 100644
--- a/drivers/net/enic/base/vnic_dev.h
+++ b/drivers/net/enic/base/vnic_dev.h
@@ -108,7 +108,7 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
int vnic_dev_capable_adv_filters(struct vnic_dev *vdev);
int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd);
int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
- u8 *filter_tags);
+ u8 *filter_actions);
int vnic_dev_asic_info(struct vnic_dev *vdev, u16 *asic_type, u16 *asic_rev);
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
void *value);
@@ -165,6 +165,7 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
void *priv, struct rte_pci_device *pdev, struct vnic_dev_bar *bar,
unsigned int num_bars);
struct rte_pci_device *vnic_dev_get_pdev(struct vnic_dev *vdev);
+int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev);
int vnic_dev_cmd_init(struct vnic_dev *vdev, int fallback);
int vnic_dev_get_size(void);
int vnic_dev_int13(struct vnic_dev *vdev, u64 arg, u32 op);
@@ -177,10 +178,9 @@ int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status);
int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
struct filter_v2 *data, struct filter_action_v2 *action_v2);
-#ifdef ENIC_VXLAN
-int vnic_dev_overlay_offload_enable_disable(struct vnic_dev *vdev,
+int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev,
u8 overlay, u8 config);
int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
u16 vxlan_udp_port_number);
-#endif
+int vnic_dev_capable_vxlan(struct vnic_dev *vdev);
#endif /* _VNIC_DEV_H_ */
diff --git a/drivers/net/enic/base/vnic_devcmd.h b/drivers/net/enic/base/vnic_devcmd.h
index 6b95bc48..2865eb4d 100644
--- a/drivers/net/enic/base/vnic_devcmd.h
+++ b/drivers/net/enic/base/vnic_devcmd.h
@@ -600,6 +600,7 @@ enum filter_cap_mode {
/* flags for CMD_OPEN */
#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */
+#define CMD_OPENF_IG_DESCCACHE 0x2 /* Do not flush IG DESC cache */
/* flags for CMD_INIT */
#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */
@@ -840,7 +841,9 @@ struct filter_action {
#define FILTER_ACTION_RQ_STEERING_FLAG (1 << 0)
#define FILTER_ACTION_FILTER_ID_FLAG (1 << 1)
+#define FILTER_ACTION_DROP_FLAG (1 << 2)
#define FILTER_ACTION_V2_ALL (FILTER_ACTION_RQ_STEERING_FLAG \
+ | FILTER_ACTION_DROP_FLAG \
| FILTER_ACTION_FILTER_ID_FLAG)
/* Version 2 of filter action must be a strict extension of struct filter_action
@@ -1078,6 +1081,18 @@ typedef enum {
} vic_feature_t;
/*
+ * These flags are used in args[1] of devcmd CMD_GET_SUPP_FEATURE_VER
+ * to indicate to the host driver which VxLAN and multi-WQ features
+ * are supported
+ */
+#define FEATURE_VXLAN_IPV6_INNER (1 << 0)
+#define FEATURE_VXLAN_IPV6_OUTER (1 << 1)
+#define FEATURE_VXLAN_MULTI_WQ (1 << 2)
+
+#define FEATURE_VXLAN_IPV6 (FEATURE_VXLAN_IPV6_INNER | \
+ FEATURE_VXLAN_IPV6_OUTER)
+
+/*
* CMD_CONFIG_GRPINTR subcommands
*/
typedef enum {
diff --git a/drivers/net/enic/base/vnic_enet.h b/drivers/net/enic/base/vnic_enet.h
index 26918335..49504a7d 100644
--- a/drivers/net/enic/base/vnic_enet.h
+++ b/drivers/net/enic/base/vnic_enet.h
@@ -52,6 +52,10 @@ struct vnic_enet_config {
#define VENETF_VXLAN 0x10000 /* VxLAN offload */
#define VENETF_NVGRE 0x20000 /* NVGRE offload */
#define VENETF_GRPINTR 0x40000 /* group interrupt */
+#define VENETF_NICSWITCH 0x80000 /* NICSWITCH enabled */
+#define VENETF_RSSHASH_UDP_WEAK 0x100000 /* VIC has Bodega-style UDP RSS */
+#define VENETF_RSSHASH_UDPIPV4 0x200000 /* Hash on UDP + IPv4 fields */
+#define VENETF_RSSHASH_UDPIPV6 0x400000 /* Hash on UDP + IPv6 fields */
#define VENET_INTR_TYPE_MIN 0 /* Timer specs min interrupt spacing */
#define VENET_INTR_TYPE_IDLE 1 /* Timer specs idle time before irq */
diff --git a/drivers/net/enic/base/vnic_nic.h b/drivers/net/enic/base/vnic_nic.h
index a753b3a5..e318d0cb 100644
--- a/drivers/net/enic/base/vnic_nic.h
+++ b/drivers/net/enic/base/vnic_nic.h
@@ -27,12 +27,14 @@
#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL
#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24
+#define NIC_CFG_RSS_HASH_TYPE_UDP_IPV4 (1 << 0)
#define NIC_CFG_RSS_HASH_TYPE_IPV4 (1 << 1)
#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 (1 << 2)
#define NIC_CFG_RSS_HASH_TYPE_IPV6 (1 << 3)
#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 4)
#define NIC_CFG_RSS_HASH_TYPE_IPV6_EX (1 << 5)
#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX (1 << 6)
+#define NIC_CFG_RSS_HASH_TYPE_UDP_IPV6 (1 << 7)
static inline void vnic_set_nic_cfg(u32 *nic_cfg,
u8 rss_default_cpu, u8 rss_hash_type,
diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h
index d774bb0d..9619290d 100644
--- a/drivers/net/enic/base/vnic_rq.h
+++ b/drivers/net/enic/base/vnic_rq.h
@@ -6,6 +6,7 @@
#ifndef _VNIC_RQ_H_
#define _VNIC_RQ_H_
+#include <stdbool.h>
#include "vnic_dev.h"
#include "vnic_cq.h"
@@ -69,6 +70,7 @@ struct vnic_rq {
struct rte_mbuf *pkt_last_seg;
unsigned int max_mbufs_per_pkt;
uint16_t tot_nb_desc;
+ bool need_initial_post;
};
static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
diff --git a/drivers/net/enic/base/vnic_wq.h b/drivers/net/enic/base/vnic_wq.h
index 7c069c06..0135bffc 100644
--- a/drivers/net/enic/base/vnic_wq.h
+++ b/drivers/net/enic/base/vnic_wq.h
@@ -44,6 +44,7 @@ struct vnic_wq_buf {
struct vnic_wq {
unsigned int index;
+ uint64_t tx_offload_notsup_mask;
struct vnic_dev *vdev;
struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
struct vnic_dev_ring ring;
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index c083985e..ee83fe57 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -17,6 +17,7 @@
#include "vnic_rss.h"
#include "enic_res.h"
#include "cq_enet_desc.h"
+#include <stdbool.h>
#include <sys/queue.h>
#include <rte_spinlock.h>
@@ -49,6 +50,15 @@
#define ENICPMD_FDIR_MAX 64
+/*
+ * Interrupt 0: LSC and errors
+ * Interrupt 1: rx queue 0
+ * Interrupt 2: rx queue 1
+ * ...
+ */
+#define ENICPMD_LSC_INTR_OFFSET 0
+#define ENICPMD_RXQ_INTR_OFFSET 1
+
struct enic_fdir_node {
struct rte_eth_fdir_filter filter;
u16 fltr_id;
@@ -92,6 +102,7 @@ struct enic {
struct vnic_dev *vdev;
unsigned int port_id;
+ bool overlay_offload;
struct rte_eth_dev *rte_dev;
struct enic_fdir fdir;
char bdf_name[ENICPMD_BDF_LENGTH];
@@ -109,7 +120,9 @@ struct enic {
u16 max_mtu;
u8 adv_filters;
u32 flow_filter_mode;
- u8 filter_tags;
+ u8 filter_actions; /* HW supported actions */
+ bool vxlan;
+ bool disable_overlay; /* devargs disable_overlay=1 */
unsigned int flags;
unsigned int priv_flags;
@@ -126,9 +139,9 @@ struct enic {
struct vnic_cq *cq;
unsigned int cq_count; /* equals rq_count + wq_count */
- /* interrupt resource */
- struct vnic_intr intr;
- unsigned int intr_count;
+ /* interrupt vectors (len = conf_intr_count) */
+ struct vnic_intr *intr;
+ unsigned int intr_count; /* equals enabled interrupts (lsc + rxqs) */
/* software counters */
struct enic_soft_stats soft_stats;
@@ -146,8 +159,33 @@ struct enic {
LIST_HEAD(enic_flows, rte_flow) flows;
rte_spinlock_t flows_lock;
+
+ /* RSS */
+ uint16_t reta_size;
+ uint8_t hash_key_size;
+ uint64_t flow_type_rss_offloads; /* 0 indicates RSS not supported */
+ /*
+ * Keep a copy of current RSS config for queries, as we cannot retrieve
+ * it from the NIC.
+ */
+ uint8_t rss_hash_type; /* NIC_CFG_RSS_HASH_TYPE flags */
+ uint8_t rss_enable;
+ uint64_t rss_hf; /* ETH_RSS flags */
+ union vnic_rss_key rss_key;
+ union vnic_rss_cpu rss_cpu;
+
+ uint64_t rx_offload_capa; /* DEV_RX_OFFLOAD flags */
+ uint64_t tx_offload_capa; /* DEV_TX_OFFLOAD flags */
+ uint64_t tx_offload_mask; /* PKT_TX flags accepted */
};
+/* Compute ethdev's max packet size from MTU */
+static inline uint32_t enic_mtu_to_max_rx_pktlen(uint32_t mtu)
+{
+ /* ethdev max size includes eth and crc whereas NIC MTU does not */
+ return mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+}
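
Two small worked examples of the header additions above (sketches, not part of the patch): for a 1500-byte MTU the advertised max Rx packet length is 1500 + 14 + 4 = 1518 bytes, and Rx queue i is serviced by interrupt vector i + 1 because vector 0 is reserved for link state and errors.

/* illustrative only: queue 3 maps to interrupt vector 4 */
static inline unsigned int
enic_rxq_to_intr_vector(unsigned int rx_queue_id)
{
    return ENICPMD_RXQ_INTR_OFFSET + rx_queue_id;
}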
+
/* Get the CQ index from a Start of Packet(SOP) RQ index */
static inline unsigned int enic_sop_rq_idx_to_cq_idx(unsigned int sop_idx)
{
@@ -220,54 +258,58 @@ enic_ring_incr(uint32_t n_descriptors, uint32_t idx)
return idx;
}
-extern void enic_fdir_stats_get(struct enic *enic,
- struct rte_eth_fdir_stats *stats);
-extern int enic_fdir_add_fltr(struct enic *enic,
- struct rte_eth_fdir_filter *params);
-extern int enic_fdir_del_fltr(struct enic *enic,
- struct rte_eth_fdir_filter *params);
-extern void enic_free_wq(void *txq);
-extern int enic_alloc_intr_resources(struct enic *enic);
-extern int enic_setup_finish(struct enic *enic);
-extern int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
- unsigned int socket_id, uint16_t nb_desc);
-extern void enic_start_wq(struct enic *enic, uint16_t queue_idx);
-extern int enic_stop_wq(struct enic *enic, uint16_t queue_idx);
-extern void enic_start_rq(struct enic *enic, uint16_t queue_idx);
-extern int enic_stop_rq(struct enic *enic, uint16_t queue_idx);
-extern void enic_free_rq(void *rxq);
-extern int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
- unsigned int socket_id, struct rte_mempool *mp,
- uint16_t nb_desc, uint16_t free_thresh);
-extern int enic_set_rss_nic_cfg(struct enic *enic);
-extern int enic_set_vnic_res(struct enic *enic);
-extern int enic_enable(struct enic *enic);
-extern int enic_disable(struct enic *enic);
-extern void enic_remove(struct enic *enic);
-extern int enic_get_link_status(struct enic *enic);
-extern int enic_dev_stats_get(struct enic *enic,
- struct rte_eth_stats *r_stats);
-extern void enic_dev_stats_clear(struct enic *enic);
-extern void enic_add_packet_filter(struct enic *enic);
+void enic_fdir_stats_get(struct enic *enic,
+ struct rte_eth_fdir_stats *stats);
+int enic_fdir_add_fltr(struct enic *enic,
+ struct rte_eth_fdir_filter *params);
+int enic_fdir_del_fltr(struct enic *enic,
+ struct rte_eth_fdir_filter *params);
+void enic_free_wq(void *txq);
+int enic_alloc_intr_resources(struct enic *enic);
+int enic_setup_finish(struct enic *enic);
+int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
+ unsigned int socket_id, uint16_t nb_desc);
+void enic_start_wq(struct enic *enic, uint16_t queue_idx);
+int enic_stop_wq(struct enic *enic, uint16_t queue_idx);
+void enic_start_rq(struct enic *enic, uint16_t queue_idx);
+int enic_stop_rq(struct enic *enic, uint16_t queue_idx);
+void enic_free_rq(void *rxq);
+int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
+ unsigned int socket_id, struct rte_mempool *mp,
+ uint16_t nb_desc, uint16_t free_thresh);
+int enic_set_vnic_res(struct enic *enic);
+int enic_init_rss_nic_cfg(struct enic *enic);
+int enic_set_rss_conf(struct enic *enic,
+ struct rte_eth_rss_conf *rss_conf);
+int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu);
+int enic_set_vlan_strip(struct enic *enic);
+int enic_enable(struct enic *enic);
+int enic_disable(struct enic *enic);
+void enic_remove(struct enic *enic);
+int enic_get_link_status(struct enic *enic);
+int enic_dev_stats_get(struct enic *enic,
+ struct rte_eth_stats *r_stats);
+void enic_dev_stats_clear(struct enic *enic);
+void enic_add_packet_filter(struct enic *enic);
int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr);
-void enic_del_mac_address(struct enic *enic, int mac_index);
-extern unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq);
-extern void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
- struct rte_mbuf *tx_pkt, unsigned short len,
- uint8_t sop, uint8_t eop, uint8_t cq_entry,
- uint16_t ol_flags, uint16_t vlan_tag);
-
-extern void enic_post_wq_index(struct vnic_wq *wq);
-extern int enic_probe(struct enic *enic);
-extern int enic_clsf_init(struct enic *enic);
-extern void enic_clsf_destroy(struct enic *enic);
+int enic_del_mac_address(struct enic *enic, int mac_index);
+unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq);
+void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
+ struct rte_mbuf *tx_pkt, unsigned short len,
+ uint8_t sop, uint8_t eop, uint8_t cq_entry,
+ uint16_t ol_flags, uint16_t vlan_tag);
+
+void enic_post_wq_index(struct vnic_wq *wq);
+int enic_probe(struct enic *enic);
+int enic_clsf_init(struct enic *enic);
+void enic_clsf_destroy(struct enic *enic);
uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
uint16_t enic_dummy_recv_pkts(void *rx_queue,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+ uint16_t nb_pkts);
uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
int enic_set_mtu(struct enic *enic, uint16_t new_mtu);
diff --git a/drivers/net/enic/enic_clsf.c b/drivers/net/enic/enic_clsf.c
index 3ef1d083..9d95201e 100644
--- a/drivers/net/enic/enic_clsf.c
+++ b/drivers/net/enic/enic_clsf.c
@@ -111,7 +111,6 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
struct rte_eth_fdir_masks *masks)
{
struct filter_generic_1 *gp = &fltr->u.generic_1;
- int i;
fltr->type = FILTER_DPDK_1;
memset(gp, 0, sizeof(*gp));
@@ -273,18 +272,14 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
ipv6_mask.proto = masks->ipv6_mask.proto;
ipv6_val.proto = input->flow.ipv6_flow.proto;
}
- for (i = 0; i < 4; i++) {
- *(uint32_t *)&ipv6_mask.src_addr[i * 4] =
- masks->ipv6_mask.src_ip[i];
- *(uint32_t *)&ipv6_val.src_addr[i * 4] =
- input->flow.ipv6_flow.src_ip[i];
- }
- for (i = 0; i < 4; i++) {
- *(uint32_t *)&ipv6_mask.dst_addr[i * 4] =
- masks->ipv6_mask.src_ip[i];
- *(uint32_t *)&ipv6_val.dst_addr[i * 4] =
- input->flow.ipv6_flow.dst_ip[i];
- }
+ memcpy(ipv6_mask.src_addr, masks->ipv6_mask.src_ip,
+ sizeof(ipv6_mask.src_addr));
+ memcpy(ipv6_val.src_addr, input->flow.ipv6_flow.src_ip,
+ sizeof(ipv6_val.src_addr));
+ memcpy(ipv6_mask.dst_addr, masks->ipv6_mask.dst_ip,
+ sizeof(ipv6_mask.dst_addr));
+ memcpy(ipv6_val.dst_addr, input->flow.ipv6_flow.dst_ip,
+ sizeof(ipv6_val.dst_addr));
if (input->flow.ipv6_flow.tc) {
ipv6_mask.vtc_flow = masks->ipv6_mask.tc << 12;
ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 12;
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index d84714ef..28630892 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -11,6 +11,7 @@
#include <rte_bus_pci.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
+#include <rte_kvargs.h>
#include <rte_string_fns.h>
#include "vnic_intr.h"
@@ -39,6 +40,8 @@ static const struct rte_pci_id pci_id_enic_map[] = {
{.vendor_id = 0, /* sentinel */},
};
+#define ENIC_DEVARG_DISABLE_OVERLAY "disable-overlay"
+
RTE_INIT(enicpmd_init_log);
static void
enicpmd_init_log(void)
@@ -318,52 +321,40 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
return enicpmd_dev_setup_intr(enic);
}
-static int enicpmd_vlan_filter_set(struct rte_eth_dev *eth_dev,
- uint16_t vlan_id, int on)
-{
- struct enic *enic = pmd_priv(eth_dev);
- int err;
-
- ENICPMD_FUNC_TRACE();
- if (on)
- err = enic_add_vlan(enic, vlan_id);
- else
- err = enic_del_vlan(enic, vlan_id);
- return err;
-}
-
static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
struct enic *enic = pmd_priv(eth_dev);
+ uint64_t offloads;
ENICPMD_FUNC_TRACE();
+ offloads = eth_dev->data->dev_conf.rxmode.offloads;
if (mask & ETH_VLAN_STRIP_MASK) {
- if (eth_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
enic->ig_vlan_strip_en = 1;
else
enic->ig_vlan_strip_en = 0;
}
- enic_set_rss_nic_cfg(enic);
-
- if (mask & ETH_VLAN_FILTER_MASK) {
+ if ((mask & ETH_VLAN_FILTER_MASK) &&
+ (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
dev_warning(enic,
"Configuration of VLAN filter is not supported\n");
}
- if (mask & ETH_VLAN_EXTEND_MASK) {
+ if ((mask & ETH_VLAN_EXTEND_MASK) &&
+ (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)) {
dev_warning(enic,
"Configuration of extended VLAN is not supported\n");
}
- return 0;
+ return enic_set_vlan_strip(enic);
}
static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
{
int ret;
+ int mask;
struct enic *enic = pmd_priv(eth_dev);
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -378,9 +369,21 @@ static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_CHECKSUM);
- ret = enicpmd_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK);
-
- return ret;
+ /* Pass all VLAN offload masks so the current settings are applied */
+ mask = ETH_VLAN_STRIP_MASK |
+ ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK;
+ ret = enicpmd_vlan_offload_set(eth_dev, mask);
+ if (ret) {
+ dev_err(enic, "Failed to configure VLAN offloads\n");
+ return ret;
+ }
+ /*
+ * Initialize RSS with the default reta and key. If the user key is
+ * given (rx_adv_conf.rss_conf.rss_key), will use that instead of the
+ * default key.
+ */
+ return enic_init_rss_nic_cfg(enic);
}
/* Start the device.
@@ -410,10 +413,9 @@ static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
ENICPMD_FUNC_TRACE();
enic_disable(enic);
+
memset(&link, 0, sizeof(link));
- rte_atomic64_cmpset((uint64_t *)&eth_dev->data->dev_link,
- *(uint64_t *)&eth_dev->data->dev_link,
- *(uint64_t *)&link);
+ rte_eth_linkstatus_set(eth_dev, &link);
}
/*
@@ -459,27 +461,27 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
struct enic *enic = pmd_priv(eth_dev);
ENICPMD_FUNC_TRACE();
- device_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
/* Scattered Rx uses two receive queues per rx queue exposed to dpdk */
device_info->max_rx_queues = enic->conf_rq_count / 2;
device_info->max_tx_queues = enic->conf_wq_count;
device_info->min_rx_bufsize = ENIC_MIN_MTU;
- device_info->max_rx_pktlen = enic->max_mtu + ETHER_HDR_LEN + 4;
+ /* "Max" mtu is not a typo. HW receives packet sizes up to the
+ * max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is
+ * a hint to the driver to size receive buffers accordingly so that
+ * larger-than-vnic-mtu packets get truncated. For DPDK, we let
+ * the user decide the buffer size via rxmode.max_rx_pkt_len, basically
+ * ignoring vNIC mtu.
+ */
+ device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu);
device_info->max_mac_addrs = ENIC_MAX_MAC_ADDR;
- device_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
- device_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ device_info->rx_offload_capa = enic->rx_offload_capa;
+ device_info->tx_offload_capa = enic->tx_offload_capa;
device_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
};
+ device_info->reta_size = enic->reta_size;
+ device_info->hash_key_size = enic->hash_key_size;
+ device_info->flow_type_rss_offloads = enic->flow_type_rss_offloads;
}
static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
@@ -571,7 +573,24 @@ static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index)
return;
ENICPMD_FUNC_TRACE();
- enic_del_mac_address(enic, index);
+ if (enic_del_mac_address(enic, index))
+ dev_err(enic, "del mac addr failed\n");
+}
+
+static int enicpmd_set_mac_addr(struct rte_eth_dev *eth_dev,
+ struct ether_addr *addr)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+ int ret;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+
+ ENICPMD_FUNC_TRACE();
+ ret = enic_del_mac_address(enic, 0);
+ if (ret)
+ return ret;
+ return enic_set_mac_address(enic, addr->addr_bytes);
}
static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
@@ -582,6 +601,168 @@ static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
return enic_set_mtu(enic, mtu);
}
+static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64
+ *reta_conf,
+ uint16_t reta_size)
+{
+ struct enic *enic = pmd_priv(dev);
+ uint16_t i, idx, shift;
+
+ ENICPMD_FUNC_TRACE();
+ if (reta_size != ENIC_RSS_RETA_SIZE) {
+ dev_err(enic, "reta_query: wrong reta_size. given=%u expected=%u\n",
+ reta_size, ENIC_RSS_RETA_SIZE);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift))
+ reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx(
+ enic->rss_cpu.cpu[i / 4].b[i % 4]);
+ }
+
+ return 0;
+}
+
+static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64
+ *reta_conf,
+ uint16_t reta_size)
+{
+ struct enic *enic = pmd_priv(dev);
+ union vnic_rss_cpu rss_cpu;
+ uint16_t i, idx, shift;
+
+ ENICPMD_FUNC_TRACE();
+ if (reta_size != ENIC_RSS_RETA_SIZE) {
+ dev_err(enic, "reta_update: wrong reta_size. given=%u"
+ " expected=%u\n",
+ reta_size, ENIC_RSS_RETA_SIZE);
+ return -EINVAL;
+ }
+ /*
+ * Start with the current reta and modify it per reta_conf, as we
+ * need to push the entire reta even if we only modify one entry.
+ */
+ rss_cpu = enic->rss_cpu;
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift))
+ rss_cpu.cpu[i / 4].b[i % 4] =
+ enic_rte_rq_idx_to_sop_idx(
+ reta_conf[idx].reta[shift]);
+ }
+ return enic_set_rss_reta(enic, &rss_cpu);
+}
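
Caller-side sketch of the RETA handlers above: an application repoints a single redirection-table entry by setting the matching group mask bit, using the same idx = i / RTE_RETA_GROUP_SIZE, shift = i % RTE_RETA_GROUP_SIZE split. Port id, entry and queue are examples; enic's reta_size is ENIC_RSS_RETA_SIZE, assumed to be 64 here:

#include <errno.h>
#include <string.h>
#include <rte_ethdev.h>

static int
point_reta_entry_to_queue(uint16_t port_id, uint16_t entry, uint16_t queue)
{
    struct rte_eth_rss_reta_entry64 reta_conf[1];   /* one 64-entry group */
    uint16_t idx = entry / RTE_RETA_GROUP_SIZE;
    uint16_t shift = entry % RTE_RETA_GROUP_SIZE;

    if (entry >= 64)
        return -EINVAL;
    memset(reta_conf, 0, sizeof(reta_conf));
    reta_conf[idx].mask = 1ULL << shift;            /* update this entry only */
    reta_conf[idx].reta[shift] = queue;
    return rte_eth_dev_rss_reta_update(port_id, reta_conf, 64);
}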
+
+static int enicpmd_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct enic *enic = pmd_priv(dev);
+
+ ENICPMD_FUNC_TRACE();
+ return enic_set_rss_conf(enic, rss_conf);
+}
+
+static int enicpmd_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct enic *enic = pmd_priv(dev);
+
+ ENICPMD_FUNC_TRACE();
+ if (rss_conf == NULL)
+ return -EINVAL;
+ if (rss_conf->rss_key != NULL &&
+ rss_conf->rss_key_len < ENIC_RSS_HASH_KEY_SIZE) {
+ dev_err(enic, "rss_hash_conf_get: wrong rss_key_len. given=%u"
+ " expected=%u+\n",
+ rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
+ return -EINVAL;
+ }
+ rss_conf->rss_hf = enic->rss_hf;
+ if (rss_conf->rss_key != NULL) {
+ int i;
+ for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++) {
+ rss_conf->rss_key[i] =
+ enic->rss_key.key[i / 10].b[i % 10];
+ }
+ rss_conf->rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
+ }
+ return 0;
+}
+
+static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct enic *enic = pmd_priv(dev);
+ struct vnic_rq *rq_sop;
+ struct vnic_rq *rq_data;
+ struct rte_eth_rxconf *conf;
+ uint16_t sop_queue_idx;
+ uint16_t data_queue_idx;
+
+ ENICPMD_FUNC_TRACE();
+ sop_queue_idx = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
+ data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id);
+ rq_sop = &enic->rq[sop_queue_idx];
+ rq_data = &enic->rq[data_queue_idx]; /* valid if data_queue_enable */
+ qinfo->mp = rq_sop->mp;
+ qinfo->scattered_rx = rq_sop->data_queue_enable;
+ qinfo->nb_desc = rq_sop->ring.desc_count;
+ if (qinfo->scattered_rx)
+ qinfo->nb_desc += rq_data->ring.desc_count;
+ conf = &qinfo->conf;
+ memset(conf, 0, sizeof(*conf));
+ conf->rx_free_thresh = rq_sop->rx_free_thresh;
+ conf->rx_drop_en = 1;
+ /*
+ * Except VLAN stripping (port setting), all the checksum offloads
+ * are always enabled.
+ */
+ conf->offloads = enic->rx_offload_capa;
+ if (!enic->ig_vlan_strip_en)
+ conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ /* rx_thresh and other fields are not applicable for enic */
+}
+
+static void enicpmd_dev_txq_info_get(struct rte_eth_dev *dev,
+ __rte_unused uint16_t tx_queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct enic *enic = pmd_priv(dev);
+
+ ENICPMD_FUNC_TRACE();
+ qinfo->nb_desc = enic->config.wq_desc_count;
+ memset(&qinfo->conf, 0, sizeof(qinfo->conf));
+ qinfo->conf.offloads = enic->tx_offload_capa;
+ /* tx_thresh, and all the other fields are not applicable for enic */
+}
+
+static int enicpmd_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
+ uint16_t rx_queue_id)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ vnic_intr_unmask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
+ return 0;
+}
+
+static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
+ uint16_t rx_queue_id)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ vnic_intr_mask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
+ return 0;
+}
+
static const struct eth_dev_ops enicpmd_eth_dev_ops = {
.dev_configure = enicpmd_dev_configure,
.dev_start = enicpmd_dev_start,
@@ -600,7 +781,7 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
.dev_infos_get = enicpmd_dev_info_get,
.dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
.mtu_set = enicpmd_mtu_set,
- .vlan_filter_set = enicpmd_vlan_filter_set,
+ .vlan_filter_set = NULL,
.vlan_tpid_set = NULL,
.vlan_offload_set = enicpmd_vlan_offload_set,
.vlan_strip_queue_set = NULL,
@@ -614,6 +795,10 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
.rx_descriptor_done = NULL,
.tx_queue_setup = enicpmd_dev_tx_queue_setup,
.tx_queue_release = enicpmd_dev_tx_queue_release,
+ .rx_queue_intr_enable = enicpmd_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = enicpmd_dev_rx_queue_intr_disable,
+ .rxq_info_get = enicpmd_dev_rxq_info_get,
+ .txq_info_get = enicpmd_dev_txq_info_get,
.dev_led_on = NULL,
.dev_led_off = NULL,
.flow_ctrl_get = NULL,
@@ -621,9 +806,57 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
.priority_flow_ctrl_set = NULL,
.mac_addr_add = enicpmd_add_mac_addr,
.mac_addr_remove = enicpmd_remove_mac_addr,
+ .mac_addr_set = enicpmd_set_mac_addr,
.filter_ctrl = enicpmd_dev_filter_ctrl,
+ .reta_query = enicpmd_dev_rss_reta_query,
+ .reta_update = enicpmd_dev_rss_reta_update,
+ .rss_hash_conf_get = enicpmd_dev_rss_hash_conf_get,
+ .rss_hash_update = enicpmd_dev_rss_hash_update,
};
+static int enic_parse_disable_overlay(__rte_unused const char *key,
+ const char *value,
+ void *opaque)
+{
+ struct enic *enic;
+
+ enic = (struct enic *)opaque;
+ if (strcmp(value, "0") == 0) {
+ enic->disable_overlay = false;
+ } else if (strcmp(value, "1") == 0) {
+ enic->disable_overlay = true;
+ } else {
+ dev_err(enic, "Invalid value for " ENIC_DEVARG_DISABLE_OVERLAY
+ ": expected=0|1 given=%s\n", value);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int enic_check_devargs(struct rte_eth_dev *dev)
+{
+ static const char *const valid_keys[] = {
+ ENIC_DEVARG_DISABLE_OVERLAY, NULL};
+ struct enic *enic = pmd_priv(dev);
+ struct rte_kvargs *kvlist;
+
+ ENICPMD_FUNC_TRACE();
+
+ enic->disable_overlay = false;
+ if (!dev->device->devargs)
+ return 0;
+ kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
+ if (!kvlist)
+ return -EINVAL;
+ if (rte_kvargs_process(kvlist, ENIC_DEVARG_DISABLE_OVERLAY,
+ enic_parse_disable_overlay, enic) < 0) {
+ rte_kvargs_free(kvlist);
+ return -EINVAL;
+ }
+ rte_kvargs_free(kvlist);
+ return 0;
+}
+
struct enic *enicpmd_list_head = NULL;
/* Initialize the driver
* It returns 0 on success.
@@ -633,6 +866,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
struct rte_pci_device *pdev;
struct rte_pci_addr *addr;
struct enic *enic = pmd_priv(eth_dev);
+ int err;
ENICPMD_FUNC_TRACE();
@@ -651,6 +885,9 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
addr->domain, addr->bus, addr->devid, addr->function);
+ err = enic_check_devargs(eth_dev);
+ if (err)
+ return err;
return enic_probe(enic);
}
@@ -676,3 +913,5 @@ static struct rte_pci_driver rte_enic_pmd = {
RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_enic,
+ ENIC_DEVARG_DISABLE_OVERLAY "=<0|1> ");
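
Usage note: the new devarg is passed per device on the EAL whitelist option, for example (PCI address made up):

    testpmd -w 0000:13:00.0,disable-overlay=1 -- -i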
diff --git a/drivers/net/enic/enic_flow.c b/drivers/net/enic/enic_flow.c
index 28923b0e..0cf04aef 100644
--- a/drivers/net/enic/enic_flow.c
+++ b/drivers/net/enic/enic_flow.c
@@ -3,6 +3,7 @@
*/
#include <errno.h>
+#include <stdint.h>
#include <rte_log.h>
#include <rte_ethdev_driver.h>
#include <rte_flow_driver.h>
@@ -273,21 +274,33 @@ static const enum rte_flow_action_type enic_supported_actions_v1[] = {
};
/** Supported actions for newer NICs */
-static const enum rte_flow_action_type enic_supported_actions_v2[] = {
+static const enum rte_flow_action_type enic_supported_actions_v2_id[] = {
RTE_FLOW_ACTION_TYPE_QUEUE,
RTE_FLOW_ACTION_TYPE_MARK,
RTE_FLOW_ACTION_TYPE_FLAG,
RTE_FLOW_ACTION_TYPE_END,
};
+static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
+ RTE_FLOW_ACTION_TYPE_QUEUE,
+ RTE_FLOW_ACTION_TYPE_MARK,
+ RTE_FLOW_ACTION_TYPE_FLAG,
+ RTE_FLOW_ACTION_TYPE_DROP,
+ RTE_FLOW_ACTION_TYPE_END,
+};
+
/** Action capabilities indexed by NIC version information */
static const struct enic_action_cap enic_action_cap[] = {
[FILTER_ACTION_RQ_STEERING_FLAG] = {
.actions = enic_supported_actions_v1,
.copy_fn = enic_copy_action_v1,
},
- [FILTER_ACTION_V2_ALL] = {
- .actions = enic_supported_actions_v2,
+ [FILTER_ACTION_FILTER_ID_FLAG] = {
+ .actions = enic_supported_actions_v2_id,
+ .copy_fn = enic_copy_action_v2,
+ },
+ [FILTER_ACTION_DROP_FLAG] = {
+ .actions = enic_supported_actions_v2_drop,
.copy_fn = enic_copy_action_v2,
},
};
@@ -544,16 +557,21 @@ enic_copy_item_vlan_v2(const struct rte_flow_item *item,
if (!spec)
return 0;
- /* Don't support filtering in tpid */
- if (mask) {
- if (mask->tpid != 0)
- return ENOTSUP;
- } else {
+ if (!mask)
mask = &rte_flow_item_vlan_mask;
- RTE_ASSERT(mask->tpid == 0);
- }
if (*inner_ofst == 0) {
+ struct ether_hdr *eth_mask =
+ (void *)gp->layer[FILTER_GENERIC_1_L2].mask;
+ struct ether_hdr *eth_val =
+ (void *)gp->layer[FILTER_GENERIC_1_L2].val;
+
+ /* Outer TPID cannot be matched */
+ if (eth_mask->ether_type)
+ return ENOTSUP;
+ eth_mask->ether_type = mask->inner_type;
+ eth_val->ether_type = spec->inner_type;
+
/* Outer header. Use the vlan mask/val fields */
gp->mask_vlan = mask->tci;
gp->val_vlan = spec->tci;
@@ -952,6 +970,9 @@ static int
enic_copy_action_v1(const struct rte_flow_action actions[],
struct filter_action_v2 *enic_action)
{
+ enum { FATE = 1, };
+ uint32_t overlap = 0;
+
FLOW_TRACE();
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
@@ -963,6 +984,10 @@ enic_copy_action_v1(const struct rte_flow_action actions[],
const struct rte_flow_action_queue *queue =
(const struct rte_flow_action_queue *)
actions->conf;
+
+ if (overlap & FATE)
+ return ENOTSUP;
+ overlap |= FATE;
enic_action->rq_idx =
enic_rte_rq_idx_to_sop_idx(queue->index);
break;
@@ -972,6 +997,8 @@ enic_copy_action_v1(const struct rte_flow_action actions[],
break;
}
}
+ if (!(overlap & FATE))
+ return ENOTSUP;
enic_action->type = FILTER_ACTION_RQ_STEERING;
return 0;
}
@@ -989,6 +1016,9 @@ static int
enic_copy_action_v2(const struct rte_flow_action actions[],
struct filter_action_v2 *enic_action)
{
+ enum { FATE = 1, MARK = 2, };
+ uint32_t overlap = 0;
+
FLOW_TRACE();
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
@@ -997,6 +1027,10 @@ enic_copy_action_v2(const struct rte_flow_action actions[],
const struct rte_flow_action_queue *queue =
(const struct rte_flow_action_queue *)
actions->conf;
+
+ if (overlap & FATE)
+ return ENOTSUP;
+ overlap |= FATE;
enic_action->rq_idx =
enic_rte_rq_idx_to_sop_idx(queue->index);
enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
@@ -1007,6 +1041,9 @@ enic_copy_action_v2(const struct rte_flow_action actions[],
(const struct rte_flow_action_mark *)
actions->conf;
+ if (overlap & MARK)
+ return ENOTSUP;
+ overlap |= MARK;
/* ENIC_MAGIC_FILTER_ID is reserved and is the highest
* in the range of allowed mark ids.
*/
@@ -1017,10 +1054,20 @@ enic_copy_action_v2(const struct rte_flow_action actions[],
break;
}
case RTE_FLOW_ACTION_TYPE_FLAG: {
+ if (overlap & MARK)
+ return ENOTSUP;
+ overlap |= MARK;
enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
break;
}
+ case RTE_FLOW_ACTION_TYPE_DROP: {
+ if (overlap & FATE)
+ return ENOTSUP;
+ overlap |= FATE;
+ enic_action->flags |= FILTER_ACTION_DROP_FLAG;
+ break;
+ }
case RTE_FLOW_ACTION_TYPE_VOID:
continue;
default:
@@ -1028,6 +1075,8 @@ enic_copy_action_v2(const struct rte_flow_action actions[],
break;
}
}
+ if (!(overlap & FATE))
+ return ENOTSUP;
enic_action->type = FILTER_ACTION_V2;
return 0;
}
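/*
 * Editor's illustrative sketch, not part of the patch above: the
 * overlap-bitmask pattern used by enic_copy_action_v1/v2, reduced to plain C.
 * A rule may carry at most one fate action (QUEUE or DROP) and at most one
 * mark action (MARK or FLAG), and a fate action is mandatory.
 */
#include <stdint.h>

enum act { ACT_QUEUE, ACT_DROP, ACT_MARK, ACT_FLAG, ACT_END };

static int validate_actions(const enum act *acts)
{
	enum { FATE = 1, MARK = 2 };
	uint32_t overlap = 0;

	for (; *acts != ACT_END; acts++) {
		uint32_t bit = (*acts == ACT_QUEUE || *acts == ACT_DROP) ?
				FATE : MARK;
		if (overlap & bit)
			return -1;	/* duplicate fate or mark action */
		overlap |= bit;
	}
	return (overlap & FATE) ? 0 : -1;	/* a fate action is required */
}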
@@ -1059,10 +1108,14 @@ enic_get_filter_cap(struct enic *enic)
static const struct enic_action_cap *
enic_get_action_cap(struct enic *enic)
{
- static const struct enic_action_cap *ea;
-
- if (enic->filter_tags)
- ea = &enic_action_cap[FILTER_ACTION_V2_ALL];
+ const struct enic_action_cap *ea;
+ uint8_t actions;
+
+ actions = enic->filter_actions;
+ if (actions & FILTER_ACTION_DROP_FLAG)
+ ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
+ else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
+ ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
else
ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
return ea;
@@ -1268,6 +1321,12 @@ enic_flow_parse(struct rte_eth_dev *dev,
NULL,
"egress is not supported");
return -rte_errno;
+ } else if (attrs->transfer) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ NULL,
+ "transfer is not supported");
+ return -rte_errno;
} else if (!attrs->ingress) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index ec9d343f..a25d303d 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -162,13 +162,12 @@ int enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
return 0;
}
-void enic_del_mac_address(struct enic *enic, int mac_index)
+int enic_del_mac_address(struct enic *enic, int mac_index)
{
struct rte_eth_dev *eth_dev = enic->rte_dev;
uint8_t *mac_addr = eth_dev->data->mac_addrs[mac_index].addr_bytes;
- if (vnic_dev_del_addr(enic->vdev, mac_addr))
- dev_err(enic, "del mac addr failed\n");
+ return vnic_dev_del_addr(enic->vdev, mac_addr);
}
int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
@@ -200,10 +199,15 @@ void enic_init_vnic_resources(struct enic *enic)
{
unsigned int error_interrupt_enable = 1;
unsigned int error_interrupt_offset = 0;
+ unsigned int rxq_interrupt_enable = 0;
+ unsigned int rxq_interrupt_offset = ENICPMD_RXQ_INTR_OFFSET;
unsigned int index = 0;
unsigned int cq_idx;
struct vnic_rq *data_rq;
+ if (enic->rte_dev->data->dev_conf.intr_conf.rxq)
+ rxq_interrupt_enable = 1;
+
for (index = 0; index < enic->rq_count; index++) {
cq_idx = enic_cq_rq(enic, enic_rte_rq_idx_to_sop_idx(index));
@@ -225,11 +229,13 @@ void enic_init_vnic_resources(struct enic *enic)
0 /* cq_head */,
0 /* cq_tail */,
1 /* cq_tail_color */,
- 0 /* interrupt_enable */,
+ rxq_interrupt_enable,
1 /* cq_entry_enable */,
0 /* cq_message_enable */,
- 0 /* interrupt offset */,
+ rxq_interrupt_offset,
0 /* cq_message_addr */);
+ if (rxq_interrupt_enable)
+ rxq_interrupt_offset++;
}
for (index = 0; index < enic->wq_count; index++) {
@@ -237,6 +243,9 @@ void enic_init_vnic_resources(struct enic *enic)
enic_cq_wq(enic, index),
error_interrupt_enable,
error_interrupt_offset);
+ /* Compute unsupported ol flags for enic_prep_pkts() */
+ enic->wq[index].tx_offload_notsup_mask =
+ PKT_TX_OFFLOAD_MASK ^ enic->tx_offload_mask;
cq_idx = enic_cq_wq(enic, index);
vnic_cq_init(&enic->cq[cq_idx],
@@ -252,10 +261,12 @@ void enic_init_vnic_resources(struct enic *enic)
(u64)enic->wq[index].cqmsg_rz->iova);
}
- vnic_intr_init(&enic->intr,
- enic->config.intr_timer_usec,
- enic->config.intr_timer_type,
- /*mask_on_assertion*/1);
+ for (index = 0; index < enic->intr_count; index++) {
+ vnic_intr_init(&enic->intr[index],
+ enic->config.intr_timer_usec,
+ enic->config.intr_timer_type,
+ /*mask_on_assertion*/1);
+ }
}
@@ -266,6 +277,8 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
struct rq_enet_desc *rqd = rq->ring.descs;
unsigned i;
dma_addr_t dma_addr;
+ uint32_t max_rx_pkt_len;
+ uint16_t rq_buf_len;
if (!rq->in_use)
return 0;
@@ -273,6 +286,18 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index,
rq->ring.desc_count);
+ /*
+ * If *not* using scatter and the mbuf size is greater than the
+ * requested max packet size (max_rx_pkt_len), then reduce the
+ * posted buffer size to max_rx_pkt_len. HW still receives packets
+ * larger than max_rx_pkt_len, but they will be truncated, which we
+ * drop in the rx handler. Not ideal, but better than returning
+ * large packets when the user is not expecting them.
+ */
+ max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ rq_buf_len = rte_pktmbuf_data_room_size(rq->mp) - RTE_PKTMBUF_HEADROOM;
+ if (max_rx_pkt_len < rq_buf_len && !rq->data_queue_enable)
+ rq_buf_len = max_rx_pkt_len;
for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
mb = rte_mbuf_raw_alloc(rq->mp);
if (mb == NULL) {
@@ -287,9 +312,27 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
rq_enet_desc_enc(rqd, dma_addr,
(rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
: RQ_ENET_TYPE_NOT_SOP),
- mb->buf_len - RTE_PKTMBUF_HEADROOM);
+ rq_buf_len);
rq->mbuf_ring[i] = mb;
}
+ /*
+ * Do not post the buffers to the NIC until we enable the RQ via
+ * enic_start_rq().
+ */
+ rq->need_initial_post = true;
+ return 0;
+}
+
+/*
+ * Post the Rx buffers for the first time. enic_alloc_rx_queue_mbufs() has
+ * allocated the buffers and filled the RQ descriptor ring. Just need to push
+ * the post index to the NIC.
+ */
+static void
+enic_initial_post_rx(struct enic *enic, struct vnic_rq *rq)
+{
+ if (!rq->in_use || !rq->need_initial_post)
+ return;
/* make sure all prior writes are complete before doing the PIO write */
rte_rmb();
@@ -304,9 +347,7 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
iowrite32(rq->posted_index, &rq->ctrl->posted_index);
iowrite32(0, &rq->ctrl->fetch_index);
rte_rmb();
-
- return 0;
-
+ rq->need_initial_post = false;
}
static void *
@@ -319,8 +360,8 @@ enic_alloc_consistent(void *priv, size_t size,
struct enic *enic = (struct enic *)priv;
struct enic_memzone_entry *mze;
- rz = rte_memzone_reserve_aligned((const char *)name,
- size, SOCKET_ID_ANY, 0, ENIC_ALIGN);
+ rz = rte_memzone_reserve_aligned((const char *)name, size,
+ SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN);
if (!rz) {
pr_err("%s : Failed to allocate memory requested for %s\n",
__func__, name);
@@ -379,16 +420,14 @@ enic_free_consistent(void *priv,
int enic_link_update(struct enic *enic)
{
struct rte_eth_dev *eth_dev = enic->rte_dev;
- int ret;
- int link_status = 0;
+ struct rte_eth_link link;
- link_status = enic_get_link_status(enic);
- ret = (link_status == enic->link_status);
- enic->link_status = link_status;
- eth_dev->data->dev_link.link_status = link_status;
- eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
- return ret;
+ memset(&link, 0, sizeof(link));
+ link.link_status = enic_get_link_status(enic);
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = vnic_dev_port_speed(enic->vdev);
+
+ return rte_eth_linkstatus_set(eth_dev, &link);
}
static void
@@ -397,13 +436,62 @@ enic_intr_handler(void *arg)
struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;
struct enic *enic = pmd_priv(dev);
- vnic_intr_return_all_credits(&enic->intr);
+ vnic_intr_return_all_credits(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);
enic_link_update(enic);
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
enic_log_q_error(enic);
}
+static int enic_rxq_intr_init(struct enic *enic)
+{
+ struct rte_intr_handle *intr_handle;
+ uint32_t rxq_intr_count, i;
+ int err;
+
+ intr_handle = enic->rte_dev->intr_handle;
+ if (!enic->rte_dev->data->dev_conf.intr_conf.rxq)
+ return 0;
+ /*
+ * Rx queue interrupts only work when we have MSI-X interrupts,
+ * one per queue. Sharing one interrupt is technically
+ * possible with VIC, but it is not worth the complications it brings.
+ */
+ if (!rte_intr_cap_multiple(intr_handle)) {
+ dev_err(enic, "Rx queue interrupts require MSI-X interrupts"
+ " (vfio-pci driver)\n");
+ return -ENOTSUP;
+ }
+ rxq_intr_count = enic->intr_count - ENICPMD_RXQ_INTR_OFFSET;
+ err = rte_intr_efd_enable(intr_handle, rxq_intr_count);
+ if (err) {
+ dev_err(enic, "Failed to enable event fds for Rx queue"
+ " interrupts\n");
+ return err;
+ }
+ intr_handle->intr_vec = rte_zmalloc("enic_intr_vec",
+ rxq_intr_count * sizeof(int), 0);
+ if (intr_handle->intr_vec == NULL) {
+ dev_err(enic, "Failed to allocate intr_vec\n");
+ return -ENOMEM;
+ }
+ for (i = 0; i < rxq_intr_count; i++)
+ intr_handle->intr_vec[i] = i + ENICPMD_RXQ_INTR_OFFSET;
+ return 0;
+}
+
+static void enic_rxq_intr_deinit(struct enic *enic)
+{
+ struct rte_intr_handle *intr_handle;
+
+ intr_handle = enic->rte_dev->intr_handle;
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec != NULL) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+}
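/*
 * Editor's illustrative sketch, not part of the patch above: the
 * application-side counterpart of enic_rxq_intr_init(). Rx queue interrupts
 * are requested through intr_conf.rxq at configure time and enabled per
 * queue after the port is started. Port and queue counts are hypothetical.
 */
#include <rte_ethdev.h>

static int use_rxq_interrupts(uint16_t port_id, uint16_t nb_rxq)
{
	struct rte_eth_conf conf = { .intr_conf = { .rxq = 1 } };
	uint16_t q;
	int ret;

	ret = rte_eth_dev_configure(port_id, nb_rxq, 1, &conf);
	if (ret < 0)
		return ret;
	/* ... queue setup and rte_eth_dev_start() omitted ... */
	for (q = 0; q < nb_rxq; q++) {
		ret = rte_eth_dev_rx_intr_enable(port_id, q);
		if (ret < 0)
			return ret;
	}
	return 0;
}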
+
int enic_enable(struct enic *enic)
{
unsigned int index;
@@ -420,6 +508,9 @@ int enic_enable(struct enic *enic)
if (eth_dev->data->dev_conf.intr_conf.lsc)
vnic_dev_notify_set(enic->vdev, 0);
+ err = enic_rxq_intr_init(enic);
+ if (err)
+ return err;
if (enic_clsf_init(enic))
dev_warning(enic, "Init of hash table for clsf failed."\
"Flow director feature will not work\n");
@@ -457,7 +548,8 @@ int enic_enable(struct enic *enic)
enic_intr_handler, (void *)enic->rte_dev);
rte_intr_enable(&(enic->pdev->intr_handle));
- vnic_intr_unmask(&enic->intr);
+ /* Unmask LSC interrupt */
+ vnic_intr_unmask(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);
return 0;
}
@@ -465,17 +557,21 @@ int enic_enable(struct enic *enic)
int enic_alloc_intr_resources(struct enic *enic)
{
int err;
+ unsigned int i;
dev_info(enic, "vNIC resources used: "\
"wq %d rq %d cq %d intr %d\n",
enic->wq_count, enic_vnic_rq_count(enic),
enic->cq_count, enic->intr_count);
- err = vnic_intr_alloc(enic->vdev, &enic->intr, 0);
- if (err)
- enic_free_vnic_resources(enic);
-
- return err;
+ for (i = 0; i < enic->intr_count; i++) {
+ err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i);
+ if (err) {
+ enic_free_vnic_resources(enic);
+ return err;
+ }
+ }
+ return 0;
}
void enic_free_rq(void *rxq)
@@ -539,10 +635,13 @@ void enic_start_rq(struct enic *enic, uint16_t queue_idx)
rq_data = &enic->rq[rq_sop->data_queue_idx];
struct rte_eth_dev *eth_dev = enic->rte_dev;
- if (rq_data->in_use)
+ if (rq_data->in_use) {
vnic_rq_enable(rq_data);
+ enic_initial_post_rx(enic, rq_data);
+ }
rte_mb();
vnic_rq_enable(rq_sop);
+ enic_initial_post_rx(enic, rq_sop);
eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}
@@ -581,7 +680,7 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
unsigned int mbuf_size, mbufs_per_pkt;
unsigned int nb_sop_desc, nb_data_desc;
uint16_t min_sop, max_sop, min_data, max_data;
- uint16_t mtu = enic->rte_dev->data->mtu;
+ uint32_t max_rx_pkt_len;
rq_sop->is_sop = 1;
rq_sop->data_queue_idx = data_queue_idx;
@@ -599,22 +698,42 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
RTE_PKTMBUF_HEADROOM);
+ /* max_rx_pkt_len includes the ethernet header and CRC. */
+ max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;
if (enic->rte_dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_SCATTER) {
dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
- /* ceil((mtu + ETHER_HDR_LEN + 4)/mbuf_size) */
- mbufs_per_pkt = ((mtu + ETHER_HDR_LEN + 4) +
- (mbuf_size - 1)) / mbuf_size;
+ /* ceil((max pkt len)/mbuf_size) */
+ mbufs_per_pkt = (max_rx_pkt_len + mbuf_size - 1) / mbuf_size;
} else {
dev_info(enic, "Scatter rx mode disabled\n");
mbufs_per_pkt = 1;
+ if (max_rx_pkt_len > mbuf_size) {
+ dev_warning(enic, "The maximum Rx packet size (%u) is"
+ " larger than the mbuf size (%u), and"
+ " scatter is disabled. Larger packets will"
+ " be truncated.\n",
+ max_rx_pkt_len, mbuf_size);
+ }
}
if (mbufs_per_pkt > 1) {
dev_info(enic, "Rq %u Scatter rx mode in use\n", queue_idx);
rq_sop->data_queue_enable = 1;
rq_data->in_use = 1;
+ /*
+ * HW does not directly support rxmode.max_rx_pkt_len. HW always
+ * receives packet sizes up to the "max" MTU.
+ * If not using scatter, we can achieve the effect of dropping
+ * larger packets by reducing the size of posted buffers.
+ * See enic_alloc_rx_queue_mbufs().
+ */
+ if (max_rx_pkt_len <
+ enic_mtu_to_max_rx_pktlen(enic->max_mtu)) {
+ dev_warning(enic, "rxmode.max_rx_pkt_len is ignored"
+ " when scatter rx mode is in use.\n");
+ }
} else {
dev_info(enic, "Rq %u Scatter rx mode not being used\n",
queue_idx);
@@ -654,8 +773,9 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
nb_data_desc = max_data;
}
if (mbufs_per_pkt > 1) {
- dev_info(enic, "For mtu %d and mbuf size %d valid rx descriptor range is %d to %d\n",
- mtu, mbuf_size, min_sop + min_data,
+ dev_info(enic, "For max packet size %u and mbuf size %u valid"
+ " rx descriptor range is %u to %u\n",
+ max_rx_pkt_len, mbuf_size, min_sop + min_data,
max_sop + max_data);
}
dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
@@ -788,9 +908,8 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
instance++);
wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
- sizeof(uint32_t),
- SOCKET_ID_ANY, 0,
- ENIC_ALIGN);
+ sizeof(uint32_t), SOCKET_ID_ANY,
+ RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN);
if (!wq->cqmsg_rz)
return -ENOMEM;
@@ -802,8 +921,11 @@ int enic_disable(struct enic *enic)
unsigned int i;
int err;
- vnic_intr_mask(&enic->intr);
- (void)vnic_intr_masked(&enic->intr); /* flush write */
+ for (i = 0; i < enic->intr_count; i++) {
+ vnic_intr_mask(&enic->intr[i]);
+ (void)vnic_intr_masked(&enic->intr[i]); /* flush write */
+ }
+ enic_rxq_intr_deinit(enic);
rte_intr_disable(&enic->pdev->intr_handle);
rte_intr_callback_unregister(&enic->pdev->intr_handle,
enic_intr_handler,
@@ -846,7 +968,8 @@ int enic_disable(struct enic *enic)
vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
for (i = 0; i < enic->cq_count; i++)
vnic_cq_clean(&enic->cq[i]);
- vnic_intr_clean(&enic->intr);
+ for (i = 0; i < enic->intr_count; i++)
+ vnic_intr_clean(&enic->intr[i]);
return 0;
}
@@ -879,9 +1002,10 @@ static int enic_dev_wait(struct vnic_dev *vdev,
static int enic_dev_open(struct enic *enic)
{
int err;
+ int flags = CMD_OPENF_IG_DESCCACHE;
err = enic_dev_wait(enic->vdev, vnic_dev_open,
- vnic_dev_open_done, 0);
+ vnic_dev_open_done, flags);
if (err)
dev_err(enic_get_dev(enic),
"vNIC device open failed, err %d\n", err);
@@ -889,44 +1013,42 @@ static int enic_dev_open(struct enic *enic)
return err;
}
-static int enic_set_rsskey(struct enic *enic)
+static int enic_set_rsskey(struct enic *enic, uint8_t *user_key)
{
dma_addr_t rss_key_buf_pa;
union vnic_rss_key *rss_key_buf_va = NULL;
- static union vnic_rss_key rss_key = {
- .key = {
- [0] = {.b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101}},
- [1] = {.b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101}},
- [2] = {.b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115}},
- [3] = {.b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108}},
- }
- };
- int err;
+ int err, i;
u8 name[NAME_MAX];
+ RTE_ASSERT(user_key != NULL);
snprintf((char *)name, NAME_MAX, "rss_key-%s", enic->bdf_name);
rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key),
&rss_key_buf_pa, name);
if (!rss_key_buf_va)
return -ENOMEM;
- rte_memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));
+ for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++)
+ rss_key_buf_va->key[i / 10].b[i % 10] = user_key[i];
err = enic_set_rss_key(enic,
rss_key_buf_pa,
sizeof(union vnic_rss_key));
+ /* Save for later queries */
+ if (!err) {
+ rte_memcpy(&enic->rss_key, rss_key_buf_va,
+ sizeof(union vnic_rss_key));
+ }
enic_free_consistent(enic, sizeof(union vnic_rss_key),
rss_key_buf_va, rss_key_buf_pa);
return err;
}
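/*
 * Editor's illustrative sketch, not part of the patch above: the i/10, i%10
 * indexing used by enic_set_rsskey() scatters the 40-byte user key into four
 * 10-byte sub-keys. The struct below mimics only that layout; the real
 * union vnic_rss_key also carries per-sub-key padding.
 */
#include <stdint.h>

struct rss_key_sketch {
	struct {
		uint8_t b[10];
	} key[4];
};

static void scatter_rss_key(struct rss_key_sketch *dst,
			    const uint8_t user_key[40])
{
	int i;

	for (i = 0; i < 40; i++)
		dst->key[i / 10].b[i % 10] = user_key[i];
}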
-static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
+int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu)
{
dma_addr_t rss_cpu_buf_pa;
union vnic_rss_cpu *rss_cpu_buf_va = NULL;
- int i;
int err;
u8 name[NAME_MAX];
@@ -936,9 +1058,7 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
if (!rss_cpu_buf_va)
return -ENOMEM;
- for (i = 0; i < (1 << rss_hash_bits); i++)
- (*rss_cpu_buf_va).cpu[i / 4].b[i % 4] =
- enic_rte_rq_idx_to_sop_idx(i % enic->rq_count);
+ rte_memcpy(rss_cpu_buf_va, rss_cpu, sizeof(union vnic_rss_cpu));
err = enic_set_rss_cpu(enic,
rss_cpu_buf_pa,
@@ -947,6 +1067,9 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
enic_free_consistent(enic, sizeof(union vnic_rss_cpu),
rss_cpu_buf_va, rss_cpu_buf_pa);
+ /* Save for later queries */
+ if (!err)
+ rte_memcpy(&enic->rss_cpu, rss_cpu, sizeof(union vnic_rss_cpu));
return err;
}
@@ -956,8 +1079,6 @@ static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
const u8 tso_ipid_split_en = 0;
int err;
- /* Enable VLAN tag stripping */
-
err = enic_set_nic_cfg(enic,
rss_default_cpu, rss_hash_type,
rss_hash_bits, rss_base_cpu,
@@ -967,47 +1088,50 @@ static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
return err;
}
-int enic_set_rss_nic_cfg(struct enic *enic)
+/* Initialize RSS with defaults, called from dev_configure */
+int enic_init_rss_nic_cfg(struct enic *enic)
{
- const u8 rss_default_cpu = 0;
- const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
- NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
- NIC_CFG_RSS_HASH_TYPE_IPV6 |
- NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
- const u8 rss_hash_bits = 7;
- const u8 rss_base_cpu = 0;
- u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);
-
- if (rss_enable) {
- if (!enic_set_rsskey(enic)) {
- if (enic_set_rsscpu(enic, rss_hash_bits)) {
- rss_enable = 0;
- dev_warning(enic, "RSS disabled, "\
- "Failed to set RSS cpu indirection table.");
- }
- } else {
- rss_enable = 0;
- dev_warning(enic,
- "RSS disabled, Failed to set RSS key.\n");
+ static uint8_t default_rss_key[] = {
+ 85, 67, 83, 97, 119, 101, 115, 111, 109, 101,
+ 80, 65, 76, 79, 117, 110, 105, 113, 117, 101,
+ 76, 73, 78, 85, 88, 114, 111, 99, 107, 115,
+ 69, 78, 73, 67, 105, 115, 99, 111, 111, 108,
+ };
+ struct rte_eth_rss_conf rss_conf;
+ union vnic_rss_cpu rss_cpu;
+ int ret, i;
+
+ rss_conf = enic->rte_dev->data->dev_conf.rx_adv_conf.rss_conf;
+ /*
+ * If setting key for the first time, and the user gives us none, then
+ * push the default key to NIC.
+ */
+ if (rss_conf.rss_key == NULL) {
+ rss_conf.rss_key = default_rss_key;
+ rss_conf.rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
+ }
+ ret = enic_set_rss_conf(enic, &rss_conf);
+ if (ret) {
+ dev_err(enic, "Failed to configure RSS\n");
+ return ret;
+ }
+ if (enic->rss_enable) {
+ /* If enabling RSS, use the default reta */
+ for (i = 0; i < ENIC_RSS_RETA_SIZE; i++) {
+ rss_cpu.cpu[i / 4].b[i % 4] =
+ enic_rte_rq_idx_to_sop_idx(i % enic->rq_count);
}
+ ret = enic_set_rss_reta(enic, &rss_cpu);
+ if (ret)
+ dev_err(enic, "Failed to set RSS indirection table\n");
}
-
- return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
- rss_hash_bits, rss_base_cpu, rss_enable);
+ return ret;
}
int enic_setup_finish(struct enic *enic)
{
- int ret;
-
enic_init_soft_stats(enic);
- ret = enic_set_rss_nic_cfg(enic);
- if (ret) {
- dev_err(enic, "Failed to config nic, aborting.\n");
- return -1;
- }
-
/* Default conf */
vnic_dev_packet_filter(enic->vdev,
1 /* directed */,
@@ -1022,6 +1146,112 @@ int enic_setup_finish(struct enic *enic)
return 0;
}
+static int enic_rss_conf_valid(struct enic *enic,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ /* RSS is disabled per VIC settings. Ignore rss_conf. */
+ if (enic->flow_type_rss_offloads == 0)
+ return 0;
+ if (rss_conf->rss_key != NULL &&
+ rss_conf->rss_key_len != ENIC_RSS_HASH_KEY_SIZE) {
+ dev_err(enic, "Given rss_key is %d bytes, it must be %d\n",
+ rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
+ return -EINVAL;
+ }
+ if (rss_conf->rss_hf != 0 &&
+ (rss_conf->rss_hf & enic->flow_type_rss_offloads) == 0) {
+ dev_err(enic, "Given rss_hf contains none of the supported"
+ " types\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Set hash type and key according to rss_conf */
+int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
+{
+ struct rte_eth_dev *eth_dev;
+ uint64_t rss_hf;
+ u8 rss_hash_type;
+ u8 rss_enable;
+ int ret;
+
+ RTE_ASSERT(rss_conf != NULL);
+ ret = enic_rss_conf_valid(enic, rss_conf);
+ if (ret) {
+ dev_err(enic, "RSS configuration (rss_conf) is invalid\n");
+ return ret;
+ }
+
+ eth_dev = enic->rte_dev;
+ rss_hash_type = 0;
+ rss_hf = rss_conf->rss_hf & enic->flow_type_rss_offloads;
+ if (enic->rq_count > 1 &&
+ (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) &&
+ rss_hf != 0) {
+ rss_enable = 1;
+ if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_OTHER))
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV4;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV4;
+ if (ENIC_SETTING(enic, RSSHASH_UDP_WEAK)) {
+ /*
+ * 'TCP' is not a typo. The "weak" version of
+ * UDP RSS requires both the TCP and UDP bits
+ * be set. It does enable TCP RSS as well.
+ */
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
+ }
+ }
+ if (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_IPV6_EX |
+ ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER))
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6;
+ if (rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX))
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
+ if (rss_hf & (ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX)) {
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV6;
+ if (ENIC_SETTING(enic, RSSHASH_UDP_WEAK))
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
+ }
+ } else {
+ rss_enable = 0;
+ rss_hf = 0;
+ }
+
+ /* Set the hash key if provided */
+ if (rss_enable && rss_conf->rss_key) {
+ ret = enic_set_rsskey(enic, rss_conf->rss_key);
+ if (ret) {
+ dev_err(enic, "Failed to set RSS key\n");
+ return ret;
+ }
+ }
+
+ ret = enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, rss_hash_type,
+ ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU,
+ rss_enable);
+ if (!ret) {
+ enic->rss_hf = rss_hf;
+ enic->rss_hash_type = rss_hash_type;
+ enic->rss_enable = rss_enable;
+ }
+ return 0;
+}
+
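/*
 * Editor's illustrative sketch, not part of the patch above: an
 * application-side RSS configuration that reaches enic_set_rss_conf() via
 * rte_eth_dev_configure(). The key bytes, port id and queue count are
 * hypothetical; rss_key may also be left NULL to use the driver default key.
 */
#include <rte_ethdev.h>

static uint8_t app_rss_key[40]; /* 40 bytes, matching ENIC_RSS_HASH_KEY_SIZE */

static int configure_rss(uint16_t port_id, uint16_t nb_rxq)
{
	struct rte_eth_conf conf = {
		.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = app_rss_key,
				.rss_key_len = sizeof(app_rss_key),
				.rss_hf = ETH_RSS_IP | ETH_RSS_TCP,
			},
		},
	};

	return rte_eth_dev_configure(port_id, nb_rxq, 1, &conf);
}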
+int enic_set_vlan_strip(struct enic *enic)
+{
+ /*
+ * Unfortunately, VLAN strip on/off and RSS on/off are configured
+ * together. So, re-do niccfg, preserving the current RSS settings.
+ */
+ return enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, enic->rss_hash_type,
+ ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU,
+ enic->rss_enable);
+}
+
void enic_add_packet_filter(struct enic *enic)
{
/* Args -> directed, multicast, broadcast, promisc, allmulti */
@@ -1043,6 +1273,7 @@ static void enic_dev_deinit(struct enic *enic)
rte_free(eth_dev->data->mac_addrs);
rte_free(enic->cq);
+ rte_free(enic->intr);
rte_free(enic->rq);
rte_free(enic->wq);
}
@@ -1052,12 +1283,16 @@ int enic_set_vnic_res(struct enic *enic)
{
struct rte_eth_dev *eth_dev = enic->rte_dev;
int rc = 0;
- unsigned int required_rq, required_wq, required_cq;
+ unsigned int required_rq, required_wq, required_cq, required_intr;
/* Always use two vNIC RQs per eth_dev RQ, regardless of Rx scatter. */
required_rq = eth_dev->data->nb_rx_queues * 2;
required_wq = eth_dev->data->nb_tx_queues;
required_cq = eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues;
+ required_intr = 1; /* 1 for LSC even if intr_conf.lsc is 0 */
+ if (eth_dev->data->dev_conf.intr_conf.rxq) {
+ required_intr += eth_dev->data->nb_rx_queues;
+ }
if (enic->conf_rq_count < required_rq) {
dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
@@ -1076,11 +1311,18 @@ int enic_set_vnic_res(struct enic *enic)
required_cq, enic->conf_cq_count);
rc = -EINVAL;
}
+ if (enic->conf_intr_count < required_intr) {
+ dev_err(dev, "Not enough Interrupts to support Rx queue"
+ " interrupts. Required:%u, Configured:%u\n",
+ required_intr, enic->conf_intr_count);
+ rc = -EINVAL;
+ }
if (rc == 0) {
enic->rq_count = eth_dev->data->nb_rx_queues;
enic->wq_count = eth_dev->data->nb_tx_queues;
enic->cq_count = enic->rq_count + enic->wq_count;
+ enic->intr_count = required_intr;
}
return rc;
@@ -1176,20 +1418,26 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
"MTU (%u) is greater than value configured in NIC (%u)\n",
new_mtu, config_mtu);
- /* The easy case is when scatter is disabled. However if the MTU
- * becomes greater than the mbuf data size, packet drops will ensue.
+ /* Update the MTU and maximum packet length */
+ eth_dev->data->mtu = new_mtu;
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
+ enic_mtu_to_max_rx_pktlen(new_mtu);
+
+ /*
+ * If the device has not started (enic_enable), nothing to do.
+ * Later, enic_enable() will set up RQs reflecting the new maximum
+ * packet length.
*/
- if (!(enic->rte_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_SCATTER)) {
- eth_dev->data->mtu = new_mtu;
+ if (!eth_dev->data->dev_started)
goto set_mtu_done;
- }
- /* Rx scatter is enabled so reconfigure RQ's on the fly. The point is to
- * change Rx scatter mode if necessary for better performance. I.e. if
- * MTU was greater than the mbuf size and now it's less, scatter Rx
- * doesn't have to be used and vice versa.
- */
+ /*
+ * The device has started, re-do RQs on the fly. In the process, we
+ * pick up the new maximum packet length.
+ *
+ * Some applications rely on the ability to change MTU without stopping
+ * the device. So keep this behavior for now.
+ */
rte_spinlock_lock(&enic->mtu_lock);
/* Stop traffic on all RQs */
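/*
 * Editor's illustrative sketch, not part of the patch above: with this
 * change an application may update the MTU on a started port and the PMD
 * rebuilds the RQs with the new maximum packet length. Port id and MTU
 * value are hypothetical.
 */
#include <rte_ethdev.h>

static int bump_mtu(uint16_t port_id, uint16_t new_mtu)
{
	/* No need to call rte_eth_dev_stop() first for enic */
	return rte_eth_dev_set_mtu(port_id, new_mtu);
}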
@@ -1214,12 +1462,12 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
/* now it is safe to reconfigure the RQs */
- /* update the mtu */
- eth_dev->data->mtu = new_mtu;
/* free and reallocate RQs with the new MTU */
for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
+ if (!rq->in_use)
+ continue;
enic_free_rq(rq);
rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp,
@@ -1282,6 +1530,8 @@ static int enic_dev_init(struct enic *enic)
/* Queue counts may be zeros. rte_zmalloc returns NULL in that case. */
enic->cq = rte_zmalloc("enic_vnic_cq", sizeof(struct vnic_cq) *
enic->conf_cq_count, 8);
+ enic->intr = rte_zmalloc("enic_vnic_intr", sizeof(struct vnic_intr) *
+ enic->conf_intr_count, 8);
enic->rq = rte_zmalloc("enic_vnic_rq", sizeof(struct vnic_rq) *
enic->conf_rq_count, 8);
enic->wq = rte_zmalloc("enic_vnic_wq", sizeof(struct vnic_wq) *
@@ -1290,6 +1540,10 @@ static int enic_dev_init(struct enic *enic)
dev_err(enic, "failed to allocate vnic_cq, aborting.\n");
return -1;
}
+ if (enic->conf_intr_count > 0 && enic->intr == NULL) {
+ dev_err(enic, "failed to allocate vnic_intr, aborting.\n");
+ return -1;
+ }
if (enic->conf_rq_count > 0 && enic->rq == NULL) {
dev_err(enic, "failed to allocate vnic_rq, aborting.\n");
return -1;
@@ -1319,6 +1573,27 @@ static int enic_dev_init(struct enic *enic)
/* set up link status checking */
vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
+ enic->overlay_offload = false;
+ if (!enic->disable_overlay && enic->vxlan &&
+ /* 'VXLAN feature' enables VXLAN, NVGRE, and GENEVE. */
+ vnic_dev_overlay_offload_ctrl(enic->vdev,
+ OVERLAY_FEATURE_VXLAN,
+ OVERLAY_OFFLOAD_ENABLE) == 0) {
+ enic->tx_offload_capa |=
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
+ /*
+ * Do not add PKT_TX_OUTER_{IPV4,IPV6} as they are not
+ * 'offload' flags (i.e. not part of PKT_TX_OFFLOAD_MASK).
+ */
+ enic->tx_offload_mask |=
+ PKT_TX_OUTER_IP_CKSUM |
+ PKT_TX_TUNNEL_MASK;
+ enic->overlay_offload = true;
+ dev_info(enic, "Overlay offload is enabled\n");
+ }
+
return 0;
}
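/*
 * Editor's illustrative sketch, not part of the patch above: mbuf metadata
 * an application would set to use the VXLAN Tx offloads enabled above. The
 * header lengths assume untagged Ethernet and option-less IPv4 and are only
 * illustrative; see the rte_mbuf Tx offload documentation for the exact
 * accounting rules.
 */
#include <rte_mbuf.h>

static void request_vxlan_tx_offload(struct rte_mbuf *m)
{
	m->ol_flags |= PKT_TX_TUNNEL_VXLAN | PKT_TX_OUTER_IP_CKSUM;
	m->outer_l2_len = 14;		/* outer Ethernet */
	m->outer_l3_len = 20;		/* outer IPv4 */
	/* l2_len spans the tunnel headers (UDP + VXLAN) plus inner Ethernet */
	m->l2_len = 8 + 8 + 14;
	m->l3_len = 20;			/* inner IPv4 */
}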
@@ -1351,6 +1626,15 @@ int enic_probe(struct enic *enic)
enic_alloc_consistent,
enic_free_consistent);
+ /*
+ * Allocate the consistent memory for stats upfront so both primary and
+ * secondary processes can dump stats.
+ */
+ err = vnic_dev_alloc_stats_mem(enic->vdev);
+ if (err) {
+ dev_err(enic, "Failed to allocate cmd memory, aborting\n");
+ goto err_out_unregister;
+ }
/* Issue device open to get device in known state */
err = enic_dev_open(enic);
if (err) {
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index c99d6183..6b404c3c 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -76,19 +76,24 @@ int enic_get_vnic_config(struct enic *enic)
? "" : "not "));
err = vnic_dev_capable_filter_mode(enic->vdev, &enic->flow_filter_mode,
- &enic->filter_tags);
+ &enic->filter_actions);
if (err) {
dev_err(enic_get_dev(enic),
"Error getting filter modes, %d\n", err);
return err;
}
- dev_info(enic, "Flow api filter mode: %s, Filter tagging %savailable\n",
+ dev_info(enic, "Flow api filter mode: %s Actions: %s%s%s\n",
((enic->flow_filter_mode == FILTER_DPDK_1) ? "DPDK" :
((enic->flow_filter_mode == FILTER_USNIC_IP) ? "USNIC" :
((enic->flow_filter_mode == FILTER_IPV4_5TUPLE) ? "5TUPLE" :
"NONE"))),
- ((enic->filter_tags) ? "" : "not "));
+ ((enic->filter_actions & FILTER_ACTION_RQ_STEERING_FLAG) ?
+ "steer " : ""),
+ ((enic->filter_actions & FILTER_ACTION_FILTER_ID_FLAG) ?
+ "tag " : ""),
+ ((enic->filter_actions & FILTER_ACTION_DROP_FLAG) ?
+ "drop " : ""));
c->wq_desc_count =
min_t(u32, ENIC_MAX_WQ_DESCS,
@@ -117,7 +122,10 @@ int enic_get_vnic_config(struct enic *enic)
"loopback tag 0x%04x\n",
ENIC_SETTING(enic, TXCSUM) ? "yes" : "no",
ENIC_SETTING(enic, RXCSUM) ? "yes" : "no",
- ENIC_SETTING(enic, RSS) ? "yes" : "no",
+ ENIC_SETTING(enic, RSS) ?
+ (ENIC_SETTING(enic, RSSHASH_UDPIPV4) ? "+UDP" :
+ ((ENIC_SETTING(enic, RSSHASH_UDP_WEAK) ? "+udp" :
+ "yes"))) : "no",
c->intr_mode == VENET_INTR_MODE_INTX ? "INTx" :
c->intr_mode == VENET_INTR_MODE_MSI ? "MSI" :
c->intr_mode == VENET_INTR_MODE_ANY ? "any" :
@@ -128,6 +136,72 @@ int enic_get_vnic_config(struct enic *enic)
c->intr_timer_usec,
c->loop_tag);
+ /* RSS settings from vNIC */
+ enic->reta_size = ENIC_RSS_RETA_SIZE;
+ enic->hash_key_size = ENIC_RSS_HASH_KEY_SIZE;
+ enic->flow_type_rss_offloads = 0;
+ if (ENIC_SETTING(enic, RSSHASH_IPV4))
+ /*
+ * IPV4 hash type handles both non-frag and frag packet types.
+ * TCP/UDP is controlled via a separate flag below.
+ */
+ enic->flow_type_rss_offloads |= ETH_RSS_IPV4 |
+ ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER;
+ if (ENIC_SETTING(enic, RSSHASH_TCPIPV4))
+ enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_TCP;
+ if (ENIC_SETTING(enic, RSSHASH_IPV6))
+ /*
+ * The VIC adapter can perform RSS on IPv6 packets with and
+ * without extension headers. An IPv6 "fragment" is an IPv6
+ * packet with the fragment extension header.
+ */
+ enic->flow_type_rss_offloads |= ETH_RSS_IPV6 |
+ ETH_RSS_IPV6_EX | ETH_RSS_FRAG_IPV6 |
+ ETH_RSS_NONFRAG_IPV6_OTHER;
+ if (ENIC_SETTING(enic, RSSHASH_TCPIPV6))
+ enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_TCP |
+ ETH_RSS_IPV6_TCP_EX;
+ if (ENIC_SETTING(enic, RSSHASH_UDP_WEAK))
+ enic->flow_type_rss_offloads |=
+ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP |
+ ETH_RSS_IPV6_UDP_EX;
+ if (ENIC_SETTING(enic, RSSHASH_UDPIPV4))
+ enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_UDP;
+ if (ENIC_SETTING(enic, RSSHASH_UDPIPV6))
+ enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_UDP |
+ ETH_RSS_IPV6_UDP_EX;
+
+ /* Zero offloads if RSS is not enabled */
+ if (!ENIC_SETTING(enic, RSS))
+ enic->flow_type_rss_offloads = 0;
+
+ enic->vxlan = ENIC_SETTING(enic, VXLAN) &&
+ vnic_dev_capable_vxlan(enic->vdev);
+ /*
+ * Default hardware capabilities. enic_dev_init() may add additional
+ * flags if it enables overlay offloads.
+ */
+ enic->tx_offload_capa =
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
+ enic->rx_offload_capa =
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ enic->tx_offload_mask =
+ PKT_TX_VLAN_PKT |
+ PKT_TX_IP_CKSUM |
+ PKT_TX_L4_MASK |
+ PKT_TX_TCP_SEG;
+
return 0;
}
@@ -202,7 +276,8 @@ void enic_free_vnic_resources(struct enic *enic)
vnic_rq_free(&enic->rq[i]);
for (i = 0; i < enic->cq_count; i++)
vnic_cq_free(&enic->cq[i]);
- vnic_intr_free(&enic->intr);
+ for (i = 0; i < enic->intr_count; i++)
+ vnic_intr_free(&enic->intr[i]);
}
void enic_get_res_counts(struct enic *enic)
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
index cf3a6fde..e68f1307 100644
--- a/drivers/net/enic/enic_res.h
+++ b/drivers/net/enic/enic_res.h
@@ -31,6 +31,12 @@
#define ENIC_DEFAULT_RX_FREE_THRESH 32
#define ENIC_TX_XMIT_MAX 64
+#define ENIC_RSS_DEFAULT_CPU 0
+#define ENIC_RSS_BASE_CPU 0
+#define ENIC_RSS_HASH_BITS 7
+#define ENIC_RSS_RETA_SIZE (1 << ENIC_RSS_HASH_BITS)
+#define ENIC_RSS_HASH_KEY_SIZE 40
+
#define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 2fe5a3fa..8853a204 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -15,15 +15,6 @@
#include <rte_ip.h>
#include <rte_tcp.h>
-#define ENIC_TX_OFFLOAD_MASK ( \
- PKT_TX_VLAN_PKT | \
- PKT_TX_IP_CKSUM | \
- PKT_TX_L4_MASK | \
- PKT_TX_TCP_SEG)
-
-#define ENIC_TX_OFFLOAD_NOTSUP_MASK \
- (PKT_TX_OFFLOAD_MASK ^ ENIC_TX_OFFLOAD_MASK)
-
#define RTE_PMD_USE_PREFETCH
#ifdef RTE_PMD_USE_PREFETCH
@@ -130,30 +121,73 @@ enic_cq_rx_check_err(struct cq_desc *cqd)
/* Lookup table to translate RX CQ flags to mbuf flags. */
static inline uint32_t
-enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
+enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd, uint8_t tnl)
{
struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
uint8_t cqrd_flags = cqrd->flags;
+ /*
+ * Odd-numbered entries are for tunnel packets. All packet type info
+ * applies to the inner packet, and there is no info on the outer
+ * packet. The outer flags in these entries exist only to avoid
+	 * changing enic_cq_rx_to_pkt_flags(). They are cleared from the mbuf
+	 * afterwards.
+	 *
+	 * Also, as there is no tunnel type info (VXLAN, NVGRE, or GENEVE), set
+	 * RTE_PTYPE_TUNNEL_GRENAT.
+ */
static const uint32_t cq_type_table[128] __rte_cache_aligned = {
[0x00] = RTE_PTYPE_UNKNOWN,
[0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
+ [0x21] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
[0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+ [0x23] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
[0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+ [0x25] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
[0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x61] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
[0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+ [0x63] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
[0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+ [0x65] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
[0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
+ [0x11] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
[0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+ [0x13] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
[0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
- [0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
- [0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
- [0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+ [0x15] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
/* All others reserved */
};
cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
| CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
| CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
- return cq_type_table[cqrd_flags];
+ return cq_type_table[cqrd_flags + tnl];
}
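/*
 * Editor's illustrative sketch, not part of the patch above: the even/odd
 * indexing trick used by the lookup table. Non-tunnel packet types occupy
 * even slots and the matching tunnel variant sits in the following odd slot,
 * so adding a 0/1 "tnl" flag selects between them without a branch. The
 * values below are arbitrary placeholders.
 */
#include <stdint.h>

static uint32_t lookup_ptype(uint8_t masked_flags, uint8_t tnl)
{
	static const uint32_t table[] = {
		[0x0] = 0,	/* unknown */
		[0x2] = 0x11,	/* e.g. IPv4/UDP */
		[0x3] = 0x21,	/* the same flags, inner packet of a tunnel */
	};

	/* masked_flags only takes even values; tnl is 0 or 1 */
	return table[masked_flags + tnl];
}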
static inline void
@@ -200,10 +234,18 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
uint32_t l4_flags;
l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;
- if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
- pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
- else if (mbuf->packet_type & RTE_PTYPE_L3_IPV4)
- pkt_flags |= PKT_RX_IP_CKSUM_BAD;
+ /*
+ * When overlay offload is enabled, the NIC may
+	 * set ipv4_csum_ok=1 if the inner packet is IPv6.
+ * So, explicitly check for IPv4 before checking
+ * ipv4_csum_ok.
+ */
+ if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
+ if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
+ pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
+ else
+ pkt_flags |= PKT_RX_IP_CKSUM_BAD;
+ }
if (l4_flags == RTE_PTYPE_L4_UDP ||
l4_flags == RTE_PTYPE_L4_TCP) {
@@ -245,6 +287,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
struct vnic_cq *cq;
volatile struct cq_desc *cqd_ptr;
uint8_t color;
+ uint8_t tnl;
uint16_t seg_length;
struct rte_mbuf *first_seg = sop_rq->pkt_first_seg;
struct rte_mbuf *last_seg = sop_rq->pkt_last_seg;
@@ -336,10 +379,21 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
continue;
}
+ /*
+ * When overlay offload is enabled, CQ.fcoe indicates the
+ * packet is tunnelled.
+ */
+ tnl = enic->overlay_offload &&
+ (ciflags & CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0;
/* cq rx flags are only valid if eop bit is set */
- first_seg->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
+ first_seg->packet_type =
+ enic_cq_rx_flags_to_pkt_type(&cqd, tnl);
enic_cq_rx_to_pkt_flags(&cqd, first_seg);
-
+ /* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */
+ if (tnl) {
+ first_seg->packet_type &= ~(RTE_PTYPE_L3_MASK |
+ RTE_PTYPE_L4_MASK);
+ }
if (unlikely(packet_error)) {
rte_pktmbuf_free(first_seg);
rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
@@ -443,9 +497,10 @@ unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
return 0;
}
-uint16_t enic_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
+ struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
int32_t ret;
uint16_t i;
uint64_t ol_flags;
@@ -454,8 +509,8 @@ uint16_t enic_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
for (i = 0; i != nb_pkts; i++) {
m = tx_pkts[i];
ol_flags = m->ol_flags;
- if (ol_flags & ENIC_TX_OFFLOAD_NOTSUP_MASK) {
- rte_errno = -ENOTSUP;
+ if (ol_flags & wq->tx_offload_notsup_mask) {
+ rte_errno = ENOTSUP;
return i;
}
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
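/*
 * Editor's illustrative sketch, not part of the patch above: how an
 * application observes the rte_errno convention fixed here. tx_prepare
 * returns the index of the first rejected packet and leaves a positive
 * errno value in rte_errno.
 */
#include <errno.h>
#include <rte_errno.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>

static uint16_t prepare_burst(uint16_t port_id, uint16_t queue_id,
			      struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_prep;

	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
	if (nb_prep < nb_pkts && rte_errno == ENOTSUP) {
		/* pkts[nb_prep] asked for an offload the port cannot do */
	}
	return nb_prep;
}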
@@ -558,6 +613,11 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
mss = tx_pkt->tso_segsz;
+ /* For tunnel, need the size of outer+inner headers */
+ if (ol_flags & PKT_TX_TUNNEL_MASK) {
+ header_len += tx_pkt->outer_l2_len +
+ tx_pkt->outer_l3_len;
+ }
}
if ((ol_flags & ol_flags_mask) && (header_len == 0)) {
diff --git a/drivers/net/enic/meson.build b/drivers/net/enic/meson.build
new file mode 100644
index 00000000..bfd4e237
--- /dev/null
+++ b/drivers/net/enic/meson.build
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cisco Systems, Inc.
+
+sources = files(
+ 'base/vnic_cq.c',
+ 'base/vnic_dev.c',
+ 'base/vnic_intr.c',
+ 'base/vnic_rq.c',
+ 'base/vnic_rss.c',
+ 'base/vnic_wq.c',
+ 'enic_clsf.c',
+ 'enic_ethdev.c',
+ 'enic_flow.c',
+ 'enic_main.c',
+ 'enic_res.c',
+ 'enic_rxtx.c',
+ )
+deps += ['hash']
+includes += include_directories('base')
diff --git a/drivers/net/failsafe/Makefile b/drivers/net/failsafe/Makefile
index bd2f0198..81802d09 100644
--- a/drivers/net/failsafe/Makefile
+++ b/drivers/net/failsafe/Makefile
@@ -1,33 +1,6 @@
-# BSD LICENSE
-#
-# Copyright 2017 6WIND S.A.
-# Copyright 2017 Mellanox.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of 6WIND S.A. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 6WIND S.A.
+# Copyright 2017 Mellanox Technologies, Ltd
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/net/failsafe/failsafe.c b/drivers/net/failsafe/failsafe.c
index c499bfb9..eafbb75d 100644
--- a/drivers/net/failsafe/failsafe.c
+++ b/drivers/net/failsafe/failsafe.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <rte_alarm.h>
@@ -13,6 +13,8 @@
#include "failsafe_private.h"
+int failsafe_logtype;
+
const char pmd_failsafe_driver_name[] = FAILSAFE_DRIVER_NAME;
static const struct rte_eth_link eth_link = {
.link_speed = ETH_SPEED_NUM_10G,
@@ -202,16 +204,25 @@ fs_eth_dev_create(struct rte_vdev_device *vdev)
}
snprintf(priv->my_owner.name, sizeof(priv->my_owner.name),
FAILSAFE_OWNER_NAME);
+ DEBUG("Failsafe port %u owner info: %s_%016"PRIX64, dev->data->port_id,
+ priv->my_owner.name, priv->my_owner.id);
+ ret = rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_NEW,
+ failsafe_eth_new_event_callback,
+ dev);
+ if (ret) {
+ ERROR("Failed to register NEW callback");
+ goto free_args;
+ }
ret = failsafe_eal_init(dev);
if (ret)
- goto free_args;
+ goto unregister_new_callback;
ret = fs_mutex_init(priv);
if (ret)
- goto free_args;
+ goto unregister_new_callback;
ret = failsafe_hotplug_alarm_install(dev);
if (ret) {
ERROR("Could not set up plug-in event detection");
- goto free_args;
+ goto unregister_new_callback;
}
mac = &dev->data->mac_addrs[0];
if (mac_from_arg) {
@@ -224,7 +235,7 @@ fs_eth_dev_create(struct rte_vdev_device *vdev)
mac);
if (ret) {
ERROR("Failed to set default MAC address");
- goto free_args;
+ goto cancel_alarm;
}
}
} else {
@@ -257,7 +268,13 @@ fs_eth_dev_create(struct rte_vdev_device *vdev)
.fd = -1,
.type = RTE_INTR_HANDLE_EXT,
};
+ rte_eth_dev_probing_finish(dev);
return 0;
+cancel_alarm:
+ failsafe_hotplug_alarm_cancel(dev);
+unregister_new_callback:
+ rte_eth_dev_callback_unregister(RTE_ETH_ALL, RTE_ETH_EVENT_NEW,
+ failsafe_eth_new_event_callback, dev);
free_args:
failsafe_args_free(dev);
free_subs:
@@ -277,6 +294,8 @@ fs_rte_eth_free(const char *name)
dev = rte_eth_dev_allocated(name);
if (dev == NULL)
return -ENODEV;
+ rte_eth_dev_callback_unregister(RTE_ETH_ALL, RTE_ETH_EVENT_NEW,
+ failsafe_eth_new_event_callback, dev);
ret = failsafe_eal_uninit(dev);
if (ret)
ERROR("Error while uninitializing sub-EAL");
@@ -294,10 +313,25 @@ static int
rte_pmd_failsafe_probe(struct rte_vdev_device *vdev)
{
const char *name;
+ struct rte_eth_dev *eth_dev;
name = rte_vdev_device_name(vdev);
INFO("Initializing " FAILSAFE_DRIVER_NAME " for %s",
name);
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(rte_vdev_device_args(vdev)) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ ERROR("Failed to probe %s", name);
+ return -1;
+ }
+ /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->dev_ops = &failsafe_ops;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
+
return fs_eth_dev_create(vdev);
}
@@ -318,3 +352,12 @@ static struct rte_vdev_driver failsafe_drv = {
RTE_PMD_REGISTER_VDEV(net_failsafe, failsafe_drv);
RTE_PMD_REGISTER_PARAM_STRING(net_failsafe, PMD_FAILSAFE_PARAM_STRING);
+
+RTE_INIT(failsafe_init_log);
+static void
+failsafe_init_log(void)
+{
+ failsafe_logtype = rte_log_register("pmd.net.failsafe");
+ if (failsafe_logtype >= 0)
+ rte_log_set_level(failsafe_logtype, RTE_LOG_NOTICE);
+}
diff --git a/drivers/net/failsafe/failsafe_args.c b/drivers/net/failsafe/failsafe_args.c
index 366dbea1..2c002b16 100644
--- a/drivers/net/failsafe/failsafe_args.c
+++ b/drivers/net/failsafe/failsafe_args.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <fcntl.h>
@@ -14,6 +14,7 @@
#include <rte_devargs.h>
#include <rte_malloc.h>
#include <rte_kvargs.h>
+#include <rte_string_fns.h>
#include "failsafe_private.h"
@@ -62,7 +63,7 @@ fs_parse_device(struct sub_device *sdev, char *args)
d = &sdev->devargs;
DEBUG("%s", args);
- ret = rte_eal_devargs_parse(args, d);
+ ret = rte_devargs_parse(d, "%s", args);
if (ret) {
DEBUG("devargs parsing failed with code %d", ret);
return ret;
@@ -340,7 +341,7 @@ fs_remove_sub_devices_definition(char params[DEVARGS_MAXLEN])
a = b + 1;
}
out:
- snprintf(params, DEVARGS_MAXLEN, "%s", buffer);
+ strlcpy(params, buffer, DEVARGS_MAXLEN);
return 0;
}
@@ -392,7 +393,7 @@ failsafe_args_parse(struct rte_eth_dev *dev, const char *params)
ret = 0;
priv->subs_tx = FAILSAFE_MAX_ETHPORTS;
/* default parameters */
- n = snprintf(mut_params, sizeof(mut_params), "%s", params);
+ n = strlcpy(mut_params, params, sizeof(mut_params));
if (n >= sizeof(mut_params)) {
ERROR("Parameter string too long (>=%zu)",
sizeof(mut_params));
diff --git a/drivers/net/failsafe/failsafe_eal.c b/drivers/net/failsafe/failsafe_eal.c
index c3d67312..5672f396 100644
--- a/drivers/net/failsafe/failsafe_eal.c
+++ b/drivers/net/failsafe/failsafe_eal.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <rte_malloc.h>
@@ -18,8 +18,9 @@ fs_ethdev_portid_get(const char *name, uint16_t *port_id)
return -EINVAL;
}
len = strlen(name);
- RTE_ETH_FOREACH_DEV(pid) {
- if (!strncmp(name, rte_eth_devices[pid].device->name, len)) {
+ for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
+ if (rte_eth_dev_is_valid_port(pid) &&
+ !strncmp(name, rte_eth_devices[pid].device->name, len)) {
*port_id = pid;
return 0;
}
@@ -41,6 +42,8 @@ fs_bus_init(struct rte_eth_dev *dev)
continue;
da = &sdev->devargs;
if (fs_ethdev_portid_get(da->name, &pid) != 0) {
+ struct rte_eth_dev_owner pid_owner;
+
ret = rte_eal_hotplug_add(da->bus->name,
da->name,
da->args);
@@ -55,12 +58,26 @@ fs_bus_init(struct rte_eth_dev *dev)
ERROR("sub_device %d init went wrong", i);
return -ENODEV;
}
+ /*
+			 * The NEW callback tried to take ownership; check
+			 * whether it succeeded.
+ */
+ rte_eth_dev_owner_get(pid, &pid_owner);
+ if (pid_owner.id != PRIV(dev)->my_owner.id) {
+				INFO("sub_device %d owner (%s_%016"PRIX64") is not"
+				     " mine (%s_%016"PRIX64"), will try again later",
+ i, pid_owner.name, pid_owner.id,
+ PRIV(dev)->my_owner.name,
+ PRIV(dev)->my_owner.id);
+ continue;
+ }
} else {
+ /* The sub-device port was found. */
char devstr[DEVARGS_MAXLEN] = "";
struct rte_devargs *probed_da =
rte_eth_devices[pid].device->devargs;
- /* Take control of device probed by EAL options. */
+ /* Take control of probed device. */
free(da->args);
memset(da, 0, sizeof(*da));
if (probed_da != NULL)
@@ -69,7 +86,7 @@ fs_bus_init(struct rte_eth_dev *dev)
else
snprintf(devstr, sizeof(devstr), "%s",
rte_eth_devices[pid].device->name);
- ret = rte_eal_devargs_parse(devstr, da);
+ ret = rte_devargs_parse(da, "%s", devstr);
if (ret) {
ERROR("Probed devargs parsing failed with code"
" %d", ret);
@@ -77,28 +94,28 @@ fs_bus_init(struct rte_eth_dev *dev)
}
INFO("Taking control of a probed sub device"
" %d named %s", i, da->name);
- }
- ret = rte_eth_dev_owner_set(pid, &PRIV(dev)->my_owner);
- if (ret < 0) {
- INFO("sub_device %d owner set failed (%s),"
- " will try again later", i, strerror(-ret));
- continue;
- } else if (strncmp(rte_eth_devices[pid].device->name, da->name,
- strlen(da->name)) != 0) {
- /*
- * The device probably was removed and its port id was
- * reallocated before ownership set.
- */
- rte_eth_dev_owner_unset(pid, PRIV(dev)->my_owner.id);
- INFO("sub_device %d was probably removed before taking"
- " ownership, will try again later", i);
- continue;
+ ret = rte_eth_dev_owner_set(pid, &PRIV(dev)->my_owner);
+ if (ret < 0) {
+ INFO("sub_device %d owner set failed (%s), "
+ "will try again later", i, strerror(-ret));
+ continue;
+ } else if (strncmp(rte_eth_devices[pid].device->name,
+ da->name, strlen(da->name)) != 0) {
+ /*
+ * The device probably was removed and its port
+ * id was reallocated before ownership set.
+ */
+ rte_eth_dev_owner_unset(pid,
+ PRIV(dev)->my_owner.id);
+ INFO("sub_device %d was removed before taking"
+ " ownership, will try again later", i);
+ continue;
+ }
}
ETH(sdev) = &rte_eth_devices[pid];
SUB_ID(sdev) = i;
sdev->fs_dev = dev;
sdev->dev = ETH(sdev)->device;
- ETH(sdev)->state = RTE_ETH_DEV_DEFERRED;
sdev->state = DEV_PROBED;
}
return 0;
diff --git a/drivers/net/failsafe/failsafe_ether.c b/drivers/net/failsafe/failsafe_ether.c
index 2c0bf936..5b5cb3b4 100644
--- a/drivers/net/failsafe/failsafe_ether.c
+++ b/drivers/net/failsafe/failsafe_ether.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <unistd.h>
@@ -260,6 +260,7 @@ fs_dev_remove(struct sub_device *sdev)
sdev->state = DEV_ACTIVE;
/* fallthrough */
case DEV_ACTIVE:
+ failsafe_eth_dev_unregister_callbacks(sdev);
rte_eth_dev_close(PORT_ID(sdev));
sdev->state = DEV_PROBED;
/* fallthrough */
@@ -321,6 +322,35 @@ fs_rxtx_clean(struct sub_device *sdev)
}
void
+failsafe_eth_dev_unregister_callbacks(struct sub_device *sdev)
+{
+ int ret;
+
+ if (sdev == NULL)
+ return;
+ if (sdev->rmv_callback) {
+ ret = rte_eth_dev_callback_unregister(PORT_ID(sdev),
+ RTE_ETH_EVENT_INTR_RMV,
+ failsafe_eth_rmv_event_callback,
+ sdev);
+ if (ret)
+ WARN("Failed to unregister RMV callback for sub_device"
+ " %d", SUB_ID(sdev));
+ sdev->rmv_callback = 0;
+ }
+ if (sdev->lsc_callback) {
+ ret = rte_eth_dev_callback_unregister(PORT_ID(sdev),
+ RTE_ETH_EVENT_INTR_LSC,
+ failsafe_eth_lsc_event_callback,
+ sdev);
+ if (ret)
+ WARN("Failed to unregister LSC callback for sub_device"
+ " %d", SUB_ID(sdev));
+ sdev->lsc_callback = 0;
+ }
+}
+
+void
failsafe_dev_remove(struct rte_eth_dev *dev)
{
struct sub_device *sdev;
@@ -463,3 +493,26 @@ failsafe_eth_lsc_event_callback(uint16_t port_id __rte_unused,
else
return 0;
}
+
+/* Take sub-device ownership before it becomes exposed to the application. */
+int
+failsafe_eth_new_event_callback(uint16_t port_id,
+ enum rte_eth_event_type event __rte_unused,
+ void *cb_arg, void *out __rte_unused)
+{
+ struct rte_eth_dev *fs_dev = cb_arg;
+ struct sub_device *sdev;
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ uint8_t i;
+
+ FOREACH_SUBDEV_STATE(sdev, i, fs_dev, DEV_PARSED) {
+ if (sdev->state >= DEV_PROBED)
+ continue;
+ if (strcmp(sdev->devargs.name, dev->device->name) != 0)
+ continue;
+ rte_eth_dev_owner_set(port_id, &PRIV(fs_dev)->my_owner);
+ /* The actual owner will be checked after the port probing. */
+ break;
+ }
+ return 0;
+}
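/*
 * Editor's illustrative sketch, not part of the patch above: the ethdev
 * ownership API the callback relies on. An owner id is allocated once, then
 * set on every port the fail-safe instance controls; owned ports are skipped
 * by the default port iteration of other users. Names are hypothetical.
 */
#include <stdio.h>
#include <rte_ethdev.h>

static int claim_port(uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	int ret;

	if (owner->id == RTE_ETH_DEV_NO_OWNER) {
		ret = rte_eth_dev_owner_new(&owner->id);
		if (ret < 0)
			return ret;
		snprintf(owner->name, sizeof(owner->name), "example-owner");
	}
	return rte_eth_dev_owner_set(port_id, owner);
}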
diff --git a/drivers/net/failsafe/failsafe_flow.c b/drivers/net/failsafe/failsafe_flow.c
index ec8c909b..bfe42fce 100644
--- a/drivers/net/failsafe/failsafe_flow.c
+++ b/drivers/net/failsafe/failsafe_flow.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <sys/queue.h>
@@ -174,7 +174,7 @@ fs_flow_flush(struct rte_eth_dev *dev,
static int
fs_flow_query(struct rte_eth_dev *dev,
struct rte_flow *flow,
- enum rte_flow_action_type type,
+ const struct rte_flow_action *action,
void *arg,
struct rte_flow_error *error)
{
@@ -185,7 +185,7 @@ fs_flow_query(struct rte_eth_dev *dev,
if (sdev != NULL) {
int ret = rte_flow_query(PORT_ID(sdev),
flow->flows[SUB_ID(sdev)],
- type, arg, error);
+ action, arg, error);
if ((ret = fs_err(sdev, ret))) {
fs_unlock(dev, 0);
diff --git a/drivers/net/failsafe/failsafe_intr.c b/drivers/net/failsafe/failsafe_intr.c
index 6b7f9c1a..fc6ec37f 100644
--- a/drivers/net/failsafe/failsafe_intr.c
+++ b/drivers/net/failsafe/failsafe_intr.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018 Mellanox Technologies, Ltd.
+ * Copyright 2018 Mellanox Technologies, Ltd
*/
/**
diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c
index 057e435c..24e91c93 100644
--- a/drivers/net/failsafe/failsafe_ops.c
+++ b/drivers/net/failsafe/failsafe_ops.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <stdbool.h>
@@ -13,6 +13,7 @@
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>
+#include <rte_ethdev.h>
#include "failsafe_private.h"
@@ -81,30 +82,22 @@ static struct rte_eth_dev_info default_infos = {
DEV_TX_OFFLOAD_MULTI_SEGS |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM,
- .flow_type_rss_offloads = 0x0,
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO,
+ .flow_type_rss_offloads =
+ ETH_RSS_IP |
+ ETH_RSS_UDP |
+ ETH_RSS_TCP,
};
static int
fs_dev_configure(struct rte_eth_dev *dev)
{
struct sub_device *sdev;
- uint64_t supp_tx_offloads;
- uint64_t tx_offloads;
uint8_t i;
int ret;
fs_lock(dev, 0);
- supp_tx_offloads = PRIV(dev)->infos.tx_offload_capa;
- tx_offloads = dev->data->dev_conf.txmode.offloads;
- if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
- rte_errno = ENOTSUP;
- ERROR("Some Tx offloads are not supported, "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64,
- tx_offloads, supp_tx_offloads);
- fs_unlock(dev, 0);
- return -rte_errno;
- }
FOREACH_SUBDEV(sdev, i, dev) {
int rmv_interrupt = 0;
int lsc_interrupt = 0;
@@ -145,7 +138,7 @@ fs_dev_configure(struct rte_eth_dev *dev)
fs_unlock(dev, 0);
return ret;
}
- if (rmv_interrupt) {
+ if (rmv_interrupt && sdev->rmv_callback == 0) {
ret = rte_eth_dev_callback_register(PORT_ID(sdev),
RTE_ETH_EVENT_INTR_RMV,
failsafe_eth_rmv_event_callback,
@@ -153,9 +146,11 @@ fs_dev_configure(struct rte_eth_dev *dev)
if (ret)
WARN("Failed to register RMV callback for sub_device %d",
SUB_ID(sdev));
+ else
+ sdev->rmv_callback = 1;
}
dev->data->dev_conf.intr_conf.rmv = 0;
- if (lsc_interrupt) {
+ if (lsc_interrupt && sdev->lsc_callback == 0) {
ret = rte_eth_dev_callback_register(PORT_ID(sdev),
RTE_ETH_EVENT_INTR_LSC,
failsafe_eth_lsc_event_callback,
@@ -163,6 +158,8 @@ fs_dev_configure(struct rte_eth_dev *dev)
if (ret)
WARN("Failed to register LSC callback for sub_device %d",
SUB_ID(sdev));
+ else
+ sdev->lsc_callback = 1;
}
dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
sdev->state = DEV_ACTIVE;
@@ -289,6 +286,7 @@ fs_dev_close(struct rte_eth_dev *dev)
PRIV(dev)->state = DEV_ACTIVE - 1;
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
DEBUG("Closing sub_device %d", i);
+ failsafe_eth_dev_unregister_callbacks(sdev);
rte_eth_dev_close(PORT_ID(sdev));
sdev->state = DEV_ACTIVE - 1;
}
@@ -296,25 +294,6 @@ fs_dev_close(struct rte_eth_dev *dev)
fs_unlock(dev, 0);
}
-static bool
-fs_rxq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
- uint64_t port_offloads;
- uint64_t queue_supp_offloads;
- uint64_t port_supp_offloads;
-
- port_offloads = dev->data->dev_conf.rxmode.offloads;
- queue_supp_offloads = PRIV(dev)->infos.rx_queue_offload_capa;
- port_supp_offloads = PRIV(dev)->infos.rx_offload_capa;
- if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
- offloads)
- return false;
- /* Verify we have no conflict with port offloads */
- if ((port_offloads ^ offloads) & port_supp_offloads)
- return false;
- return true;
-}
-
static void
fs_rx_queue_release(void *queue)
{
@@ -367,19 +346,6 @@ fs_rx_queue_setup(struct rte_eth_dev *dev,
fs_rx_queue_release(rxq);
dev->data->rx_queues[rx_queue_id] = NULL;
}
- /* Verify application offloads are valid for our port and queue. */
- if (fs_rxq_offloads_valid(dev, rx_conf->offloads) == false) {
- rte_errno = ENOTSUP;
- ERROR("Rx queue offloads 0x%" PRIx64
- " don't match port offloads 0x%" PRIx64
- " or supported offloads 0x%" PRIx64,
- rx_conf->offloads,
- dev->data->dev_conf.rxmode.offloads,
- PRIV(dev)->infos.rx_offload_capa |
- PRIV(dev)->infos.rx_queue_offload_capa);
- fs_unlock(dev, 0);
- return -rte_errno;
- }
rxq = rte_zmalloc(NULL,
sizeof(*rxq) +
sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
@@ -498,25 +464,6 @@ unlock:
return rc;
}
-static bool
-fs_txq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
- uint64_t port_offloads;
- uint64_t queue_supp_offloads;
- uint64_t port_supp_offloads;
-
- port_offloads = dev->data->dev_conf.txmode.offloads;
- queue_supp_offloads = PRIV(dev)->infos.tx_queue_offload_capa;
- port_supp_offloads = PRIV(dev)->infos.tx_offload_capa;
- if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
- offloads)
- return false;
- /* Verify we have no conflict with port offloads */
- if ((port_offloads ^ offloads) & port_supp_offloads)
- return false;
- return true;
-}
-
static void
fs_tx_queue_release(void *queue)
{
@@ -556,24 +503,6 @@ fs_tx_queue_setup(struct rte_eth_dev *dev,
fs_tx_queue_release(txq);
dev->data->tx_queues[tx_queue_id] = NULL;
}
- /*
- * Don't verify queue offloads for applications which
- * use the old API.
- */
- if (tx_conf != NULL &&
- (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
- fs_txq_offloads_valid(dev, tx_conf->offloads) == false) {
- rte_errno = ENOTSUP;
- ERROR("Tx queue offloads 0x%" PRIx64
- " don't match port offloads 0x%" PRIx64
- " or supported offloads 0x%" PRIx64,
- tx_conf->offloads,
- dev->data->dev_conf.txmode.offloads,
- PRIV(dev)->infos.tx_offload_capa |
- PRIV(dev)->infos.tx_queue_offload_capa);
- fs_unlock(dev, 0);
- return -rte_errno;
- }
txq = rte_zmalloc("ethdev TX queue",
sizeof(*txq) +
sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
@@ -804,26 +733,29 @@ fs_dev_infos_get(struct rte_eth_dev *dev,
} else {
uint64_t rx_offload_capa;
uint64_t rxq_offload_capa;
+ uint64_t rss_hf_offload_capa;
rx_offload_capa = default_infos.rx_offload_capa;
rxq_offload_capa = default_infos.rx_queue_offload_capa;
+ rss_hf_offload_capa = default_infos.flow_type_rss_offloads;
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
rte_eth_dev_info_get(PORT_ID(sdev),
&PRIV(dev)->infos);
rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
rxq_offload_capa &=
PRIV(dev)->infos.rx_queue_offload_capa;
+ rss_hf_offload_capa &=
+ PRIV(dev)->infos.flow_type_rss_offloads;
}
sdev = TX_SUBDEV(dev);
rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
PRIV(dev)->infos.rx_queue_offload_capa = rxq_offload_capa;
+ PRIV(dev)->infos.flow_type_rss_offloads = rss_hf_offload_capa;
PRIV(dev)->infos.tx_offload_capa &=
default_infos.tx_offload_capa;
PRIV(dev)->infos.tx_queue_offload_capa &=
default_infos.tx_queue_offload_capa;
- PRIV(dev)->infos.flow_type_rss_offloads &=
- default_infos.flow_type_rss_offloads;
}
rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
}
@@ -997,16 +929,52 @@ fs_mac_addr_add(struct rte_eth_dev *dev,
return 0;
}
-static void
+static int
fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
struct sub_device *sdev;
uint8_t i;
+ int ret;
fs_lock(dev, 0);
- FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
- rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
+ ret = fs_err(sdev, ret);
+ if (ret) {
+ ERROR("Operation rte_eth_dev_mac_addr_set failed for sub_device %d with error %d",
+ i, ret);
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ }
+ fs_unlock(dev, 0);
+
+ return 0;
+}
+
+static int
+fs_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ ret = rte_eth_dev_rss_hash_update(PORT_ID(sdev), rss_conf);
+ ret = fs_err(sdev, ret);
+ if (ret) {
+ ERROR("Operation rte_eth_dev_rss_hash_update"
+ " failed for sub_device %d with error %d",
+ i, ret);
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ }
fs_unlock(dev, 0);
+
+ return 0;
}
static int
@@ -1068,5 +1036,6 @@ const struct eth_dev_ops failsafe_ops = {
.mac_addr_remove = fs_mac_addr_remove,
.mac_addr_add = fs_mac_addr_add,
.mac_addr_set = fs_mac_addr_set,
+ .rss_hash_update = fs_rss_hash_update,
.filter_ctrl = fs_filter_ctrl,
};
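
With .rss_hash_update wired up, an application can retune the RSS hash of the fail-safe port and have the change propagated to every active sub-device. A rough usage sketch (the 40-byte key length is only an example; the real length is device-dependent):

	static int
	configure_failsafe_rss(uint16_t port_id)
	{
		static uint8_t rss_key[40];	/* placeholder key, filled by the application */
		struct rte_eth_rss_conf rss_conf = {
			.rss_key = rss_key,
			.rss_key_len = sizeof(rss_key),
			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
		};

		/* Propagated by fs_rss_hash_update() to all DEV_ACTIVE sub-devices. */
		return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
	}
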
diff --git a/drivers/net/failsafe/failsafe_private.h b/drivers/net/failsafe/failsafe_private.h
index 2d16ba4c..886af861 100644
--- a/drivers/net/failsafe/failsafe_private.h
+++ b/drivers/net/failsafe/failsafe_private.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef _RTE_ETH_FAILSAFE_PRIVATE_H_
@@ -119,6 +119,10 @@ struct sub_device {
volatile unsigned int remove:1;
/* flow isolation state */
int flow_isolated:1;
+ /* RMV callback registration state */
+ unsigned int rmv_callback:1;
+ /* LSC callback registration state */
+ unsigned int lsc_callback:1;
};
struct fs_priv {
@@ -211,6 +215,7 @@ int failsafe_eal_uninit(struct rte_eth_dev *dev);
/* ETH_DEV */
int failsafe_eth_dev_state_sync(struct rte_eth_dev *dev);
+void failsafe_eth_dev_unregister_callbacks(struct sub_device *sdev);
void failsafe_dev_remove(struct rte_eth_dev *dev);
void failsafe_stats_increment(struct rte_eth_stats *to,
struct rte_eth_stats *from);
@@ -220,6 +225,9 @@ int failsafe_eth_rmv_event_callback(uint16_t port_id,
int failsafe_eth_lsc_event_callback(uint16_t port_id,
enum rte_eth_event_type event,
void *cb_arg, void *out);
+int failsafe_eth_new_event_callback(uint16_t port_id,
+ enum rte_eth_event_type event,
+ void *cb_arg, void *out);
/* GLOBALS */
@@ -326,8 +334,12 @@ extern int mac_from_arg;
#define FS_THREADID_FMT "lu"
#endif
-#define LOG__(level, m, ...) \
- RTE_LOG(level, PMD, "net_failsafe: " m "%c", __VA_ARGS__)
+extern int failsafe_logtype;
+
+#define LOG__(l, m, ...) \
+ rte_log(RTE_LOG_ ## l, failsafe_logtype, \
+ "net_failsafe: " m "%c", __VA_ARGS__)
+
#define LOG_(level, ...) LOG__(level, __VA_ARGS__, '\n')
#define DEBUG(...) LOG_(DEBUG, __VA_ARGS__)
#define INFO(...) LOG_(INFO, __VA_ARGS__)
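
failsafe_logtype is the new dynamic log type consumed by the LOG__ macro above; it is expected to be registered once at load time, along these lines (a sketch; the exact log name and the constructor style are assumptions, not shown in this patch):

	int failsafe_logtype;

	RTE_INIT(failsafe_init_log);
	static void
	failsafe_init_log(void)
	{
		/* Register a dynamic log type and default it to NOTICE. */
		failsafe_logtype = rte_log_register("pmd.net.failsafe");
		if (failsafe_logtype >= 0)
			rte_log_set_level(failsafe_logtype, RTE_LOG_NOTICE);
	}
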
diff --git a/drivers/net/failsafe/failsafe_rxtx.c b/drivers/net/failsafe/failsafe_rxtx.c
index 363cf7ba..7bd0f963 100644
--- a/drivers/net/failsafe/failsafe_rxtx.c
+++ b/drivers/net/failsafe/failsafe_rxtx.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <rte_atomic.h>
diff --git a/drivers/net/fm10k/Makefile b/drivers/net/fm10k/Makefile
index b059a700..d657dff8 100644
--- a/drivers/net/fm10k/Makefile
+++ b/drivers/net/fm10k/Makefile
@@ -19,7 +19,8 @@ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
#
# CFLAGS for icc
#
-CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259
+CFLAGS_BASE_DRIVER = -diag-disable 174 -diag-disable 593 -diag-disable 869
+CFLAGS_BASE_DRIVER += -diag-disable 981 -diag-disable 2259
else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
diff --git a/drivers/net/fm10k/fm10k.h b/drivers/net/fm10k/fm10k.h
index 30dad3e2..ef307809 100644
--- a/drivers/net/fm10k/fm10k.h
+++ b/drivers/net/fm10k/fm10k.h
@@ -180,6 +180,7 @@ struct fm10k_rx_queue {
uint8_t drop_en;
uint8_t rx_deferred_start; /* don't start this queue in dev start. */
uint16_t rx_ftag_en; /* indicates FTAG RX supported */
+ uint64_t offloads; /* offloads of DEV_RX_OFFLOAD_* */
};
/*
@@ -211,7 +212,7 @@ struct fm10k_tx_queue {
uint16_t next_rs; /* Next pos to set RS flag */
uint16_t next_dd; /* Next pos to check DD flag */
volatile uint32_t *tail_ptr;
- uint32_t txq_flags; /* Holds flags for this TXq */
+ uint64_t offloads; /* Offloads of DEV_TX_OFFLOAD_* */
uint16_t nb_desc;
uint16_t port_id;
uint8_t tx_deferred_start; /** don't start this queue in dev start. */
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 94237610..3ff1b0e0 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -60,6 +60,13 @@ static void fm10k_set_tx_function(struct rte_eth_dev *dev);
static int fm10k_check_ftag(struct rte_devargs *devargs);
static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete);
+static void fm10k_dev_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
+static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
+static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
+static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
+
struct fm10k_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
unsigned offset;
@@ -444,8 +451,10 @@ fm10k_dev_configure(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
- if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
+ if ((dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CRC_STRIP) == 0)
PMD_INIT_LOG(WARNING, "fm10k always strip CRC");
+
/* multiple queue mode checking */
ret = fm10k_check_mq_mode(dev);
if (ret != 0) {
@@ -454,6 +463,8 @@ fm10k_dev_configure(struct rte_eth_dev *dev)
return ret;
}
+ dev->data->scattered_rx = 0;
+
return 0;
}
@@ -756,7 +767,7 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
/* It adds dual VLAN length for supporting dual VLAN */
if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
- dev->data->dev_conf.rxmode.enable_scatter) {
+ rxq->offloads & DEV_RX_OFFLOAD_SCATTER) {
uint32_t reg;
dev->data->scattered_rx = 1;
reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
@@ -1233,13 +1244,11 @@ fm10k_link_update(struct rte_eth_dev *dev,
FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
- /* The speed is ~50Gbps per Gen3 x8 PCIe interface. For now, we
- * leave the speed undefined since there is no 50Gbps Ethernet.
- */
- dev->data->dev_link.link_speed = 0;
+ dev->data->dev_link.link_speed = ETH_SPEED_NUM_50G;
dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
dev->data->dev_link.link_status =
dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP;
+ dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
return 0;
}
@@ -1377,7 +1386,6 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
- dev_info->pci_dev = pdev;
dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
dev_info->max_rx_queues = hw->mac.max_queues;
@@ -1389,17 +1397,12 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
dev_info->vmdq_queue_base = 0;
dev_info->max_vmdq_pools = ETH_32_POOLS;
dev_info->vmdq_queue_num = FM10K_MAX_QUEUES_PF;
- dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
- dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ dev_info->rx_queue_offload_capa = fm10k_get_rx_queue_offloads_capa(dev);
+ dev_info->rx_offload_capa = fm10k_get_rx_port_offloads_capa(dev) |
+ dev_info->rx_queue_offload_capa;
+ dev_info->tx_queue_offload_capa = fm10k_get_tx_queue_offloads_capa(dev);
+ dev_info->tx_offload_capa = fm10k_get_tx_port_offloads_capa(dev) |
+ dev_info->tx_queue_offload_capa;
dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
dev_info->reta_size = FM10K_MAX_RSS_INDICES;
@@ -1412,6 +1415,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
},
.rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -1422,7 +1426,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
},
.tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
.tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
- .txq_flags = FM10K_SIMPLE_TX_FLAG,
+ .offloads = 0,
};
dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
@@ -1571,19 +1575,22 @@ static int
fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
if (mask & ETH_VLAN_STRIP_MASK) {
- if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_STRIP))
PMD_INIT_LOG(ERR, "VLAN stripping is "
"always on in fm10k");
}
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+ if (dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND)
PMD_INIT_LOG(ERR, "VLAN QinQ is not "
"supported in fm10k");
}
if (mask & ETH_VLAN_FILTER_MASK) {
- if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER))
PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
}
@@ -1781,6 +1788,27 @@ mempool_element_size_valid(struct rte_mempool *mp)
return 1;
}
+static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return (uint64_t)(DEV_RX_OFFLOAD_SCATTER);
+}
+
+static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return (uint64_t)(DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_HEADER_SPLIT);
+}
+
static int
fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
uint16_t nb_desc, unsigned int socket_id,
@@ -1791,9 +1819,12 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
struct fm10k_rx_queue *q;
const struct rte_memzone *mz;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
+ offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
/* make sure the mempool element size can account for alignment. */
if (!mempool_element_size_valid(mp)) {
PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
@@ -1838,6 +1869,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
q->queue_id = queue_id;
q->tail_ptr = (volatile uint32_t *)
&((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
+ q->offloads = offloads;
if (handle_rxconf(q, conf))
return -EINVAL;
@@ -1947,6 +1979,24 @@ handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
return 0;
}
+static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return (uint64_t)(DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO);
+}
+
static int
fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
uint16_t nb_desc, unsigned int socket_id,
@@ -1955,9 +2005,12 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct fm10k_tx_queue *q;
const struct rte_memzone *mz;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
+ offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
+
/* make sure a valid number of descriptors have been requested */
if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
FM10K_MULT_TX_DESC, nb_desc)) {
@@ -1994,7 +2047,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
q->nb_desc = nb_desc;
q->port_id = dev->data->port_id;
q->queue_id = queue_id;
- q->txq_flags = conf->txq_flags;
+ q->offloads = offloads;
q->ops = &def_txq_ops;
q->tail_ptr = (volatile uint32_t *)
&((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
@@ -2860,7 +2913,7 @@ fm10k_set_tx_function(struct rte_eth_dev *dev)
uint16_t tx_ftag_en = 0;
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- /* primary process has set the ftag flag and txq_flags */
+ /* primary process has set the ftag flag and offloads */
txq = dev->data->tx_queues[0];
if (fm10k_tx_vec_condition_check(txq)) {
dev->tx_pkt_burst = fm10k_xmit_pkts;
diff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c
index 498a1781..005fda63 100644
--- a/drivers/net/fm10k/fm10k_rxtx_vec.c
+++ b/drivers/net/fm10k/fm10k_rxtx_vec.c
@@ -210,7 +210,7 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
#ifndef RTE_FM10K_RX_OLFLAGS_ENABLE
/* without rx ol_flags, no VP flag report */
- if (rxmode->hw_vlan_extend != 0)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
return -1;
#endif
@@ -219,7 +219,7 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
return -1;
/* no header split support */
- if (rxmode->header_split == 1)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
return -1;
return 0;
@@ -695,7 +695,7 @@ int __attribute__((cold))
fm10k_tx_vec_condition_check(struct fm10k_tx_queue *txq)
{
/* Vector TX can't offload any features yet */
- if ((txq->txq_flags & FM10K_SIMPLE_TX_FLAG) != FM10K_SIMPLE_TX_FLAG)
+ if (txq->offloads != 0)
return -1;
if (txq->tx_ftag_en)
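
Under the offloads API the effective per-queue set is the union of the port-level rxmode.offloads and whatever the application passes in rte_eth_rxconf, as fm10k_rx_queue_setup() now computes. For example, requesting scattered Rx on a single queue could look like this (port id, queue id, descriptor count and mempool are placeholders):

	static int
	setup_scatter_rxq(uint16_t port_id, struct rte_mempool *mp)
	{
		struct rte_eth_dev_info dev_info;
		struct rte_eth_rxconf rx_conf;

		rte_eth_dev_info_get(port_id, &dev_info);
		rx_conf = dev_info.default_rxconf;
		rx_conf.offloads = DEV_RX_OFFLOAD_SCATTER;	/* queue-level request */

		return rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
					      &rx_conf, mp);
	}
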
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 5663f5b1..3f869a8d 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -11,6 +11,8 @@ LIB = librte_pmd_i40e.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS) -DPF_DRIVER -DVF_DRIVER -DINTEGRATED_VF
CFLAGS += -DX722_A0_SUPPORT
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash
LDLIBS += -lrte_bus_pci
@@ -24,7 +26,7 @@ LIBABIVER := 2
# to disable warnings
#
ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
-CFLAGS_BASE_DRIVER = -wd593 -wd188
+CFLAGS_BASE_DRIVER = -diag-disable 593
else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
CFLAGS_BASE_DRIVER += -Wno-sign-compare
CFLAGS_BASE_DRIVER += -Wno-unused-value
@@ -85,6 +87,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += rte_pmd_i40e.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_tm.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_vf_representor.c
ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
CC_AVX2_SUPPORT=1
diff --git a/drivers/net/i40e/base/i40e_register.h b/drivers/net/i40e/base/i40e_register.h
index a482ab90..df66e76a 100644
--- a/drivers/net/i40e/base/i40e_register.h
+++ b/drivers/net/i40e/base/i40e_register.h
@@ -90,7 +90,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
#define I40E_PF_ARQT_ARQT_SHIFT 0
#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
@@ -113,7 +113,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
#define I40E_PF_ATQT_ATQT_SHIFT 0
#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
@@ -140,7 +140,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
#define I40E_VF_ARQT_MAX_INDEX 127
#define I40E_VF_ARQT_ARQT_SHIFT 0
@@ -168,7 +168,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
#define I40E_VF_ATQT_MAX_INDEX 127
#define I40E_VF_ATQT_ATQT_SHIFT 0
@@ -291,7 +291,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
-#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
@@ -535,7 +535,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
-#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1u, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
#define I40E_GLGEN_MSRWD_MAX_INDEX 3
#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
@@ -1274,14 +1274,14 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
-#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1u, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */
#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT)
#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
-#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT)
+#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PFLAN_QALLOC_VALID_SHIFT)
#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
#define I40E_QRX_ENA_MAX_INDEX 1535
#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
@@ -1692,7 +1692,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_GLNVM_SRCTL_START_SHIFT 30
#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
-#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1u, I40E_GLNVM_SRCTL_DONE_SHIFT)
#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
@@ -3059,7 +3059,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
-#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PF_VT_PFALLOC_VALID_SHIFT)
#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VP_MDET_RX_MAX_INDEX 127
#define I40E_VP_MDET_RX_VALID_SHIFT 0
@@ -3196,7 +3196,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
#define I40E_VF_ARQT1_ARQT_SHIFT 0
#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)
@@ -3219,7 +3219,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
#define I40E_VF_ATQT1_ATQT_SHIFT 0
#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
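
The 0x1 to 0x1u changes matter because the base-code mask macro shifts its first argument as-is; shifting a plain int literal by 31 moves a bit into the sign position, which is undefined behaviour in C, while the unsigned literal is well defined. Roughly (the macro body below paraphrases the base code and is not part of this hunk):

	#define I40E_MASK(mask, shift) ((mask) << (shift))

	/* (0x1  << 31): shifts a signed int into the sign bit, undefined. */
	/* (0x1u << 31): well defined, yields 0x80000000 as an unsigned value. */
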
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 508b4171..13c5d329 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -11,6 +11,7 @@
#include <inttypes.h>
#include <assert.h>
+#include <rte_common.h>
#include <rte_eal.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
@@ -213,7 +214,7 @@
/* Bit mask of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_MASK (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
-static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
@@ -369,7 +370,12 @@ static int i40e_get_eeprom_length(struct rte_eth_dev *dev);
static int i40e_get_eeprom(struct rte_eth_dev *dev,
struct rte_dev_eeprom_info *eeprom);
-static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
+static int i40e_get_module_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo);
+static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info);
+
+static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
@@ -489,6 +495,8 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.get_reg = i40e_get_regs,
.get_eeprom_length = i40e_get_eeprom_length,
.get_eeprom = i40e_get_eeprom,
+ .get_module_info = i40e_get_module_info,
+ .get_module_eeprom = i40e_get_module_eeprom,
.mac_addr_set = i40e_set_default_mac_addr,
.mtu_set = i40e_dev_mtu_set,
.tm_ops_get = i40e_tm_ops_get,
@@ -607,16 +615,74 @@ static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
sizeof(rte_i40e_txq_prio_strings[0]))
-static int eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+static int
+eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_probe(pci_dev,
- sizeof(struct i40e_adapter), eth_i40e_dev_init);
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
+ int i, retval;
+
+ if (pci_dev->device.devargs) {
+ retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
+ &eth_da);
+ if (retval)
+ return retval;
+ }
+
+ retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
+ sizeof(struct i40e_adapter),
+ eth_dev_pci_specific_init, pci_dev,
+ eth_i40e_dev_init, NULL);
+
+ if (retval || eth_da.nb_representor_ports < 1)
+ return retval;
+
+ /* probe VF representor ports */
+ struct rte_eth_dev *pf_ethdev = rte_eth_dev_allocated(
+ pci_dev->device.name);
+
+ if (pf_ethdev == NULL)
+ return -ENODEV;
+
+ for (i = 0; i < eth_da.nb_representor_ports; i++) {
+ struct i40e_vf_representor representor = {
+ .vf_id = eth_da.representor_ports[i],
+ .switch_domain_id = I40E_DEV_PRIVATE_TO_PF(
+ pf_ethdev->data->dev_private)->switch_domain_id,
+ .adapter = I40E_DEV_PRIVATE_TO_ADAPTER(
+ pf_ethdev->data->dev_private)
+ };
+
+ /* representor port net_bdf_port */
+ snprintf(name, sizeof(name), "net_%s_representor_%d",
+ pci_dev->device.name, eth_da.representor_ports[i]);
+
+ retval = rte_eth_dev_create(&pci_dev->device, name,
+ sizeof(struct i40e_vf_representor), NULL, NULL,
+ i40e_vf_representor_init, &representor);
+
+ if (retval)
+ PMD_DRV_LOG(ERR, "failed to create i40e vf "
+ "representor %s.", name);
+ }
+
+ return 0;
}
static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_remove(pci_dev, eth_i40e_dev_uninit);
+ struct rte_eth_dev *ethdev;
+
+ ethdev = rte_eth_dev_allocated(pci_dev->device.name);
+ if (!ethdev)
+ return -ENODEV;
+
+
+ if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
+ return rte_eth_dev_destroy(ethdev, i40e_vf_representor_uninit);
+ else
+ return rte_eth_dev_destroy(ethdev, eth_i40e_dev_uninit);
}
static struct rte_pci_driver rte_i40e_pmd = {
@@ -627,41 +693,17 @@ static struct rte_pci_driver rte_i40e_pmd = {
.remove = eth_i40e_pci_remove,
};
-static inline int
-rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = link;
- struct rte_eth_link *src = &(dev->data->dev_link);
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
-static inline int
-rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &(dev->data->dev_link);
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
static inline void
-i40e_write_global_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
+i40e_write_global_rx_ctl(struct i40e_hw *hw, uint32_t reg_addr,
+ uint32_t reg_val)
{
+ uint32_t ori_reg_val;
+
+ ori_reg_val = i40e_read_rx_ctl(hw, reg_addr);
i40e_write_rx_ctl(hw, reg_addr, reg_val);
- PMD_DRV_LOG(DEBUG, "Global register 0x%08x is modified "
- "with value 0x%08x",
- reg_addr, reg_val);
+ PMD_DRV_LOG(DEBUG,
+ "Global register [0x%08x] original: 0x%08x, after: 0x%08x",
+ reg_addr, ori_reg_val, reg_val);
}
RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
@@ -1118,7 +1160,31 @@ i40e_support_multi_driver(struct rte_eth_dev *dev)
}
static int
-eth_i40e_dev_init(struct rte_eth_dev *dev)
+i40e_aq_debug_write_global_register(struct i40e_hw *hw,
+ uint32_t reg_addr, uint64_t reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ uint64_t ori_reg_val;
+ int ret;
+
+ ret = i40e_aq_debug_read_register(hw, reg_addr, &ori_reg_val, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Fail to debug read from 0x%08x",
+ reg_addr);
+ return -EIO;
+ }
+
+ PMD_DRV_LOG(DEBUG,
+ "Global register [0x%08x] original: 0x%"PRIx64
+ ", after: 0x%"PRIx64,
+ reg_addr, ori_reg_val, reg_val);
+
+ return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details);
+}
+
+static int
+eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
{
struct rte_pci_device *pci_dev;
struct rte_intr_handle *intr_handle;
@@ -1224,7 +1290,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
/* initialise the L3_MAP register */
if (!pf->support_multi_driver) {
- ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40),
+ ret = i40e_aq_debug_write_global_register(hw,
+ I40E_GLQF_L3_MAP(40),
0x00000028, NULL);
if (ret)
PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d",
@@ -1533,6 +1600,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
struct rte_flow *p_flow;
int ret;
uint8_t aq_fail = 0;
+ int retries = 0;
PMD_INIT_FUNC_TRACE();
@@ -1544,6 +1612,10 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
pci_dev = RTE_ETH_DEV_TO_PCI(dev);
intr_handle = &pci_dev->intr_handle;
+ ret = rte_eth_switch_domain_free(pf->switch_domain_id);
+ if (ret)
+ PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
+
if (hw->adapter_stopped == 0)
i40e_dev_close(dev);
@@ -1574,9 +1646,20 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
/* disable uio intr before callback unregister */
rte_intr_disable(intr_handle);
- /* register callback func to eal lib */
- rte_intr_callback_unregister(intr_handle,
- i40e_dev_interrupt_handler, dev);
+ /* unregister callback func to eal lib */
+ do {
+ ret = rte_intr_callback_unregister(intr_handle,
+ i40e_dev_interrupt_handler, dev);
+ if (ret >= 0) {
+ break;
+ } else if (ret != -EAGAIN) {
+ PMD_INIT_LOG(ERR,
+ "intr callback unregister failed: %d",
+ ret);
+ return ret;
+ }
+ i40e_msec_delay(500);
+ } while (retries++ < 5);
i40e_rm_ethtype_filter_list(pf);
i40e_rm_tunnel_filter_list(pf);
@@ -2286,6 +2369,8 @@ i40e_dev_close(struct rte_eth_dev *dev)
i40e_pf_disable_irq0(hw);
rte_intr_disable(intr_handle);
+ i40e_fdir_teardown(pf);
+
/* shutdown and destroy the HMC */
i40e_shutdown_lan_hmc(hw);
@@ -2297,7 +2382,6 @@ i40e_dev_close(struct rte_eth_dev *dev)
pf->vmdq = NULL;
/* release all the existing VSIs and VEBs */
- i40e_fdir_teardown(pf);
i40e_vsi_release(pf->main_vsi);
/* shutdown the adminq */
@@ -2339,7 +2423,7 @@ i40e_dev_reset(struct rte_eth_dev *dev)
if (ret)
return ret;
- ret = eth_i40e_dev_init(dev);
+ ret = eth_i40e_dev_init(dev, NULL);
return ret;
}
@@ -2437,84 +2521,143 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev)
return i40e_phy_conf_link(hw, abilities, speed, false);
}
-int
-i40e_dev_link_update(struct rte_eth_dev *dev,
- int wait_to_complete)
+static __rte_always_inline void
+update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
+{
+/* Link status registers and values*/
+#define I40E_PRTMAC_LINKSTA 0x001E2420
+#define I40E_REG_LINK_UP 0x40000080
+#define I40E_PRTMAC_MACC 0x001E24E0
+#define I40E_REG_MACC_25GB 0x00020000
+#define I40E_REG_SPEED_MASK 0x38000000
+#define I40E_REG_SPEED_100MB 0x00000000
+#define I40E_REG_SPEED_1GB 0x08000000
+#define I40E_REG_SPEED_10GB 0x10000000
+#define I40E_REG_SPEED_20GB 0x20000000
+#define I40E_REG_SPEED_25_40GB 0x18000000
+ uint32_t link_speed;
+ uint32_t reg_val;
+
+ reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA);
+ link_speed = reg_val & I40E_REG_SPEED_MASK;
+ reg_val &= I40E_REG_LINK_UP;
+ link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;
+
+ if (unlikely(link->link_status == 0))
+ return;
+
+ /* Parse the link status */
+ switch (link_speed) {
+ case I40E_REG_SPEED_100MB:
+ link->link_speed = ETH_SPEED_NUM_100M;
+ break;
+ case I40E_REG_SPEED_1GB:
+ link->link_speed = ETH_SPEED_NUM_1G;
+ break;
+ case I40E_REG_SPEED_10GB:
+ link->link_speed = ETH_SPEED_NUM_10G;
+ break;
+ case I40E_REG_SPEED_20GB:
+ link->link_speed = ETH_SPEED_NUM_20G;
+ break;
+ case I40E_REG_SPEED_25_40GB:
+ reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
+
+ if (reg_val & I40E_REG_MACC_25GB)
+ link->link_speed = ETH_SPEED_NUM_25G;
+ else
+ link->link_speed = ETH_SPEED_NUM_40G;
+
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
+ break;
+ }
+}
+
+static __rte_always_inline void
+update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
+ bool enable_lse, int wait_to_complete)
{
-#define CHECK_INTERVAL 100 /* 100ms */
-#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
+ uint32_t rep_cnt = MAX_REPEAT_TIME;
struct i40e_link_status link_status;
- struct rte_eth_link link, old;
int status;
- unsigned rep_cnt = MAX_REPEAT_TIME;
- bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
- memset(&link, 0, sizeof(link));
- memset(&old, 0, sizeof(old));
memset(&link_status, 0, sizeof(link_status));
- rte_i40e_dev_atomic_read_link_status(dev, &old);
do {
+ memset(&link_status, 0, sizeof(link_status));
+
/* Get link status information from hardware */
status = i40e_aq_get_link_info(hw, enable_lse,
&link_status, NULL);
- if (status != I40E_SUCCESS) {
- link.link_speed = ETH_SPEED_NUM_100M;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ if (unlikely(status != I40E_SUCCESS)) {
+ link->link_speed = ETH_SPEED_NUM_100M;
+ link->link_duplex = ETH_LINK_FULL_DUPLEX;
PMD_DRV_LOG(ERR, "Failed to get link info");
- goto out;
+ return;
}
- link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
- if (!wait_to_complete || link.link_status)
+ link->link_status = link_status.link_info & I40E_AQ_LINK_UP;
+ if (!wait_to_complete || link->link_status)
break;
rte_delay_ms(CHECK_INTERVAL);
} while (--rep_cnt);
- if (!link.link_status)
- goto out;
-
- /* i40e uses full duplex only */
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
-
/* Parse the link status */
switch (link_status.link_speed) {
case I40E_LINK_SPEED_100MB:
- link.link_speed = ETH_SPEED_NUM_100M;
+ link->link_speed = ETH_SPEED_NUM_100M;
break;
case I40E_LINK_SPEED_1GB:
- link.link_speed = ETH_SPEED_NUM_1G;
+ link->link_speed = ETH_SPEED_NUM_1G;
break;
case I40E_LINK_SPEED_10GB:
- link.link_speed = ETH_SPEED_NUM_10G;
+ link->link_speed = ETH_SPEED_NUM_10G;
break;
case I40E_LINK_SPEED_20GB:
- link.link_speed = ETH_SPEED_NUM_20G;
+ link->link_speed = ETH_SPEED_NUM_20G;
break;
case I40E_LINK_SPEED_25GB:
- link.link_speed = ETH_SPEED_NUM_25G;
+ link->link_speed = ETH_SPEED_NUM_25G;
break;
case I40E_LINK_SPEED_40GB:
- link.link_speed = ETH_SPEED_NUM_40G;
+ link->link_speed = ETH_SPEED_NUM_40G;
break;
default:
- link.link_speed = ETH_SPEED_NUM_100M;
+ link->link_speed = ETH_SPEED_NUM_100M;
break;
}
+}
+int
+i40e_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_link link;
+ bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
+ int ret;
+
+ memset(&link, 0, sizeof(link));
+
+ /* i40e uses full duplex only */
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
ETH_LINK_SPEED_FIXED);
-out:
- rte_i40e_dev_atomic_write_link_status(dev, &link);
- if (link.link_status == old.link_status)
- return -1;
+ if (!wait_to_complete && !enable_lse)
+ update_link_reg(hw, &link);
+ else
+ update_link_aq(hw, &link, enable_lse, wait_to_complete);
+ ret = rte_eth_linkstatus_set(dev, &link);
i40e_notify_all_vfs_link_status(dev);
- return 0;
+ return ret;
}
/* Get all the statistics of a VSI */
@@ -3169,13 +3312,13 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
struct i40e_vsi *vsi = pf->main_vsi;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- dev_info->pci_dev = pci_dev;
dev_info->max_rx_queues = vsi->nb_qps;
dev_info->max_tx_queues = vsi->nb_qps;
dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
dev_info->max_mac_addrs = vsi->max_macaddrs;
dev_info->max_vfs = pci_dev->max_vfs;
+ dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -3183,7 +3326,12 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_VLAN_EXTEND |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -3196,7 +3344,13 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GRE_TNL_TSO |
DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ dev_info->tx_queue_offload_capa;
+ dev_info->dev_capa =
+ RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
+ RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+
dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
sizeof(uint32_t);
dev_info->reta_size = pf->hash_lut_size;
@@ -3210,6 +3364,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3220,8 +3375,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
.tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
- .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
+ .offloads = 0,
};
dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
@@ -3248,15 +3402,42 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_tx_queues += dev_info->vmdq_queue_num;
}
- if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types))
+ if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
/* For XL710 */
dev_info->speed_capa = ETH_LINK_SPEED_40G;
- else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
+ dev_info->default_rxportconf.nb_queues = 2;
+ dev_info->default_txportconf.nb_queues = 2;
+ if (dev->data->nb_rx_queues == 1)
+ dev_info->default_rxportconf.ring_size = 2048;
+ else
+ dev_info->default_rxportconf.ring_size = 1024;
+ if (dev->data->nb_tx_queues == 1)
+ dev_info->default_txportconf.ring_size = 1024;
+ else
+ dev_info->default_txportconf.ring_size = 512;
+
+ } else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
/* For XXV710 */
dev_info->speed_capa = ETH_LINK_SPEED_25G;
- else
+ dev_info->default_rxportconf.nb_queues = 1;
+ dev_info->default_txportconf.nb_queues = 1;
+ dev_info->default_rxportconf.ring_size = 256;
+ dev_info->default_txportconf.ring_size = 256;
+ } else {
/* For X710 */
dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+ dev_info->default_rxportconf.nb_queues = 1;
+ dev_info->default_txportconf.nb_queues = 1;
+ if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
+ dev_info->default_rxportconf.ring_size = 512;
+ dev_info->default_txportconf.ring_size = 256;
+ } else {
+ dev_info->default_rxportconf.ring_size = 256;
+ dev_info->default_txportconf.ring_size = 256;
+ }
+ }
+ dev_info->default_rxportconf.burst_size = 32;
+ dev_info->default_txportconf.burst_size = 32;
}
static int
@@ -3307,7 +3488,8 @@ i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
return 0;
}
- ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
+ ret = i40e_aq_debug_write_global_register(hw,
+ I40E_GL_SWT_L2TAGCTRL(reg_id),
reg_w, NULL);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR,
@@ -3319,6 +3501,8 @@ i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
"Global register 0x%08x is changed with value 0x%08x",
I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
+ i40e_global_cfg_warning(I40E_WARNING_TPID);
+
return 0;
}
@@ -3329,7 +3513,8 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int qinq = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
int ret = 0;
if ((vlan_type != ETH_VLAN_TYPE_INNER &&
@@ -3367,7 +3552,6 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
/* If NVM API < 1.7, keep the register setting */
ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
tpid, qinq);
- i40e_global_cfg_warning(I40E_WARNING_TPID);
return ret;
}
@@ -3377,9 +3561,11 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_vsi *vsi = pf->main_vsi;
+ struct rte_eth_rxmode *rxmode;
+ rxmode = &dev->data->dev_conf.rxmode;
if (mask & ETH_VLAN_FILTER_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
i40e_vsi_config_vlan_filter(vsi, TRUE);
else
i40e_vsi_config_vlan_filter(vsi, FALSE);
@@ -3387,14 +3573,14 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
i40e_vsi_config_vlan_stripping(vsi, TRUE);
else
i40e_vsi_config_vlan_stripping(vsi, FALSE);
}
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
i40e_vsi_config_double_vlan(vsi, TRUE);
/* Set global registers with default ethertype. */
i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
@@ -3641,6 +3827,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_mac_filter_info mac_filter;
struct i40e_vsi *vsi;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
int ret;
/* If VMDQ not enabled or configured, return */
@@ -3659,7 +3846,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
}
rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
else
mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
@@ -4010,8 +4197,8 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
return I40E_ERR_PARAM;
snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
- mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
- alignment, RTE_PGSIZE_2M);
+ mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
+ RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
if (!mz)
return I40E_ERR_NO_MEMORY;
@@ -5669,6 +5856,12 @@ i40e_pf_setup(struct i40e_pf *pf)
PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
return ret;
}
+
+ ret = rte_eth_switch_domain_alloc(&pf->switch_domain_id);
+ if (ret)
+ PMD_INIT_LOG(WARNING,
+ "failed to allocate switch domain for device %d", ret);
+
if (pf->flags & I40E_FLAG_FDIR) {
/* make queue allocated first, let FDIR use queue pair 0*/
ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
@@ -8194,7 +8387,8 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
}
if (reg != val) {
- ret = i40e_aq_debug_write_register(hw, I40E_GL_PRS_FVBM(2),
+ ret = i40e_aq_debug_write_global_register(hw,
+ I40E_GL_PRS_FVBM(2),
reg, NULL);
if (ret != 0)
return ret;
@@ -9087,11 +9281,11 @@ i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
{
uint32_t reg = i40e_read_rx_ctl(hw, addr);
- PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
if (reg != val)
- i40e_write_global_rx_ctl(hw, addr, val);
- PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
- (uint32_t)i40e_read_rx_ctl(hw, addr));
+ i40e_write_rx_ctl(hw, addr, val);
+ PMD_DRV_LOG(DEBUG,
+ "Global register [0x%08x] original: 0x%08x, after: 0x%08x",
+ addr, reg, (uint32_t)i40e_read_rx_ctl(hw, addr));
}
static void
@@ -10321,9 +10515,8 @@ i40e_start_timecounters(struct rte_eth_dev *dev)
uint32_t tsync_inc_h;
/* Get current link speed. */
- memset(&link, 0, sizeof(link));
i40e_dev_link_update(dev, 1);
- rte_i40e_dev_atomic_read_link_status(dev, &link);
+ rte_eth_linkstatus_get(dev, &link);
switch (link.link_speed) {
case ETH_SPEED_NUM_40G:
@@ -11249,8 +11442,148 @@ static int i40e_get_eeprom(struct rte_eth_dev *dev,
return 0;
}
-static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr)
+static int i40e_get_module_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t sff8472_comp = 0;
+ uint32_t sff8472_swap = 0;
+ uint32_t sff8636_rev = 0;
+ i40e_status status;
+ uint32_t type = 0;
+
+ /* Check if firmware supports reading module EEPROM. */
+ if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
+ PMD_DRV_LOG(ERR,
+ "Module EEPROM memory read not supported. "
+ "Please update the NVM image.\n");
+ return -EINVAL;
+ }
+
+ status = i40e_update_link_info(hw);
+ if (status)
+ return -EIO;
+
+ if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
+ PMD_DRV_LOG(ERR,
+ "Cannot read module EEPROM memory. "
+ "No module connected.\n");
+ return -EINVAL;
+ }
+
+ type = hw->phy.link_info.module_type[0];
+
+ switch (type) {
+ case I40E_MODULE_TYPE_SFP:
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ I40E_I2C_EEPROM_DEV_ADDR,
+ I40E_MODULE_SFF_8472_COMP,
+ &sff8472_comp, NULL);
+ if (status)
+ return -EIO;
+
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ I40E_I2C_EEPROM_DEV_ADDR,
+ I40E_MODULE_SFF_8472_SWAP,
+ &sff8472_swap, NULL);
+ if (status)
+ return -EIO;
+
+ /* Check if the module requires address swap to access
+ * the other EEPROM memory page.
+ */
+ if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
+ PMD_DRV_LOG(WARNING,
+ "Module address swap to access "
+ "page 0xA2 is not supported.\n");
+ modinfo->type = RTE_ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
+ } else if (sff8472_comp == 0x00) {
+ /* Module is not SFF-8472 compliant */
+ modinfo->type = RTE_ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
+ } else {
+ modinfo->type = RTE_ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
+ }
+ break;
+ case I40E_MODULE_TYPE_QSFP_PLUS:
+ /* Read from memory page 0. */
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ 0,
+ I40E_MODULE_REVISION_ADDR,
+ &sff8636_rev, NULL);
+ if (status)
+ return -EIO;
+ /* Determine revision compliance byte */
+ if (sff8636_rev > 0x02) {
+ /* Module is SFF-8636 compliant */
+ modinfo->type = RTE_ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
+ } else {
+ modinfo->type = RTE_ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
+ }
+ break;
+ case I40E_MODULE_TYPE_QSFP28:
+ modinfo->type = RTE_ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Module type unrecognized\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ bool is_sfp = false;
+ i40e_status status;
+ uint8_t *data = info->data;
+ uint32_t value = 0;
+ uint32_t i;
+
+ if (!info || !info->length || !data)
+ return -EINVAL;
+
+ if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
+ is_sfp = true;
+
+ for (i = 0; i < info->length; i++) {
+ u32 offset = i + info->offset;
+ u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
+
+ /* Check if we need to access the other memory page */
+ if (is_sfp) {
+ if (offset >= RTE_ETH_MODULE_SFF_8079_LEN) {
+ offset -= RTE_ETH_MODULE_SFF_8079_LEN;
+ addr = I40E_I2C_EEPROM_DEV_ADDR2;
+ }
+ } else {
+ while (offset >= RTE_ETH_MODULE_SFF_8436_LEN) {
+ /* Compute memory page number and offset. */
+ offset -= RTE_ETH_MODULE_SFF_8436_LEN / 2;
+ addr++;
+ }
+ }
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ addr, offset, &value, NULL);
+ if (status)
+ return -EIO;
+ data[i] = (uint8_t)value;
+ }
+ return 0;
+}
+
+static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
@@ -11261,7 +11594,7 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
if (!is_valid_assigned_ether_addr(mac_addr)) {
PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
- return;
+ return -EINVAL;
}
TAILQ_FOREACH(f, &vsi->mac_list, next) {
@@ -11271,25 +11604,31 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
if (f == NULL) {
PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
- return;
+ return -EIO;
}
mac_filter = f->mac_info;
ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to delete mac filter");
- return;
+ return -EIO;
}
memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
ret = i40e_vsi_add_mac(vsi, &mac_filter);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to add mac filter");
- return;
+ return -EIO;
}
memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
- i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
- mac_addr->addr_bytes, NULL);
+ ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
+ mac_addr->addr_bytes, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to change mac");
+ return -EIO;
+ }
+
+ return 0;
}
static int
@@ -11312,9 +11651,11 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
if (frame_size > ETHER_MAX_LEN)
- dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.jumbo_frame = 0;
+ dev_data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
@@ -11413,7 +11754,7 @@ i40e_rss_filter_restore(struct i40e_pf *pf)
{
struct i40e_rte_flow_rss_conf *conf =
&pf->rss_info;
- if (conf->num)
+ if (conf->conf.queue_num)
i40e_config_rss_filter(pf, conf, TRUE);
}
@@ -11456,7 +11797,8 @@ i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
static int
i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
uint32_t pkg_size, uint32_t proto_num,
- struct rte_pmd_i40e_proto_info *proto)
+ struct rte_pmd_i40e_proto_info *proto,
+ enum rte_pmd_i40e_package_op op)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
uint32_t pctype_num;
@@ -11469,6 +11811,12 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
uint32_t i, j, n;
int ret;
+ if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
+ op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ PMD_DRV_LOG(ERR, "Unsupported operation.");
+ return -1;
+ }
+
ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
(uint8_t *)&pctype_num, sizeof(pctype_num),
RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
@@ -11531,8 +11879,13 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
i40e_find_customized_pctype(pf,
I40E_CUSTOMIZED_GTPU);
if (new_pctype) {
- new_pctype->pctype = pctype_value;
- new_pctype->valid = true;
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
+ new_pctype->pctype = pctype_value;
+ new_pctype->valid = true;
+ } else {
+ new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID;
+ new_pctype->valid = false;
+ }
}
}
@@ -11542,8 +11895,9 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
static int
i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
- uint32_t pkg_size, uint32_t proto_num,
- struct rte_pmd_i40e_proto_info *proto)
+ uint32_t pkg_size, uint32_t proto_num,
+ struct rte_pmd_i40e_proto_info *proto,
+ enum rte_pmd_i40e_package_op op)
{
struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
uint16_t port_id = dev->data->port_id;
@@ -11556,6 +11910,17 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
bool in_tunnel;
int ret;
+ if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
+ op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ PMD_DRV_LOG(ERR, "Unsupported operation.");
+ return -1;
+ }
+
+ if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ rte_pmd_i40e_ptype_mapping_reset(port_id);
+ return 0;
+ }
+
/* get information about new ptype num */
ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
(uint8_t *)&ptype_num, sizeof(ptype_num),
@@ -11728,7 +12093,7 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
void
i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
- uint32_t pkg_size)
+ uint32_t pkg_size, enum rte_pmd_i40e_package_op op)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
uint32_t proto_num;
@@ -11737,6 +12102,12 @@ i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
uint32_t i;
int ret;
+ if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
+ op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ PMD_DRV_LOG(ERR, "Unsupported operation.");
+ return;
+ }
+
/* get information about protocol number */
ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
(uint8_t *)&proto_num, sizeof(proto_num),
@@ -11770,20 +12141,23 @@ i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
/* Check if GTP is supported. */
for (i = 0; i < proto_num; i++) {
if (!strncmp(proto[i].name, "GTP", 3)) {
- pf->gtp_support = true;
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
+ pf->gtp_support = true;
+ else
+ pf->gtp_support = false;
break;
}
}
/* Update customized pctype info */
ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
- proto_num, proto);
+ proto_num, proto, op);
if (ret)
PMD_DRV_LOG(INFO, "No pctype is updated.");
/* Update customized ptype info */
ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
- proto_num, proto);
+ proto_num, proto, op);
if (ret)
PMD_DRV_LOG(INFO, "No ptype is updated.");
@@ -11912,18 +12286,56 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
}
int
+i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in)
+{
+ if (in->key_len > RTE_DIM(out->key) ||
+ in->queue_num > RTE_DIM(out->queue))
+ return -EINVAL;
+ out->conf = (struct rte_flow_action_rss){
+ .func = in->func,
+ .level = in->level,
+ .types = in->types,
+ .key_len = in->key_len,
+ .queue_num = in->queue_num,
+ .key = memcpy(out->key, in->key, in->key_len),
+ .queue = memcpy(out->queue, in->queue,
+ sizeof(*in->queue) * in->queue_num),
+ };
+ return 0;
+}
+
+int
+i40e_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with)
+{
+ return (comp->func == with->func &&
+ comp->level == with->level &&
+ comp->types == with->types &&
+ comp->key_len == with->key_len &&
+ comp->queue_num == with->queue_num &&
+ !memcmp(comp->key, with->key, with->key_len) &&
+ !memcmp(comp->queue, with->queue,
+ sizeof(*with->queue) * with->queue_num));
+}
+
+int
i40e_config_rss_filter(struct i40e_pf *pf,
struct i40e_rte_flow_rss_conf *conf, bool add)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
uint32_t i, lut = 0;
uint16_t j, num;
- struct rte_eth_rss_conf rss_conf = conf->rss_conf;
+ struct rte_eth_rss_conf rss_conf = {
+ .rss_key = conf->conf.key_len ?
+ (void *)(uintptr_t)conf->conf.key : NULL,
+ .rss_key_len = conf->conf.key_len,
+ .rss_hf = conf->conf.types,
+ };
struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
if (!add) {
- if (memcmp(conf, rss_info,
- sizeof(struct i40e_rte_flow_rss_conf)) == 0) {
+ if (i40e_action_rss_same(&rss_info->conf, &conf->conf)) {
i40e_pf_disable_rss(pf);
memset(rss_info, 0,
sizeof(struct i40e_rte_flow_rss_conf));
@@ -11932,7 +12344,7 @@ i40e_config_rss_filter(struct i40e_pf *pf,
return -EINVAL;
}
- if (rss_info->num)
+ if (rss_info->conf.queue_num)
return -EINVAL;
/* If both VMDQ and RSS enabled, not all of PF queues are configured.
@@ -11943,7 +12355,7 @@ i40e_config_rss_filter(struct i40e_pf *pf,
else
num = pf->dev_data->nb_rx_queues;
- num = RTE_MIN(num, conf->num);
+ num = RTE_MIN(num, conf->conf.queue_num);
PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
num);
@@ -11956,7 +12368,7 @@ i40e_config_rss_filter(struct i40e_pf *pf,
for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
if (j == num)
j = 0;
- lut = (lut << 8) | (conf->queue[j] & ((0x1 <<
+ lut = (lut << 8) | (conf->conf.queue[j] & ((0x1 <<
hw->func_caps.rss_table_entry_width) - 1));
if ((i & 3) == 3)
I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
@@ -11981,8 +12393,8 @@ i40e_config_rss_filter(struct i40e_pf *pf,
i40e_hw_rss_hash_set(pf, &rss_conf);
- rte_memcpy(rss_info,
- conf, sizeof(struct i40e_rte_flow_rss_conf));
+ if (i40e_rss_conf_init(rss_info, &conf->conf))
+ return -EINVAL;
return 0;
}
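
The ethdev changes above replace the whole-struct memcmp() of the old i40e_rte_flow_rss_conf with an explicit deep copy (i40e_rss_conf_init) and a field-by-field compare (i40e_action_rss_same), because the new rte_flow_action_rss carries pointers to its key and queue arrays. The following is a minimal standalone sketch of that copy-then-compare pattern; the struct names, array sizes and field set are simplified stand-ins rather than the driver's real types.

	#include <stdint.h>
	#include <string.h>

	/* Simplified stand-ins for rte_flow_action_rss and the driver's
	 * private storage; the array sizes are illustrative only. */
	struct rss_action {
		const uint8_t *key;
		uint32_t key_len;
		const uint16_t *queue;
		uint32_t queue_num;
	};

	struct rss_store {
		struct rss_action conf;	/* points into the arrays below */
		uint8_t key[52];
		uint16_t queue[64];
	};

	static int
	rss_store_init(struct rss_store *out, const struct rss_action *in)
	{
		if (in->key_len > sizeof(out->key) ||
		    in->queue_num > sizeof(out->queue) / sizeof(out->queue[0]))
			return -1;
		out->conf = *in;
		/* Deep copy: repoint the stored action at the embedded arrays. */
		out->conf.key = memcpy(out->key, in->key, in->key_len);
		out->conf.queue = memcpy(out->queue, in->queue,
					 sizeof(*in->queue) * in->queue_num);
		return 0;
	}

	static int
	rss_action_same(const struct rss_action *a, const struct rss_action *b)
	{
		return a->key_len == b->key_len &&
		       a->queue_num == b->queue_num &&
		       !memcmp(a->key, b->key, a->key_len) &&
		       !memcmp(a->queue, b->queue,
			       sizeof(*a->queue) * a->queue_num);
	}
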
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 99efb670..11c4c76b 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -5,12 +5,18 @@
#ifndef _I40E_ETHDEV_H_
#define _I40E_ETHDEV_H_
+#include <stdint.h>
+
#include <rte_eth_ctrl.h>
#include <rte_time.h>
#include <rte_kvargs.h>
#include <rte_hash.h>
+#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tm_driver.h>
+#include "rte_pmd_i40e.h"
+
+#include "base/i40e_register.h"
#define I40E_VLAN_TAG_SIZE 4
@@ -80,11 +86,13 @@
#define I40E_WRITE_GLB_REG(hw, reg, value) \
do { \
+ uint32_t ori_val; \
+ ori_val = I40E_READ_REG((hw), (reg)); \
I40E_PCI_REG_WRITE(I40E_PCI_REG_ADDR((hw), \
(reg)), (value)); \
- PMD_DRV_LOG(DEBUG, "Global register 0x%08x is modified " \
- "with value 0x%08x", \
- (reg), (value)); \
+ PMD_DRV_LOG(DEBUG, "global register [0x%08x] " \
+ "original: 0x%08x, after: 0x%08x ", \
+ (reg), (ori_val), (value)); \
} while (0)
/* index flex payload per layer */
@@ -877,9 +885,11 @@ struct i40e_customized_pctype {
};
struct i40e_rte_flow_rss_conf {
- struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
+ struct rte_flow_action_rss conf; /**< RSS parameters. */
uint16_t queue_region_conf; /**< Queue region config flag */
- uint16_t num; /**< Number of entries in queue[]. */
+ uint8_t key[(I40E_VFQF_HKEY_MAX_INDEX > I40E_PFQF_HKEY_MAX_INDEX ?
+ I40E_VFQF_HKEY_MAX_INDEX : I40E_PFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t)]; /* Hash key. */
uint16_t queue[I40E_MAX_Q_PER_TC]; /**< Queues indices to use. */
};
@@ -956,6 +966,8 @@ struct i40e_pf {
bool gtp_support; /* 1 - support GTP-C and GTP-U */
/* customer customized pctype */
struct i40e_customized_pctype customized_pctype[I40E_CUSTOMIZED_MAX];
+ /* Switch Domain Id */
+ uint16_t switch_domain_id;
};
enum pending_msg {
@@ -1005,6 +1017,9 @@ struct i40e_vf {
uint16_t promisc_flags; /* Promiscuous setting */
uint32_t vlan[I40E_VFTA_SIZE]; /* VLAN bit map */
+ struct ether_addr mc_addrs[I40E_NUM_MACADDR_MAX]; /* Multicast addrs */
+	uint16_t mc_addrs_num; /* Number of multicast MAC addresses */
+
/* Event from pf */
bool dev_closed;
bool link_up;
@@ -1058,6 +1073,20 @@ struct i40e_adapter {
uint64_t pctypes_mask;
};
+/**
+ * Structure to store private data for each VF representor instance
+ */
+struct i40e_vf_representor {
+	uint16_t switch_domain_id;
+	/**< Switch domain ID */
+	uint16_t vf_id;
+	/**< Virtual Function ID */
+	struct i40e_adapter *adapter;
+	/**< Private data store of associated physical function */
+	struct i40e_eth_stats stats_offset;
+	/**< Zero-point of VF statistics */
+};
+
extern const struct rte_flow_ops i40e_flow_ops;
union i40e_filter_t {
@@ -1206,7 +1235,8 @@ void i40e_tm_conf_uninit(struct rte_eth_dev *dev);
struct i40e_customized_pctype*
i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index);
void i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
- uint32_t pkg_size);
+ uint32_t pkg_size,
+ enum rte_pmd_i40e_package_op op);
int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb);
int i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on);
@@ -1214,8 +1244,14 @@ void i40e_init_queue_region_conf(struct rte_eth_dev *dev);
void i40e_flex_payload_reg_set_default(struct i40e_hw *hw);
int i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len);
int i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size);
+int i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in);
+int i40e_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with);
int i40e_config_rss_filter(struct i40e_pf *pf,
struct i40e_rte_flow_rss_conf *conf, bool add);
+int i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params);
+int i40e_vf_representor_uninit(struct rte_eth_dev *ethdev);
#define I40E_DEV_TO_PCI(eth_dev) \
RTE_DEV_TO_PCI((eth_dev)->device)
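
The reworked I40E_WRITE_GLB_REG macro above now reads the global register before writing it, so the debug log can report both the original and the new value. A hedged sketch of the same read-log-write pattern as a plain function, with a fake register array standing in for the device BAR and printf() standing in for PMD_DRV_LOG():

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t fake_bar[4096];	/* illustrative register file */

	static uint32_t reg_read(uint32_t off) { return fake_bar[off >> 2]; }
	static void reg_write(uint32_t off, uint32_t v) { fake_bar[off >> 2] = v; }

	/* Read, then write, then log old and new values, as the macro does. */
	static void
	write_global_reg(uint32_t off, uint32_t val)
	{
		uint32_t orig = reg_read(off);

		reg_write(off, val);
		printf("global register [0x%08x] original: 0x%08x, after: 0x%08x\n",
		       off, orig, val);
	}
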
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index fd003fe0..804e4453 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -120,7 +120,7 @@ static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf);
static int i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
-static void i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
+static int i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static int
i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
@@ -130,6 +130,14 @@ static void i40evf_handle_pf_event(struct rte_eth_dev *dev,
uint8_t *msg,
uint16_t msglen);
+static int
+i40evf_add_del_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr, bool add);
+static int
+i40evf_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr);
+
/* Default hash key buffer for RSS */
static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1];
@@ -195,6 +203,7 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
.txq_info_get = i40e_txq_info_get,
.mac_addr_add = i40evf_add_mac_addr,
.mac_addr_remove = i40evf_del_mac_addr,
+ .set_mc_addr_list = i40evf_set_mc_addr_list,
.reta_update = i40evf_dev_rss_reta_update,
.reta_query = i40evf_dev_rss_reta_query,
.rss_hash_update = i40evf_dev_rss_hash_update,
@@ -1036,20 +1045,6 @@ static const struct rte_pci_id pci_id_i40evf_map[] = {
{ .vendor_id = 0, /* sentinel */ },
};
-static inline int
-i40evf_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &(dev->data->dev_link);
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
/* Disable IRQ0 */
static inline void
i40evf_disable_irq0(struct i40e_hw *hw)
@@ -1541,7 +1536,7 @@ i40evf_dev_configure(struct rte_eth_dev *dev)
/* For non-DPDK PF drivers, VF has no ability to disable HW
* CRC strip, and is implicitly enabled by the PF.
*/
- if (!conf->rxmode.hw_strip_crc) {
+ if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
(vf->version_minor <= VIRTCHNL_VERSION_MINOR)) {
@@ -1575,7 +1570,7 @@ i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
/* Vlan stripping setting */
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev_conf->rxmode.hw_vlan_strip)
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
i40evf_enable_vlan_strip(dev);
else
i40evf_disable_vlan_strip(dev);
@@ -1732,7 +1727,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
/**
* Check if the jumbo frame and maximum packet length are set correctly
*/
- if (dev_data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
@@ -1752,7 +1747,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
}
}
- if (dev_data->dev_conf.rxmode.enable_scatter ||
+ if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
(rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
dev_data->scattered_rx = 1;
}
@@ -2012,6 +2007,9 @@ i40evf_dev_start(struct rte_eth_dev *dev)
/* Set all mac addrs */
i40evf_add_del_all_mac_addr(dev, TRUE);
+ /* Set all multicast addresses */
+ i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
+ TRUE);
if (i40evf_start_queues(dev) != 0) {
PMD_DRV_LOG(ERR, "enable queues failed");
@@ -2036,6 +2034,8 @@ i40evf_dev_start(struct rte_eth_dev *dev)
err_mac:
i40evf_add_del_all_mac_addr(dev, FALSE);
+ i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
+ FALSE);
err_queue:
return -1;
}
@@ -2046,6 +2046,7 @@ i40evf_dev_stop(struct rte_eth_dev *dev)
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
@@ -2063,6 +2064,9 @@ i40evf_dev_stop(struct rte_eth_dev *dev)
}
/* remove all mac addrs */
i40evf_add_del_all_mac_addr(dev, FALSE);
+ /* remove all multicast addresses */
+ i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
+ FALSE);
hw->adapter_stopped = 1;
}
@@ -2078,6 +2082,7 @@ i40evf_dev_link_update(struct rte_eth_dev *dev,
* while Linux driver does not
*/
+ memset(&new_link, 0, sizeof(new_link));
/* Linux driver PF host */
switch (vf->link_speed) {
case I40E_LINK_SPEED_100MB:
@@ -2107,11 +2112,9 @@ i40evf_dev_link_update(struct rte_eth_dev *dev,
new_link.link_status = vf->link_up ? ETH_LINK_UP :
ETH_LINK_DOWN;
new_link.link_autoneg =
- dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED;
+ !(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
- i40evf_dev_atomic_write_link_status(dev, &new_link);
-
- return 0;
+ return rte_eth_linkstatus_set(dev, &new_link);
}
static void
@@ -2180,7 +2183,6 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
memset(dev_info, 0, sizeof(*dev_info));
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
@@ -2189,6 +2191,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
+ dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -2196,7 +2199,12 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_VLAN_FILTER;
+
+ dev_info->tx_queue_offload_capa = 0;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -2209,7 +2217,8 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GRE_TNL_TSO |
DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
@@ -2219,6 +2228,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2229,8 +2239,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
.tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
- .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
+ .offloads = 0,
};
dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
@@ -2281,6 +2290,14 @@ i40evf_dev_close(struct rte_eth_dev *dev)
i40evf_dev_stop(dev);
i40e_dev_free_queues(dev);
+	/*
+	 * Disable promiscuous mode before resetting the VF. This is a
+	 * workaround for coexistence with the kernel driver rather than
+	 * the normal teardown path.
+	 */
+ i40evf_dev_promiscuous_disable(dev);
+ i40evf_dev_allmulticast_disable(dev);
+
i40evf_reset_vf(hw);
i40e_shutdown_adminq(hw);
/* disable uio intr before callback unregister */
@@ -2649,16 +2666,17 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
if (frame_size > ETHER_MAX_LEN)
- dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.jumbo_frame = 0;
-
+ dev_data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
return ret;
}
-static void
+static int
i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr)
{
@@ -2667,15 +2685,99 @@ i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
if (!is_valid_assigned_ether_addr(mac_addr)) {
PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
- return;
+ return -EINVAL;
}
if (vf->flags & I40E_FLAG_VF_MAC_BY_PF)
- return;
+ return -EPERM;
i40evf_del_mac_addr_by_addr(dev, (struct ether_addr *)hw->mac.addr);
- i40evf_add_mac_addr(dev, mac_addr, 0, 0);
+ if (i40evf_add_mac_addr(dev, mac_addr, 0, 0) != 0)
+ return -EIO;
ether_addr_copy(mac_addr, (struct ether_addr *)hw->mac.addr);
+ return 0;
+}
+
+static int
+i40evf_add_del_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addrs,
+ uint32_t mc_addrs_num, bool add)
+{
+ struct virtchnl_ether_addr_list *list;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) +
+ (I40E_NUM_MACADDR_MAX * sizeof(struct virtchnl_ether_addr))];
+ uint32_t i;
+ int err;
+ struct vf_cmd_info args;
+
+ if (mc_addrs == NULL || mc_addrs_num == 0)
+ return 0;
+
+ if (mc_addrs_num > I40E_NUM_MACADDR_MAX)
+ return -EINVAL;
+
+ list = (struct virtchnl_ether_addr_list *)cmd_buffer;
+ list->vsi_id = vf->vsi_res->vsi_id;
+ list->num_elements = mc_addrs_num;
+
+ for (i = 0; i < mc_addrs_num; i++) {
+ if (!I40E_IS_MULTICAST(mc_addrs[i].addr_bytes)) {
+ PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x",
+ mc_addrs[i].addr_bytes[0],
+ mc_addrs[i].addr_bytes[1],
+ mc_addrs[i].addr_bytes[2],
+ mc_addrs[i].addr_bytes[3],
+ mc_addrs[i].addr_bytes[4],
+ mc_addrs[i].addr_bytes[5]);
+ return -EINVAL;
+ }
+
+ memcpy(list->list[i].addr, mc_addrs[i].addr_bytes,
+ sizeof(list->list[i].addr));
+ }
+
+ args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR : VIRTCHNL_OP_DEL_ETH_ADDR;
+ args.in_args = cmd_buffer;
+ args.in_args_size = sizeof(struct virtchnl_ether_addr_list) +
+ i * sizeof(struct virtchnl_ether_addr);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err) {
+ PMD_DRV_LOG(ERR, "fail to execute command %s",
+ add ? "OP_ADD_ETH_ADDR" : "OP_DEL_ETH_ADDR");
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+i40evf_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addrs,
+ uint32_t mc_addrs_num)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int err;
+
+ /* flush previous addresses */
+ err = i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
+ FALSE);
+ if (err)
+ return err;
+
+ vf->mc_addrs_num = 0;
+
+ /* add new ones */
+ err = i40evf_add_del_mc_addr_list(dev, mc_addrs, mc_addrs_num,
+ TRUE);
+ if (err)
+ return err;
+
+ vf->mc_addrs_num = mc_addrs_num;
+ memcpy(vf->mc_addrs, mc_addrs, mc_addrs_num * sizeof(*mc_addrs));
+
+ return 0;
}
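
Because i40evf_set_mc_addr_list() above is wired into the generic set_mc_addr_list dev op, an application can replace the VF's multicast filter list in a single call. A short usage sketch, assuming an already configured and started i40evf port; the port id and addresses are illustrative:

	#include <rte_ethdev.h>

	static int
	set_vf_mcast_filters(uint16_t port_id)
	{
		/* Two common multicast addresses, for illustration only. */
		struct ether_addr mc[2] = {
			{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
			{ .addr_bytes = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 } },
		};

		/* The PMD flushes the previously programmed list, then adds these. */
		return rte_eth_dev_set_mc_addr_list(port_id, mc, 2);
	}
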
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index b83a0cff..a4a61d1c 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -525,7 +525,7 @@ i40e_set_flx_pld_cfg(struct i40e_pf *pf,
flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
(num << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
(layer_idx * I40E_MAX_FLXPLD_FIED);
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
+ I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
i40e_global_cfg_warning(I40E_WARNING_ENA_FLX_PLD);
}
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 16c47cf7..89de6a59 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -10,6 +10,7 @@
#include <unistd.h>
#include <stdarg.h>
+#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_log.h>
@@ -53,6 +54,7 @@ static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
struct rte_flow_error *error,
struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
const struct rte_flow_item *pattern,
struct rte_flow_error *error,
struct i40e_fdir_filter_conf *filter);
@@ -1939,7 +1941,8 @@ static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int qinq = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
uint64_t reg_r = 0;
uint16_t reg_id;
uint16_t tpid;
@@ -2259,7 +2262,7 @@ i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
(raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
(layer_idx * I40E_MAX_FLXPLD_FIED);
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
+ I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
i40e_global_cfg_warning(I40E_WARNING_ENA_FLX_PLD);
}
@@ -2400,7 +2403,7 @@ i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
break;
}
- if (cus_pctype)
+ if (cus_pctype && cus_pctype->valid)
return cus_pctype->pctype;
return I40E_FILTER_PCTYPE_INVALID;
@@ -2419,6 +2422,7 @@ i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
*/
static int
i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
const struct rte_flow_item *pattern,
struct rte_flow_error *error,
struct i40e_fdir_filter_conf *filter)
@@ -2490,16 +2494,22 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
"Invalid MAC_addr mask.");
return -rte_errno;
}
+ }
+ if (eth_spec && eth_mask && eth_mask->type) {
+ enum rte_flow_item_type next = (item + 1)->type;
- if ((eth_mask->type & UINT16_MAX) ==
- UINT16_MAX) {
- input_set |= I40E_INSET_LAST_ETHER_TYPE;
- filter->input.flow.l2_flow.ether_type =
- eth_spec->type;
+ if (eth_mask->type != RTE_BE16(0xffff)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid type mask.");
+ return -rte_errno;
}
ether_type = rte_be_to_cpu_16(eth_spec->type);
- if (ether_type == ETHER_TYPE_IPv4 ||
+
+ if (next == RTE_FLOW_ITEM_TYPE_VLAN ||
+ ether_type == ETHER_TYPE_IPv4 ||
ether_type == ETHER_TYPE_IPv6 ||
ether_type == ETHER_TYPE_ARP ||
ether_type == outer_tpid) {
@@ -2509,6 +2519,9 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
"Unsupported ether_type.");
return -rte_errno;
}
+ input_set |= I40E_INSET_LAST_ETHER_TYPE;
+ filter->input.flow.l2_flow.ether_type =
+ eth_spec->type;
}
pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
@@ -2518,6 +2531,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_VLAN:
vlan_spec = item->spec;
vlan_mask = item->mask;
+
+ RTE_ASSERT(!(input_set & I40E_INSET_LAST_ETHER_TYPE));
if (vlan_spec && vlan_mask) {
if (vlan_mask->tci ==
rte_cpu_to_be_16(I40E_TCI_MASK)) {
@@ -2526,6 +2541,33 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
vlan_spec->tci;
}
}
+ if (vlan_spec && vlan_mask && vlan_mask->inner_type) {
+ if (vlan_mask->inner_type != RTE_BE16(0xffff)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid inner_type"
+ " mask.");
+ return -rte_errno;
+ }
+
+ ether_type =
+ rte_be_to_cpu_16(vlan_spec->inner_type);
+
+ if (ether_type == ETHER_TYPE_IPv4 ||
+ ether_type == ETHER_TYPE_IPv6 ||
+ ether_type == ETHER_TYPE_ARP ||
+ ether_type == outer_tpid) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported inner_type.");
+ return -rte_errno;
+ }
+ input_set |= I40E_INSET_LAST_ETHER_TYPE;
+ filter->input.flow.l2_flow.ether_type =
+ vlan_spec->inner_type;
+ }
pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
layer_idx = I40E_FLXPLD_L2_IDX;
@@ -2918,6 +2960,16 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
break;
case RTE_FLOW_ITEM_TYPE_VF:
vf_spec = item->spec;
+ if (!attr->transfer) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Matching VF traffic"
+ " without affecting it"
+ " (transfer attribute)"
+ " is unsupported");
+ return -rte_errno;
+ }
filter->input.flow_ext.is_vf = 1;
filter->input.flow_ext.dst_id = vf_spec->id;
if (filter->input.flow_ext.is_vf &&
@@ -3080,7 +3132,8 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
&filter->fdir_filter;
int ret;
- ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
+ ret = i40e_flow_parse_fdir_pattern(dev, attr, pattern, error,
+ fdir_filter);
if (ret)
return ret;
@@ -3284,7 +3337,8 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_VLAN:
vlan_spec = item->spec;
vlan_mask = item->mask;
- if (!(vlan_spec && vlan_mask)) {
+ if (!(vlan_spec && vlan_mask) ||
+ vlan_mask->inner_type) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
@@ -3514,7 +3568,8 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_VLAN:
vlan_spec = item->spec;
vlan_mask = item->mask;
- if (!(vlan_spec && vlan_mask)) {
+ if (!(vlan_spec && vlan_mask) ||
+ vlan_mask->inner_type) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
@@ -4022,7 +4077,8 @@ i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
vlan_spec = item->spec;
vlan_mask = item->mask;
- if (!(vlan_spec && vlan_mask)) {
+ if (!(vlan_spec && vlan_mask) ||
+ vlan_mask->inner_type) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
@@ -4150,7 +4206,8 @@ i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
if (vlan_mask->tci ==
rte_cpu_to_be_16(I40E_TCI_MASK)) {
info->region[0].user_priority[0] =
- (vlan_spec->tci >> 13) & 0x7;
+ (rte_be_to_cpu_16(
+ vlan_spec->tci) >> 13) & 0x7;
info->region[0].user_priority_num = 1;
info->queue_region_number = 1;
*action_flag = 0;
@@ -4169,6 +4226,19 @@ i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
return 0;
}
+/**
+ * This function parses the RSS queue indices, the total queue number and
+ * the hash functions. If the purpose of this configuration is queue region
+ * configuration, it sets the queue_region_conf flag to TRUE, otherwise to
+ * FALSE. For queue region configuration it also parses the hardware
+ * flowtype and user_priority from the configuration and checks the
+ * validity of these parameters. For example, the queue region sizes must
+ * be one of the following values: 1, 2, 4, 8, 16, 32, 64; the hw_flowtype
+ * or PCTYPE max index must be 63; the user priority max index must be 7;
+ * and so on. In addition, the queue indices must form a continuous
+ * sequence, and the queue region indices must be part of the RSS queue
+ * indices for this port.
+ */
static int
i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
const struct rte_flow_action *actions,
@@ -4205,7 +4275,7 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
if (action_flag) {
for (n = 0; n < 64; n++) {
- if (rss->rss_conf->rss_hf & (hf_bit << n)) {
+ if (rss->types & (hf_bit << n)) {
conf_info->region[0].hw_flowtype[0] = n;
conf_info->region[0].flowtype_num = 1;
conf_info->queue_region_number = 1;
@@ -4214,41 +4284,73 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
}
}
+	/**
+	 * Check the queue-region related parameters so that the queue
+	 * indices of a queue region form a continuous sequence and are
+	 * also part of the RSS queue indices for this port.
+	 */
+ if (conf_info->queue_region_number) {
+ for (i = 0; i < rss->queue_num; i++) {
+ for (j = 0; j < rss_info->conf.queue_num; j++) {
+ if (rss->queue[i] == rss_info->conf.queue[j])
+ break;
+ }
+ if (j == rss_info->conf.queue_num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "no valid queues");
+ return -rte_errno;
+ }
+ }
+
+ for (i = 0; i < rss->queue_num - 1; i++) {
+ if (rss->queue[i + 1] != rss->queue[i] + 1) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "no valid queues");
+ return -rte_errno;
+ }
+ }
+ }
+
+ /* Parse queue region related parameters from configuration */
for (n = 0; n < conf_info->queue_region_number; n++) {
if (conf_info->region[n].user_priority_num ||
conf_info->region[n].flowtype_num) {
- if (!((rte_is_power_of_2(rss->num)) &&
- rss->num <= 64)) {
- PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
- "total number of queues do not exceed the VSI allocation");
+ if (!((rte_is_power_of_2(rss->queue_num)) &&
+ rss->queue_num <= 64)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
+ "total number of queues do not exceed the VSI allocation");
return -rte_errno;
}
if (conf_info->region[n].user_priority[n] >=
I40E_MAX_USER_PRIORITY) {
- PMD_DRV_LOG(ERR, "the user priority max index is 7");
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "the user priority max index is 7");
return -rte_errno;
}
if (conf_info->region[n].hw_flowtype[n] >=
I40E_FILTER_PCTYPE_MAX) {
- PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
- return -rte_errno;
- }
-
- if (rss_info->num < rss->num ||
- rss_info->queue[0] < rss->queue[0] ||
- (rss->queue[0] + rss->num >
- rss_info->num + rss_info->queue[0])) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
- "no valid queues");
+ "the hw_flowtype or PCTYPE max index is 63");
return -rte_errno;
}
for (i = 0; i < info->queue_region_number; i++) {
- if (info->region[i].queue_num == rss->num &&
+ if (info->region[i].queue_num ==
+ rss->queue_num &&
info->region[i].queue_start_index ==
rss->queue[0])
break;
@@ -4256,12 +4358,15 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
if (i == info->queue_region_number) {
if (i > I40E_REGION_MAX_INDEX) {
- PMD_DRV_LOG(ERR, "the queue region max index is 7");
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "the queue region max index is 7");
return -rte_errno;
}
info->region[i].queue_num =
- rss->num;
+ rss->queue_num;
info->region[i].queue_start_index =
rss->queue[0];
info->region[i].region_id =
@@ -4301,10 +4406,13 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
rss_config->queue_region_conf = TRUE;
}
+	/**
+	 * Return early if this flow is used for queue region configuration.
+	 */
if (rss_config->queue_region_conf)
return 0;
- if (!rss || !rss->num) {
+ if (!rss || !rss->queue_num) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
@@ -4312,7 +4420,7 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
return -rte_errno;
}
- for (n = 0; n < rss->num; n++) {
+ for (n = 0; n < rss->queue_num; n++) {
if (rss->queue[n] >= dev->data->nb_rx_queues) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -4321,15 +4429,29 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
return -rte_errno;
}
}
- if (rss->rss_conf)
- rss_config->rss_conf = *rss->rss_conf;
- else
- rss_config->rss_conf.rss_hf =
- pf->adapter->flow_types_mask;
- for (n = 0; n < rss->num; ++n)
- rss_config->queue[n] = rss->queue[n];
- rss_config->num = rss->num;
+ /* Parse RSS related parameters from configuration */
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "non-default RSS hash functions are not supported");
+ if (rss->level)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "a nonzero RSS encapsulation level is not supported");
+ if (rss->key_len && rss->key_len > RTE_DIM(rss_config->key))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS hash key too large");
+ if (rss->queue_num > RTE_DIM(rss_config->queue))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "too many queues for RSS context");
+ if (i40e_rss_conf_init(rss_config, rss))
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS context initialization failure");
+
index++;
/* check if the next not void action is END */
@@ -4385,14 +4507,15 @@ i40e_config_rss_filter_set(struct rte_eth_dev *dev,
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
if (conf->queue_region_conf) {
- i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
+ ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
conf->queue_region_conf = 0;
} else {
- i40e_config_rss_filter(pf, conf, 1);
+ ret = i40e_config_rss_filter(pf, conf, 1);
}
- return 0;
+ return ret;
}
static int
@@ -4545,6 +4668,8 @@ i40e_flow_create(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_HASH:
ret = i40e_config_rss_filter_set(dev,
&cons_filter.rss_conf);
+ if (ret)
+ goto free_flow;
flow->rule = &pf->rss_info;
break;
default:
@@ -4846,7 +4971,7 @@ i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
- if (rss_info->num)
+ if (rss_info->conf.queue_num)
ret = i40e_config_rss_filter(pf, rss_info, FALSE);
return ret;
}
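
The flow parsing above now consumes the new rte_flow_action_rss layout (func/level/types/key_len/queue_num) instead of an embedded rte_eth_rss_conf. A hedged sketch of how an application might express the RSS action for such a rule; the queue numbers and hash types are illustrative, and the key is left empty so the PMD keeps its current hash key:

	#include <rte_common.h>
	#include <rte_ethdev.h>
	#include <rte_flow.h>

	/* Spread matching traffic over queues 0-3 with the default hash function. */
	static const uint16_t rss_queues[] = { 0, 1, 2, 3 };

	static const struct rte_flow_action_rss rss_action = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.level = 0,		/* non-zero levels are rejected by this PMD */
		.types = ETH_RSS_NONFRAG_IPV4_TCP,
		.key_len = 0,
		.queue_num = RTE_DIM(rss_queues),
		.queue = rss_queues,
	};

	static const struct rte_flow_action rss_actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss_action },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
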
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 1217e5a6..6032d554 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -40,9 +40,6 @@
/* Base address of the HW descriptor ring should be 128B aligned. */
#define I40E_RING_BASE_ALIGN 128
-#define I40E_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
- ETH_TXQ_FLAGS_NOOFFLOADS)
-
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
#ifdef RTE_LIBRTE_IEEE1588
@@ -1240,7 +1237,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
for (i = 0; i < txq->tx_rs_thresh; i++)
rte_prefetch0((txep + i)->mbuf);
- if (txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT) {
+ if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
rte_mempool_put(txep->mbuf->pool, txep->mbuf);
txep->mbuf = NULL;
@@ -1692,6 +1689,75 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
+static int
+i40e_dev_first_queue(uint16_t idx, void **queues, int num)
+{
+ uint16_t i;
+
+ for (i = 0; i < num; i++) {
+ if (i != idx && queues[i])
+ return 0;
+ }
+
+ return 1;
+}
+
+static int
+i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev,
+ struct i40e_rx_queue *rxq)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ int use_def_burst_func =
+ check_rx_burst_bulk_alloc_preconditions(rxq);
+ uint16_t buf_size =
+ (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
+ RTE_PKTMBUF_HEADROOM);
+ int use_scattered_rx =
+ ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size);
+
+ if (i40e_rx_queue_init(rxq) != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Failed to do RX queue initialization");
+ return -EINVAL;
+ }
+
+ if (i40e_dev_first_queue(rxq->queue_id,
+ dev->data->rx_queues,
+ dev->data->nb_rx_queues)) {
+ /**
+ * If it is the first queue to setup,
+ * set all flags to default and call
+ * i40e_set_rx_function.
+ */
+ ad->rx_bulk_alloc_allowed = true;
+ ad->rx_vec_allowed = true;
+ dev->data->scattered_rx = use_scattered_rx;
+ if (use_def_burst_func)
+ ad->rx_bulk_alloc_allowed = false;
+ i40e_set_rx_function(dev);
+ return 0;
+ }
+
+ /* check bulk alloc conflict */
+ if (ad->rx_bulk_alloc_allowed && use_def_burst_func) {
+ PMD_DRV_LOG(ERR, "Can't use default burst.");
+ return -EINVAL;
+ }
+	/* check scattered conflict */
+ if (!dev->data->scattered_rx && use_scattered_rx) {
+ PMD_DRV_LOG(ERR, "Scattered rx is required.");
+ return -EINVAL;
+ }
+ /* check vector conflict */
+ if (ad->rx_vec_allowed && i40e_rxq_vec_setup(rxq)) {
+ PMD_DRV_LOG(ERR, "Failed vector rx setup.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int
i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1712,6 +1778,9 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t len, i;
uint16_t reg_idx, base, bsf, tc_mapping;
int q_offset, use_def_burst_func = 1;
+ uint64_t offloads;
+
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1760,11 +1829,12 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->queue_id = queue_idx;
rxq->reg_idx = reg_idx;
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
- 0 : ETHER_CRC_LEN);
+ rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
rxq->drop_en = rx_conf->rx_drop_en;
rxq->vsi = vsi;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+ rxq->offloads = offloads;
 	/* Allocate the maximum number of RX ring hardware descriptors. */
len = I40E_MAX_RING_DESC;
@@ -1808,25 +1878,6 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
i40e_reset_rx_queue(rxq);
rxq->q_set = TRUE;
- dev->data->rx_queues[queue_idx] = rxq;
-
- use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
-
- if (!use_def_burst_func) {
-#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
- PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
- "satisfied. Rx Burst Bulk Alloc function will be "
- "used on port=%d, queue=%d.",
- rxq->port_id, rxq->queue_id);
-#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
- } else {
- PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
- "not satisfied, Scattered Rx is requested, "
- "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
- "not enabled on port=%d, queue=%d.",
- rxq->port_id, rxq->queue_id);
- ad->rx_bulk_alloc_allowed = false;
- }
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
if (!(vsi->enabled_tc & (1 << i)))
@@ -1841,6 +1892,34 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->dcb_tc = i;
}
+ if (dev->data->dev_started) {
+ if (i40e_dev_rx_queue_setup_runtime(dev, rxq)) {
+ i40e_dev_rx_queue_release(rxq);
+ return -EINVAL;
+ }
+ } else {
+ use_def_burst_func =
+ check_rx_burst_bulk_alloc_preconditions(rxq);
+ if (!use_def_burst_func) {
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ PMD_INIT_LOG(DEBUG,
+ "Rx Burst Bulk Alloc Preconditions are "
+ "satisfied. Rx Burst Bulk Alloc function will be "
+ "used on port=%d, queue=%d.",
+ rxq->port_id, rxq->queue_id);
+#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
+ } else {
+ PMD_INIT_LOG(DEBUG,
+ "Rx Burst Bulk Alloc Preconditions are "
+ "not satisfied, Scattered Rx is requested, "
+ "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
+ "not enabled on port=%d, queue=%d.",
+ rxq->port_id, rxq->queue_id);
+ ad->rx_bulk_alloc_allowed = false;
+ }
+ }
+
+ dev->data->rx_queues[queue_idx] = rxq;
return 0;
}
@@ -1972,6 +2051,52 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
return RTE_ETH_TX_DESC_FULL;
}
+static int
+i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev,
+ struct i40e_tx_queue *txq)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ if (i40e_tx_queue_init(txq) != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Failed to do TX queue initialization");
+ return -EINVAL;
+ }
+
+ if (i40e_dev_first_queue(txq->queue_id,
+ dev->data->tx_queues,
+ dev->data->nb_tx_queues)) {
+ /**
+ * If it is the first queue to setup,
+ * set all flags and call
+ * i40e_set_tx_function.
+ */
+ i40e_set_tx_function_flag(dev, txq);
+ i40e_set_tx_function(dev);
+ return 0;
+ }
+
+ /* check vector conflict */
+ if (ad->tx_vec_allowed) {
+ if (txq->tx_rs_thresh > RTE_I40E_TX_MAX_FREE_BUF_SZ ||
+ i40e_txq_vec_setup(txq)) {
+ PMD_DRV_LOG(ERR, "Failed vector tx setup.");
+ return -EINVAL;
+ }
+ }
+ /* check simple tx conflict */
+ if (ad->tx_simple_allowed) {
+ if (txq->offloads != 0 ||
+ txq->tx_rs_thresh < RTE_PMD_I40E_TX_MAX_BURST) {
+ PMD_DRV_LOG(ERR, "No-simple tx is required.");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
int
i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1989,6 +2114,9 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t tx_rs_thresh, tx_free_thresh;
uint16_t reg_idx, i, base, bsf, tc_mapping;
int q_offset;
+ uint64_t offloads;
+
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -2123,7 +2251,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->queue_id = queue_idx;
txq->reg_idx = reg_idx;
txq->port_id = dev->data->port_id;
- txq->txq_flags = tx_conf->txq_flags;
+ txq->offloads = offloads;
txq->vsi = vsi;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
@@ -2144,10 +2272,6 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
i40e_reset_tx_queue(txq);
txq->q_set = TRUE;
- dev->data->tx_queues[queue_idx] = txq;
-
- /* Use a simple TX queue without offloads or multi segs if possible */
- i40e_set_tx_function_flag(dev, txq);
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
if (!(vsi->enabled_tc & (1 << i)))
@@ -2162,6 +2286,20 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->dcb_tc = i;
}
+ if (dev->data->dev_started) {
+ if (i40e_dev_tx_queue_setup_runtime(dev, txq)) {
+ i40e_dev_tx_queue_release(txq);
+ return -EINVAL;
+ }
+ } else {
+ /**
+ * Use a simple TX queue without offloads or
+ * multi segs if possible
+ */
+ i40e_set_tx_function_flag(dev, txq);
+ }
+ dev->data->tx_queues[queue_idx] = txq;
+
return 0;
}
@@ -2189,8 +2327,8 @@ i40e_memzone_reserve(const char *name, uint32_t len, int socket_id)
if (mz)
return mz;
- mz = rte_memzone_reserve_aligned(name, len,
- socket_id, 0, I40E_RING_BASE_ALIGN);
+ mz = rte_memzone_reserve_aligned(name, len, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, I40E_RING_BASE_ALIGN);
return mz;
}
@@ -2469,7 +2607,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
- if (data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "
@@ -2747,6 +2885,7 @@ i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
qinfo->conf.rx_drop_en = rxq->drop_en;
qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+ qinfo->conf.offloads = rxq->offloads;
}
void
@@ -2765,8 +2904,8 @@ i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
- qinfo->conf.txq_flags = txq->txq_flags;
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+ qinfo->conf.offloads = txq->offloads;
}
void __attribute__((cold))
@@ -2889,19 +3028,24 @@ i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
struct i40e_adapter *ad =
I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
- /* Use a simple Tx queue (no offloads, no multi segs) if possible */
- if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS)
- && (txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST)) {
- if (txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ) {
- PMD_INIT_LOG(DEBUG, "Vector tx"
- " can be enabled on this txq.");
-
- } else {
- ad->tx_vec_allowed = false;
- }
- } else {
- ad->tx_simple_allowed = false;
- }
+ /* Use a simple Tx queue if possible (only fast free is allowed) */
+ ad->tx_simple_allowed =
+ (txq->offloads ==
+ (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+ txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST);
+ ad->tx_vec_allowed = (ad->tx_simple_allowed &&
+ txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ);
+
+ if (ad->tx_vec_allowed)
+ PMD_INIT_LOG(DEBUG, "Vector Tx can be enabled on Tx queue %u.",
+ txq->queue_id);
+ else if (ad->tx_simple_allowed)
+ PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
+ txq->queue_id);
+ else
+ PMD_INIT_LOG(DEBUG,
+ "Neither simple nor vector Tx enabled on Tx queue %u\n",
+ txq->queue_id);
}
void __attribute__((cold))
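
With txq_flags removed, i40e_set_tx_function_flag() above derives the simple and vector Tx choices purely from the per-queue offload bits and tx_rs_thresh. A small standalone sketch of the same decision, with stand-in constants in place of the DPDK macros:

	#include <stdbool.h>
	#include <stdint.h>

	#define TX_OFFLOAD_MBUF_FAST_FREE (1ULL << 16)	/* stand-in value */
	#define TX_MAX_BURST 32		/* stand-in for RTE_PMD_I40E_TX_MAX_BURST */
	#define TX_MAX_FREE_BUF_SZ 64	/* stand-in for RTE_I40E_TX_MAX_FREE_BUF_SZ */

	/* Simple Tx: the only enabled offload (if any) is mbuf fast free and
	 * the RS threshold is large enough; vector Tx additionally bounds
	 * the threshold from above. */
	static void
	pick_tx_path(uint64_t offloads, uint16_t tx_rs_thresh,
		     bool *simple, bool *vec)
	{
		*simple = (offloads == (offloads & TX_OFFLOAD_MBUF_FAST_FREE)) &&
			  tx_rs_thresh >= TX_MAX_BURST;
		*vec = *simple && tx_rs_thresh <= TX_MAX_FREE_BUF_SZ;
	}
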
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 34cd7923..ea73a8a1 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -107,6 +107,7 @@ struct i40e_rx_queue {
bool rx_deferred_start; /**< don't start this queue in dev start */
uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
uint8_t dcb_tc; /**< Traffic class of rx queue */
+ uint64_t offloads; /**< Rx offload flags of DEV_RX_OFFLOAD_* */
};
struct i40e_tx_entry {
@@ -141,13 +142,13 @@ struct i40e_tx_queue {
uint16_t port_id; /**< Device port identifier. */
uint16_t queue_id; /**< TX queue index. */
uint16_t reg_idx;
- uint32_t txq_flags;
struct i40e_vsi *vsi; /**< the VSI this queue belongs to */
uint16_t tx_next_dd;
uint16_t tx_next_rs;
bool q_set; /**< indicate if tx queue has been configured */
bool tx_deferred_start; /**< don't start this queue in dev start */
uint8_t dcb_tc; /**< Traffic class of tx queue */
+	uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
};
/** Offload features */
diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h
index 3ffedcb9..63cb1774 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_common.h
+++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -202,11 +202,11 @@ i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
/* - no csum error report support
* - no header split support
*/
- if (rxmode->header_split == 1)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
return -1;
/* no QinQ support */
- if (rxmode->hw_vlan_extend == 1)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
return -1;
return 0;
diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c
index e549d1e8..83572ef8 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
@@ -1,35 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * Copyright(c) 2016, Linaro Limited
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation.
+ * Copyright(c) 2016-2018, Linaro Limited.
*/
#include <stdint.h>
diff --git a/drivers/net/i40e/i40e_vf_representor.c b/drivers/net/i40e/i40e_vf_representor.c
new file mode 100644
index 00000000..f9f13161
--- /dev/null
+++ b/drivers/net/i40e/i40e_vf_representor.c
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation.
+ */
+
+#include <rte_bus_pci.h>
+#include <rte_ethdev.h>
+#include <rte_pci.h>
+#include <rte_malloc.h>
+
+#include "base/i40e_type.h"
+#include "base/virtchnl.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+#include "rte_pmd_i40e.h"
+
+static int
+i40e_vf_representor_link_update(struct rte_eth_dev *ethdev,
+ int wait_to_complete)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ return i40e_dev_link_update(representor->adapter->eth_dev,
+ wait_to_complete);
+}
+static void
+i40e_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ /* get dev info for the vdev */
+ dev_info->device = ethdev->device;
+
+ dev_info->max_rx_queues = ethdev->data->nb_rx_queues;
+ dev_info->max_tx_queues = ethdev->data->nb_tx_queues;
+
+ dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
+ dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
+ dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
+ dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
+ dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
+ dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_QINQ_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_QINQ_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_IPIP_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = I40E_DEFAULT_RX_PTHRESH,
+ .hthresh = I40E_DEFAULT_RX_HTHRESH,
+ .wthresh = I40E_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ .offloads = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = I40E_DEFAULT_TX_PTHRESH,
+ .hthresh = I40E_DEFAULT_TX_HTHRESH,
+ .wthresh = I40E_DEFAULT_TX_WTHRESH,
+ },
+ .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
+ .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
+ .offloads = 0,
+ };
+
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = I40E_MAX_RING_DESC,
+ .nb_min = I40E_MIN_RING_DESC,
+ .nb_align = I40E_ALIGN_RING_DESC,
+ };
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = I40E_MAX_RING_DESC,
+ .nb_min = I40E_MIN_RING_DESC,
+ .nb_align = I40E_ALIGN_RING_DESC,
+ };
+
+ dev_info->switch_info.name =
+ representor->adapter->eth_dev->device->name;
+ dev_info->switch_info.domain_id = representor->switch_domain_id;
+ dev_info->switch_info.port_id = representor->vf_id;
+}
+
+static int
+i40e_vf_representor_dev_configure(__rte_unused struct rte_eth_dev *dev)
+{
+ return 0;
+}
+
+static int
+i40e_vf_representor_dev_start(__rte_unused struct rte_eth_dev *dev)
+{
+ return 0;
+}
+
+static void
+i40e_vf_representor_dev_stop(__rte_unused struct rte_eth_dev *dev)
+{
+}
+
+static int
+i40e_vf_representor_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t rx_queue_id,
+ __rte_unused uint16_t nb_rx_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
+ __rte_unused struct rte_mempool *mb_pool)
+{
+ return 0;
+}
+
+static int
+i40e_vf_representor_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t rx_queue_id,
+ __rte_unused uint16_t nb_rx_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_txconf *tx_conf)
+{
+ return 0;
+}
+
+static void
+i40evf_stat_update_48(uint64_t *offset,
+ uint64_t *stat)
+{
+ if (*stat >= *offset)
+ *stat = *stat - *offset;
+ else
+ *stat = (uint64_t)((*stat +
+ ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
+
+ *stat &= I40E_48_BIT_MASK;
+}
+
+static void
+i40evf_stat_update_32(uint64_t *offset,
+ uint64_t *stat)
+{
+ if (*stat >= *offset)
+ *stat = (uint64_t)(*stat - *offset);
+ else
+ *stat = (uint64_t)((*stat +
+ ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
+}
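+
+/* The two helpers above compute how far a hardware counter has advanced
+ * past the recorded offset; the counters are only 48 (or 32) bits wide,
+ * so a wrap between the offset snapshot and the current read must be
+ * corrected explicitly. A minimal standalone illustration of the 48-bit
+ * case (plain C, independent of the driver types):
+ *
+ *	#include <stdint.h>
+ *	#include <assert.h>
+ *
+ *	#define BIT_WIDTH_48 48
+ *	#define MASK_48 ((1ULL << BIT_WIDTH_48) - 1)
+ *
+ *	// Return (stat - offset) modulo 2^48, i.e. the increase since the
+ *	// offset snapshot, correcting for one counter wrap.
+ *	static uint64_t stat_delta_48(uint64_t offset, uint64_t stat)
+ *	{
+ *		uint64_t d;
+ *
+ *		if (stat >= offset)
+ *			d = stat - offset;
+ *		else
+ *			d = stat + (1ULL << BIT_WIDTH_48) - offset;
+ *		return d & MASK_48;
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		// Counter wrapped from near 2^48 back to a small value.
+ *		assert(stat_delta_48(MASK_48 - 9, 5) == 15);
+ *		return 0;
+ *	}
+ */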
+
+static int
+rte_pmd_i40e_get_vf_native_stats(uint16_t port,
+ uint16_t vf_id,
+ struct i40e_eth_stats *stats)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ i40e_update_vsi_stats(vsi);
+ memcpy(stats, &vsi->eth_stats, sizeof(vsi->eth_stats));
+
+ return 0;
+}
+
+static int
+i40e_vf_representor_stats_get(struct rte_eth_dev *ethdev,
+ struct rte_eth_stats *stats)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+ struct i40e_eth_stats native_stats;
+ int ret;
+
+ ret = rte_pmd_i40e_get_vf_native_stats(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, &native_stats);
+ if (ret == 0) {
+ i40evf_stat_update_48(
+ &representor->stats_offset.rx_bytes,
+ &native_stats.rx_bytes);
+ i40evf_stat_update_48(
+ &representor->stats_offset.rx_unicast,
+ &native_stats.rx_unicast);
+ i40evf_stat_update_48(
+ &representor->stats_offset.rx_multicast,
+ &native_stats.rx_multicast);
+ i40evf_stat_update_48(
+ &representor->stats_offset.rx_broadcast,
+ &native_stats.rx_broadcast);
+ i40evf_stat_update_32(
+ &representor->stats_offset.rx_discards,
+ &native_stats.rx_discards);
+ i40evf_stat_update_32(
+ &representor->stats_offset.rx_unknown_protocol,
+ &native_stats.rx_unknown_protocol);
+ i40evf_stat_update_48(
+ &representor->stats_offset.tx_bytes,
+ &native_stats.tx_bytes);
+ i40evf_stat_update_48(
+ &representor->stats_offset.tx_unicast,
+ &native_stats.tx_unicast);
+ i40evf_stat_update_48(
+ &representor->stats_offset.tx_multicast,
+ &native_stats.tx_multicast);
+ i40evf_stat_update_48(
+ &representor->stats_offset.tx_broadcast,
+ &native_stats.tx_broadcast);
+ i40evf_stat_update_32(
+ &representor->stats_offset.tx_errors,
+ &native_stats.tx_errors);
+ i40evf_stat_update_32(
+ &representor->stats_offset.tx_discards,
+ &native_stats.tx_discards);
+
+ stats->ipackets = native_stats.rx_unicast +
+ native_stats.rx_multicast +
+ native_stats.rx_broadcast;
+ stats->opackets = native_stats.tx_unicast +
+ native_stats.tx_multicast +
+ native_stats.tx_broadcast;
+ stats->ibytes = native_stats.rx_bytes;
+ stats->obytes = native_stats.tx_bytes;
+ stats->ierrors = native_stats.rx_discards;
+ stats->oerrors = native_stats.tx_errors + native_stats.tx_discards;
+ }
+ return ret;
+}
+
+static void
+i40e_vf_representor_stats_reset(struct rte_eth_dev *ethdev)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_i40e_get_vf_native_stats(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, &representor->stats_offset);
+}
+
+static void
+i40e_vf_representor_promiscuous_enable(struct rte_eth_dev *ethdev)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_i40e_set_vf_unicast_promisc(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, 1);
+}
+
+static void
+i40e_vf_representor_promiscuous_disable(struct rte_eth_dev *ethdev)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_i40e_set_vf_unicast_promisc(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, 0);
+}
+
+static void
+i40e_vf_representor_allmulticast_enable(struct rte_eth_dev *ethdev)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_i40e_set_vf_multicast_promisc(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, 1);
+}
+
+static void
+i40e_vf_representor_allmulticast_disable(struct rte_eth_dev *ethdev)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_i40e_set_vf_multicast_promisc(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, 0);
+}
+
+static void
+i40e_vf_representor_mac_addr_remove(struct rte_eth_dev *ethdev, uint32_t index)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_i40e_remove_vf_mac_addr(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, &ethdev->data->mac_addrs[index]);
+}
+
+static int
+i40e_vf_representor_mac_addr_set(struct rte_eth_dev *ethdev,
+ struct ether_addr *mac_addr)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ return rte_pmd_i40e_set_vf_mac_addr(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, mac_addr);
+}
+
+static int
+i40e_vf_representor_vlan_filter_set(struct rte_eth_dev *ethdev,
+ uint16_t vlan_id, int on)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+ uint64_t vf_mask = 1ULL << representor->vf_id;
+
+ return rte_pmd_i40e_set_vf_vlan_filter(
+ representor->adapter->eth_dev->data->port_id,
+ vlan_id, vf_mask, on);
+}
+
+static int
+i40e_vf_representor_vlan_offload_set(struct rte_eth_dev *ethdev, int mask)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+ struct rte_eth_dev *pdev;
+ struct i40e_pf_vf *vf;
+ struct i40e_vsi *vsi;
+ struct i40e_pf *pf;
+ uint32_t vfid;
+
+ pdev = representor->adapter->eth_dev;
+ vfid = representor->vf_id;
+
+ if (!is_i40e_supported(pdev)) {
+ PMD_DRV_LOG(ERR, "Invalid PF dev.");
+ return -EINVAL;
+ }
+
+ pf = I40E_DEV_PRIVATE_TO_PF(pdev->data->dev_private);
+
+ if (vfid >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vf = &pf->vfs[vfid];
+ vsi = vf->vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ /* Enable or disable VLAN filtering offload */
+ if (ethdev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER)
+ return i40e_vsi_config_vlan_filter(vsi, TRUE);
+ else
+ return i40e_vsi_config_vlan_filter(vsi, FALSE);
+ }
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ /* Enable or disable VLAN stripping offload */
+ if (ethdev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_STRIP)
+ return i40e_vsi_config_vlan_stripping(vsi, TRUE);
+ else
+ return i40e_vsi_config_vlan_stripping(vsi, FALSE);
+ }
+
+ return -EINVAL;
+}
+
+static void
+i40e_vf_representor_vlan_strip_queue_set(struct rte_eth_dev *ethdev,
+ __rte_unused uint16_t rx_queue_id, int on)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_i40e_set_vf_vlan_stripq(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, on);
+}
+
+static int
+i40e_vf_representor_vlan_pvid_set(struct rte_eth_dev *ethdev, uint16_t vlan_id,
+ __rte_unused int on)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ return rte_pmd_i40e_set_vf_vlan_insert(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, vlan_id);
+}
+
+struct eth_dev_ops i40e_representor_dev_ops = {
+ .dev_infos_get = i40e_vf_representor_dev_infos_get,
+
+ .dev_start = i40e_vf_representor_dev_start,
+ .dev_configure = i40e_vf_representor_dev_configure,
+ .dev_stop = i40e_vf_representor_dev_stop,
+
+ .rx_queue_setup = i40e_vf_representor_rx_queue_setup,
+ .tx_queue_setup = i40e_vf_representor_tx_queue_setup,
+
+ .link_update = i40e_vf_representor_link_update,
+
+ .stats_get = i40e_vf_representor_stats_get,
+ .stats_reset = i40e_vf_representor_stats_reset,
+
+ .promiscuous_enable = i40e_vf_representor_promiscuous_enable,
+ .promiscuous_disable = i40e_vf_representor_promiscuous_disable,
+
+ .allmulticast_enable = i40e_vf_representor_allmulticast_enable,
+ .allmulticast_disable = i40e_vf_representor_allmulticast_disable,
+
+ .mac_addr_remove = i40e_vf_representor_mac_addr_remove,
+ .mac_addr_set = i40e_vf_representor_mac_addr_set,
+
+ .vlan_filter_set = i40e_vf_representor_vlan_filter_set,
+ .vlan_offload_set = i40e_vf_representor_vlan_offload_set,
+ .vlan_strip_queue_set = i40e_vf_representor_vlan_strip_queue_set,
+ .vlan_pvid_set = i40e_vf_representor_vlan_pvid_set
+};
+
+static uint16_t
+i40e_vf_representor_rx_burst(__rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+static uint16_t
+i40e_vf_representor_tx_burst(__rte_unused void *tx_queue,
+ __rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+int
+i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ struct i40e_pf *pf;
+ struct i40e_pf_vf *vf;
+ struct rte_eth_link *link;
+
+ representor->vf_id =
+ ((struct i40e_vf_representor *)init_params)->vf_id;
+ representor->switch_domain_id =
+ ((struct i40e_vf_representor *)init_params)->switch_domain_id;
+ representor->adapter =
+ ((struct i40e_vf_representor *)init_params)->adapter;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(
+ representor->adapter->eth_dev->data->dev_private);
+
+ if (representor->vf_id >= pf->vf_num)
+ return -ENODEV;
+
+ /* representor shares the same driver as its PF device */
+ ethdev->device->driver = representor->adapter->eth_dev->device->driver;
+
+ /* Set representor device ops */
+ ethdev->dev_ops = &i40e_representor_dev_ops;
+
+ /* No data-path, but need stub Rx/Tx functions to avoid crash
+ * when testing with the likes of testpmd.
+ */
+ ethdev->rx_pkt_burst = i40e_vf_representor_rx_burst;
+ ethdev->tx_pkt_burst = i40e_vf_representor_tx_burst;
+
+ vf = &pf->vfs[representor->vf_id];
+
+ if (!vf->vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -ENODEV;
+ }
+
+ ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+ /* Set the number of queues allocated to the VF */
+ ethdev->data->nb_rx_queues = vf->vsi->nb_qps;
+ ethdev->data->nb_tx_queues = vf->vsi->nb_qps;
+
+ ethdev->data->mac_addrs = &vf->mac_addr;
+
+ /* Link state. Inherited from PF */
+ link = &representor->adapter->eth_dev->data->dev_link;
+
+ ethdev->data->dev_link.link_speed = link->link_speed;
+ ethdev->data->dev_link.link_duplex = link->link_duplex;
+ ethdev->data->dev_link.link_status = link->link_status;
+ ethdev->data->dev_link.link_autoneg = link->link_autoneg;
+
+ return 0;
+}
+
+int
+i40e_vf_representor_uninit(struct rte_eth_dev *ethdev __rte_unused)
+{
+ return 0;
+}
diff --git a/drivers/net/i40e/meson.build b/drivers/net/i40e/meson.build
index 8764b0e5..d783f362 100644
--- a/drivers/net/i40e/meson.build
+++ b/drivers/net/i40e/meson.build
@@ -1,10 +1,13 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
+version = 2
+
cflags += ['-DPF_DRIVER',
'-DVF_DRIVER',
'-DINTEGRATED_VF',
- '-DX722_A0_SUPPORT']
+ '-DX722_A0_SUPPORT',
+ '-DALLOW_EXPERIMENTAL_API']
subdir('base')
objs = [base_objs]
@@ -17,10 +20,12 @@ sources = files(
'i40e_fdir.c',
'i40e_flow.c',
'i40e_tm.c',
+ 'i40e_vf_representor.c',
'rte_pmd_i40e.c'
)
deps += ['hash']
+includes += include_directories('base')
if arch_subdir == 'x86'
dpdk_conf.set('RTE_LIBRTE_I40E_INC_VECTOR', 1)
@@ -36,11 +41,10 @@ if arch_subdir == 'x86'
'i40e_rxtx_vec_avx2.c',
dependencies: [static_rte_ethdev,
static_rte_kvargs, static_rte_hash],
- c_args: '-mavx2')
+ include_directories: includes,
+ c_args: [cflags, '-mavx2'])
objs += i40e_avx2_lib.extract_objects('i40e_rxtx_vec_avx2.c')
endif
endif
-includes += include_directories('base')
-
install_headers('rte_pmd_i40e.h')
diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c
index dae59e6d..7aa1a751 100644
--- a/drivers/net/i40e/rte_pmd_i40e.c
+++ b/drivers/net/i40e/rte_pmd_i40e.c
@@ -570,6 +570,49 @@ rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id,
return 0;
}
+static const struct ether_addr null_mac_addr;
+
+int
+rte_pmd_i40e_remove_vf_mac_addr(uint16_t port, uint16_t vf_id,
+ struct ether_addr *mac_addr)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf_vf *vf;
+ struct i40e_vsi *vsi;
+ struct i40e_pf *pf;
+
+ if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
+ return -EINVAL;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs)
+ return -EINVAL;
+
+ vf = &pf->vfs[vf_id];
+ vsi = vf->vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ if (is_same_ether_addr(mac_addr, &vf->mac_addr))
+ /* Reset the stored VF MAC to the all-zero address */
+ ether_addr_copy(&null_mac_addr, &vf->mac_addr);
+
+ /* Remove the MAC address from the VSI MAC filter list */
+ i40e_vsi_delete_mac(vsi, mac_addr);
+
+ return 0;
+}
+
/* Set vlan strip on/off for specific VF from host */
int
rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf_id, uint8_t on)
@@ -1530,6 +1573,11 @@ i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec)
return 1;
}
}
+ /* profile with group id 0xff is compatible with any other profile */
+ if ((pinfo->track_id & group_mask) == group_mask) {
+ rte_free(buff);
+ return 0;
+ }
for (i = 0; i < p_list->p_count; i++) {
p = &p_list->p_info[i];
if ((p->track_id & group_mask) == 0) {
@@ -1540,6 +1588,8 @@ i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec)
}
for (i = 0; i < p_list->p_count; i++) {
p = &p_list->p_info[i];
+ if ((p->track_id & group_mask) == group_mask)
+ continue;
if ((pinfo->track_id & group_mask) !=
(p->track_id & group_mask)) {
PMD_DRV_LOG(INFO, "Profile of different group exists.");
@@ -1603,8 +1653,6 @@ rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
return -EINVAL;
}
- i40e_update_customized_info(dev, buff, size);
-
/* Find metadata segment */
metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
pkg_hdr);
@@ -1708,6 +1756,10 @@ rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
}
}
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD ||
+ op == RTE_PMD_I40E_PKG_OP_WR_DEL)
+ i40e_update_customized_info(dev, buff, size, op);
+
rte_free(profile_info_sec);
return status;
}
@@ -3071,6 +3123,7 @@ rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype,
{
struct rte_eth_dev *dev;
struct i40e_hw *hw;
+ struct i40e_pf *pf;
uint64_t inset_reg;
uint32_t mask_reg[2];
int i;
@@ -3086,10 +3139,12 @@ rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype,
return -EINVAL;
hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- /* Clear mask first */
- for (i = 0; i < 2; i++)
- i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), 0);
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Input set configuration is not supported.");
+ return -ENOTSUP;
+ }
inset_reg = inset->inset;
for (i = 0; i < 2; i++)
@@ -3098,14 +3153,17 @@ rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype,
switch (inset_type) {
case INSET_HASH:
- i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
- (uint32_t)(inset_reg & UINT32_MAX));
- i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
- (uint32_t)((inset_reg >>
- I40E_32_BIT_WIDTH) & UINT32_MAX));
+ i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
+ (uint32_t)(inset_reg & UINT32_MAX));
+ i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
+ (uint32_t)((inset_reg >>
+ I40E_32_BIT_WIDTH) & UINT32_MAX));
for (i = 0; i < 2; i++)
- i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
- mask_reg[i]);
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_HASH_MSK(i, pctype),
+ mask_reg[i]);
+ i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
+ i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
break;
case INSET_FDIR:
i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
@@ -3114,8 +3172,10 @@ rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype,
(uint32_t)((inset_reg >>
I40E_32_BIT_WIDTH) & UINT32_MAX));
for (i = 0; i < 2; i++)
- i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
- mask_reg[i]);
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_FD_MSK(i, pctype),
+ mask_reg[i]);
+ i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
break;
case INSET_FDIR_FLX:
i40e_check_write_reg(hw, I40E_PRTQF_FD_FLXINSET(pctype),
diff --git a/drivers/net/i40e/rte_pmd_i40e.h b/drivers/net/i40e/rte_pmd_i40e.h
index d248adb1..be4a6024 100644
--- a/drivers/net/i40e/rte_pmd_i40e.h
+++ b/drivers/net/i40e/rte_pmd_i40e.h
@@ -456,6 +456,24 @@ int rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id,
struct ether_addr *mac_addr);
/**
+ * Remove the VF MAC address.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF id.
+ * @param mac_addr
+ * VF MAC address.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *vf_id* or *mac_addr* is invalid.
+ */
+int
+rte_pmd_i40e_remove_vf_mac_addr(uint16_t port, uint16_t vf_id,
+ struct ether_addr *mac_addr);
+
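+/*
+ * Illustrative usage sketch; the port id (0), VF id (0) and MAC address are
+ * hypothetical values chosen for the example only:
+ *
+ *   #include <stdio.h>
+ *   #include <rte_pmd_i40e.h>
+ *
+ *   struct ether_addr vf_mac = {
+ *       .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
+ *   };
+ *   int ret = rte_pmd_i40e_remove_vf_mac_addr(0, 0, &vf_mac);
+ *
+ *   if (ret != 0)
+ *       printf("rte_pmd_i40e_remove_vf_mac_addr: %d\n", ret);
+ */
+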
+/**
* Enable/Disable vf vlan strip for all queues in a pool
*
* @param port
diff --git a/drivers/net/ifc/Makefile b/drivers/net/ifc/Makefile
new file mode 100644
index 00000000..1011995b
--- /dev/null
+++ b/drivers/net/ifc/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_ifcvf_vdpa.a
+
+LDLIBS += -lpthread
+LDLIBS += -lrte_eal -lrte_pci -lrte_vhost -lrte_bus_pci
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+#
+# Add extra flags for base driver source files to disable warnings in them
+#
+BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))))
+
+VPATH += $(SRCDIR)/base
+
+EXPORT_MAP := rte_ifcvf_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_IFCVF_VDPA_PMD) += ifcvf_vdpa.c
+SRCS-$(CONFIG_RTE_LIBRTE_IFCVF_VDPA_PMD) += ifcvf.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ifc/base/ifcvf.c b/drivers/net/ifc/base/ifcvf.c
new file mode 100644
index 00000000..4b22d9ed
--- /dev/null
+++ b/drivers/net/ifc/base/ifcvf.c
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include "ifcvf.h"
+#include "ifcvf_osdep.h"
+
+STATIC void *
+get_cap_addr(struct ifcvf_hw *hw, struct ifcvf_pci_cap *cap)
+{
+ u8 bar = cap->bar;
+ u32 length = cap->length;
+ u32 offset = cap->offset;
+
+ if (bar > IFCVF_PCI_MAX_RESOURCE - 1) {
+ DEBUGOUT("invalid bar: %u\n", bar);
+ return NULL;
+ }
+
+ if (offset + length < offset) {
+ DEBUGOUT("offset(%u) + length(%u) overflows\n",
+ offset, length);
+ return NULL;
+ }
+
+ if (offset + length > hw->mem_resource[cap->bar].len) {
+ DEBUGOUT("offset(%u) + length(%u) overflows bar length(%u)",
+ offset, length, (u32)hw->mem_resource[cap->bar].len);
+ return NULL;
+ }
+
+ return hw->mem_resource[bar].addr + offset;
+}
+
+int
+ifcvf_init_hw(struct ifcvf_hw *hw, PCI_DEV *dev)
+{
+ int ret;
+ u8 pos;
+ struct ifcvf_pci_cap cap;
+
+ ret = PCI_READ_CONFIG_BYTE(dev, &pos, PCI_CAPABILITY_LIST);
+ if (ret < 0) {
+ DEBUGOUT("failed to read pci capability list\n");
+ return -1;
+ }
+
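+ /*
+ * Walk the PCI capability list. The vendor-specific capabilities
+ * (PCI_CAP_ID_VNDR) describe in which BAR, and at which offset, the
+ * common, notify, ISR and device configuration structures are exposed.
+ */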
+ while (pos) {
+ ret = PCI_READ_CONFIG_RANGE(dev, (u32 *)&cap,
+ sizeof(cap), pos);
+ if (ret < 0) {
+ DEBUGOUT("failed to read cap at pos: %x", pos);
+ break;
+ }
+
+ if (cap.cap_vndr != PCI_CAP_ID_VNDR)
+ goto next;
+
+ DEBUGOUT("cfg type: %u, bar: %u, offset: %u, "
+ "len: %u\n", cap.cfg_type, cap.bar,
+ cap.offset, cap.length);
+
+ switch (cap.cfg_type) {
+ case IFCVF_PCI_CAP_COMMON_CFG:
+ hw->common_cfg = get_cap_addr(hw, &cap);
+ break;
+ case IFCVF_PCI_CAP_NOTIFY_CFG:
+ PCI_READ_CONFIG_DWORD(dev, &hw->notify_off_multiplier,
+ pos + sizeof(cap));
+ hw->notify_base = get_cap_addr(hw, &cap);
+ hw->notify_region = cap.bar;
+ break;
+ case IFCVF_PCI_CAP_ISR_CFG:
+ hw->isr = get_cap_addr(hw, &cap);
+ break;
+ case IFCVF_PCI_CAP_DEVICE_CFG:
+ hw->dev_cfg = get_cap_addr(hw, &cap);
+ break;
+ }
+next:
+ pos = cap.cap_next;
+ }
+
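+ /* BAR4 is expected to expose the live-migration/ring-state registers
+ * described by the IFCVF_LM_* layout in ifcvf.h.
+ */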
+ hw->lm_cfg = hw->mem_resource[4].addr;
+
+ if (hw->common_cfg == NULL || hw->notify_base == NULL ||
+ hw->isr == NULL || hw->dev_cfg == NULL) {
+ DEBUGOUT("capability incomplete\n");
+ return -1;
+ }
+
+ DEBUGOUT("capability mapping:\ncommon cfg: %p\n"
+ "notify base: %p\nisr cfg: %p\ndevice cfg: %p\n"
+ "multiplier: %u\n",
+ hw->common_cfg, hw->notify_base,
+ hw->isr, hw->dev_cfg,
+ hw->notify_off_multiplier);
+
+ return 0;
+}
+
+STATIC u8
+ifcvf_get_status(struct ifcvf_hw *hw)
+{
+ return IFCVF_READ_REG8(&hw->common_cfg->device_status);
+}
+
+STATIC void
+ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
+{
+ IFCVF_WRITE_REG8(status, &hw->common_cfg->device_status);
+}
+
+STATIC void
+ifcvf_reset(struct ifcvf_hw *hw)
+{
+ ifcvf_set_status(hw, 0);
+
+ /* flush status write */
+ while (ifcvf_get_status(hw))
+ msec_delay(1);
+}
+
+STATIC void
+ifcvf_add_status(struct ifcvf_hw *hw, u8 status)
+{
+ if (status != 0)
+ status |= ifcvf_get_status(hw);
+
+ ifcvf_set_status(hw, status);
+ ifcvf_get_status(hw);
+}
+
+u64
+ifcvf_get_features(struct ifcvf_hw *hw)
+{
+ u32 features_lo, features_hi;
+ struct ifcvf_pci_common_cfg *cfg = hw->common_cfg;
+
+ IFCVF_WRITE_REG32(0, &cfg->device_feature_select);
+ features_lo = IFCVF_READ_REG32(&cfg->device_feature);
+
+ IFCVF_WRITE_REG32(1, &cfg->device_feature_select);
+ features_hi = IFCVF_READ_REG32(&cfg->device_feature);
+
+ return ((u64)features_hi << 32) | features_lo;
+}
+
+STATIC void
+ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
+{
+ struct ifcvf_pci_common_cfg *cfg = hw->common_cfg;
+
+ IFCVF_WRITE_REG32(0, &cfg->guest_feature_select);
+ IFCVF_WRITE_REG32(features & ((1ULL << 32) - 1), &cfg->guest_feature);
+
+ IFCVF_WRITE_REG32(1, &cfg->guest_feature_select);
+ IFCVF_WRITE_REG32(features >> 32, &cfg->guest_feature);
+}
+
+STATIC int
+ifcvf_config_features(struct ifcvf_hw *hw)
+{
+ u64 host_features;
+
+ host_features = ifcvf_get_features(hw);
+ hw->req_features &= host_features;
+
+ ifcvf_set_features(hw, hw->req_features);
+ ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_FEATURES_OK);
+
+ if (!(ifcvf_get_status(hw) & IFCVF_CONFIG_STATUS_FEATURES_OK)) {
+ DEBUGOUT("failed to set FEATURES_OK status\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+STATIC void
+io_write64_twopart(u64 val, u32 *lo, u32 *hi)
+{
+ IFCVF_WRITE_REG32(val & ((1ULL << 32) - 1), lo);
+ IFCVF_WRITE_REG32(val >> 32, hi);
+}
+
+STATIC int
+ifcvf_hw_enable(struct ifcvf_hw *hw)
+{
+ struct ifcvf_pci_common_cfg *cfg;
+ u8 *lm_cfg;
+ u32 i;
+ u16 notify_off;
+
+ cfg = hw->common_cfg;
+ lm_cfg = hw->lm_cfg;
+
+ IFCVF_WRITE_REG16(0, &cfg->msix_config);
+ if (IFCVF_READ_REG16(&cfg->msix_config) == IFCVF_MSI_NO_VECTOR) {
+ DEBUGOUT("msix vec alloc failed for device config\n");
+ return -1;
+ }
+
+ for (i = 0; i < hw->nr_vring; i++) {
+ IFCVF_WRITE_REG16(i, &cfg->queue_select);
+ io_write64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
+ &cfg->queue_desc_hi);
+ io_write64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
+ &cfg->queue_avail_hi);
+ io_write64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
+ &cfg->queue_used_hi);
+ IFCVF_WRITE_REG16(hw->vring[i].size, &cfg->queue_size);
+
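+ /*
+ * Seed the ring state in the LM region: each 0x40-byte slot holds
+ * two queues, 4 bytes per queue, with last_avail_idx in the low
+ * 16 bits and last_used_idx in the high 16 bits.
+ */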
+ *(u32 *)(lm_cfg + IFCVF_LM_RING_STATE_OFFSET +
+ (i / 2) * IFCVF_LM_CFG_SIZE + (i % 2) * 4) =
+ (u32)hw->vring[i].last_avail_idx |
+ ((u32)hw->vring[i].last_used_idx << 16);
+
+ IFCVF_WRITE_REG16(i + 1, &cfg->queue_msix_vector);
+ if (IFCVF_READ_REG16(&cfg->queue_msix_vector) ==
+ IFCVF_MSI_NO_VECTOR) {
+ DEBUGOUT("queue %u, msix vec alloc failed\n",
+ i);
+ return -1;
+ }
+
+ notify_off = IFCVF_READ_REG16(&cfg->queue_notify_off);
+ hw->notify_addr[i] = (void *)((u8 *)hw->notify_base +
+ notify_off * hw->notify_off_multiplier);
+ IFCVF_WRITE_REG16(1, &cfg->queue_enable);
+ }
+
+ return 0;
+}
+
+STATIC void
+ifcvf_hw_disable(struct ifcvf_hw *hw)
+{
+ u32 i;
+ struct ifcvf_pci_common_cfg *cfg;
+ u32 ring_state;
+
+ cfg = hw->common_cfg;
+
+ IFCVF_WRITE_REG16(IFCVF_MSI_NO_VECTOR, &cfg->msix_config);
+ for (i = 0; i < hw->nr_vring; i++) {
+ IFCVF_WRITE_REG16(i, &cfg->queue_select);
+ IFCVF_WRITE_REG16(0, &cfg->queue_enable);
+ IFCVF_WRITE_REG16(IFCVF_MSI_NO_VECTOR, &cfg->queue_msix_vector);
+ ring_state = *(u32 *)(hw->lm_cfg + IFCVF_LM_RING_STATE_OFFSET +
+ (i / 2) * IFCVF_LM_CFG_SIZE + (i % 2) * 4);
+ hw->vring[i].last_avail_idx = (u16)ring_state;
+ hw->vring[i].last_used_idx = (u16)(ring_state >> 16);
+ }
+}
+
+int
+ifcvf_start_hw(struct ifcvf_hw *hw)
+{
+ ifcvf_reset(hw);
+ ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_ACK);
+ ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_DRIVER);
+
+ if (ifcvf_config_features(hw) < 0)
+ return -1;
+
+ if (ifcvf_hw_enable(hw) < 0)
+ return -1;
+
+ ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_DRIVER_OK);
+ return 0;
+}
+
+void
+ifcvf_stop_hw(struct ifcvf_hw *hw)
+{
+ ifcvf_hw_disable(hw);
+ ifcvf_reset(hw);
+}
+
+void
+ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
+{
+ IFCVF_WRITE_REG16(qid, hw->notify_addr[qid]);
+}
+
+u8
+ifcvf_get_notify_region(struct ifcvf_hw *hw)
+{
+ return hw->notify_region;
+}
+
+u64
+ifcvf_get_queue_notify_off(struct ifcvf_hw *hw, int qid)
+{
+ return (u8 *)hw->notify_addr[qid] -
+ (u8 *)hw->mem_resource[hw->notify_region].addr;
+}
diff --git a/drivers/net/ifc/base/ifcvf.h b/drivers/net/ifc/base/ifcvf.h
new file mode 100644
index 00000000..badacb61
--- /dev/null
+++ b/drivers/net/ifc/base/ifcvf.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _IFCVF_H_
+#define _IFCVF_H_
+
+#include "ifcvf_osdep.h"
+
+#define IFCVF_VENDOR_ID 0x1AF4
+#define IFCVF_DEVICE_ID 0x1041
+#define IFCVF_SUBSYS_VENDOR_ID 0x8086
+#define IFCVF_SUBSYS_DEVICE_ID 0x001A
+
+#define IFCVF_MAX_QUEUES 1
+#define VIRTIO_F_IOMMU_PLATFORM 33
+
+/* Common configuration */
+#define IFCVF_PCI_CAP_COMMON_CFG 1
+/* Notifications */
+#define IFCVF_PCI_CAP_NOTIFY_CFG 2
+/* ISR Status */
+#define IFCVF_PCI_CAP_ISR_CFG 3
+/* Device specific configuration */
+#define IFCVF_PCI_CAP_DEVICE_CFG 4
+/* PCI configuration access */
+#define IFCVF_PCI_CAP_PCI_CFG 5
+
+#define IFCVF_CONFIG_STATUS_RESET 0x00
+#define IFCVF_CONFIG_STATUS_ACK 0x01
+#define IFCVF_CONFIG_STATUS_DRIVER 0x02
+#define IFCVF_CONFIG_STATUS_DRIVER_OK 0x04
+#define IFCVF_CONFIG_STATUS_FEATURES_OK 0x08
+#define IFCVF_CONFIG_STATUS_FAILED 0x80
+
+#define IFCVF_MSI_NO_VECTOR 0xffff
+#define IFCVF_PCI_MAX_RESOURCE 6
+
+#define IFCVF_LM_CFG_SIZE 0x40
+#define IFCVF_LM_RING_STATE_OFFSET 0x20
+
+#define IFCVF_LM_LOGGING_CTRL 0x0
+
+#define IFCVF_LM_BASE_ADDR_LOW 0x10
+#define IFCVF_LM_BASE_ADDR_HIGH 0x14
+#define IFCVF_LM_END_ADDR_LOW 0x18
+#define IFCVF_LM_END_ADDR_HIGH 0x1c
+
+#define IFCVF_LM_DISABLE 0x0
+#define IFCVF_LM_ENABLE_VF 0x1
+#define IFCVF_LM_ENABLE_PF 0x3
+
+#define IFCVF_32_BIT_MASK 0xffffffff
+
+struct ifcvf_pci_cap {
+ u8 cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */
+ u8 cap_next; /* Generic PCI field: next ptr. */
+ u8 cap_len; /* Generic PCI field: capability length */
+ u8 cfg_type; /* Identifies the structure. */
+ u8 bar; /* Where to find it. */
+ u8 padding[3]; /* Pad to full dword. */
+ u32 offset; /* Offset within bar. */
+ u32 length; /* Length of the structure, in bytes. */
+};
+
+struct ifcvf_pci_notify_cap {
+ struct ifcvf_pci_cap cap;
+ u32 notify_off_multiplier; /* Multiplier for queue_notify_off. */
+};
+
+struct ifcvf_pci_common_cfg {
+ /* About the whole device. */
+ u32 device_feature_select;
+ u32 device_feature;
+ u32 guest_feature_select;
+ u32 guest_feature;
+ u16 msix_config;
+ u16 num_queues;
+ u8 device_status;
+ u8 config_generation;
+
+ /* About a specific virtqueue. */
+ u16 queue_select;
+ u16 queue_size;
+ u16 queue_msix_vector;
+ u16 queue_enable;
+ u16 queue_notify_off;
+ u32 queue_desc_lo;
+ u32 queue_desc_hi;
+ u32 queue_avail_lo;
+ u32 queue_avail_hi;
+ u32 queue_used_lo;
+ u32 queue_used_hi;
+};
+
+struct ifcvf_net_config {
+ u8 mac[6];
+ u16 status;
+ u16 max_virtqueue_pairs;
+} __attribute__((packed));
+
+struct ifcvf_pci_mem_resource {
+ u64 phys_addr; /**< Physical address, 0 if not resource. */
+ u64 len; /**< Length of the resource. */
+ u8 *addr; /**< Virtual address, NULL when not mapped. */
+};
+
+struct vring_info {
+ u64 desc;
+ u64 avail;
+ u64 used;
+ u16 size;
+ u16 last_avail_idx;
+ u16 last_used_idx;
+};
+
+struct ifcvf_hw {
+ u64 req_features;
+ u8 notify_region;
+ u32 notify_off_multiplier;
+ struct ifcvf_pci_common_cfg *common_cfg;
+ struct ifcvf_net_config *dev_cfg;
+ u8 *isr;
+ u16 *notify_base;
+ u16 *notify_addr[IFCVF_MAX_QUEUES * 2];
+ u8 *lm_cfg;
+ struct vring_info vring[IFCVF_MAX_QUEUES * 2];
+ u8 nr_vring;
+ struct ifcvf_pci_mem_resource mem_resource[IFCVF_PCI_MAX_RESOURCE];
+};
+
+int
+ifcvf_init_hw(struct ifcvf_hw *hw, PCI_DEV *dev);
+
+u64
+ifcvf_get_features(struct ifcvf_hw *hw);
+
+int
+ifcvf_start_hw(struct ifcvf_hw *hw);
+
+void
+ifcvf_stop_hw(struct ifcvf_hw *hw);
+
+void
+ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid);
+
+u8
+ifcvf_get_notify_region(struct ifcvf_hw *hw);
+
+u64
+ifcvf_get_queue_notify_off(struct ifcvf_hw *hw, int qid);
+
+#endif /* _IFCVF_H_ */
diff --git a/drivers/net/ifc/base/ifcvf_osdep.h b/drivers/net/ifc/base/ifcvf_osdep.h
new file mode 100644
index 00000000..cf151ef5
--- /dev/null
+++ b/drivers/net/ifc/base/ifcvf_osdep.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _IFCVF_OSDEP_H_
+#define _IFCVF_OSDEP_H_
+
+#include <stdint.h>
+#include <linux/pci_regs.h>
+
+#include <rte_cycles.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_log.h>
+#include <rte_io.h>
+
+#define DEBUGOUT(S, args...) RTE_LOG(DEBUG, PMD, S, ##args)
+#define STATIC static
+
+#define msec_delay rte_delay_ms
+
+#define IFCVF_READ_REG8(reg) rte_read8(reg)
+#define IFCVF_WRITE_REG8(val, reg) rte_write8((val), (reg))
+#define IFCVF_READ_REG16(reg) rte_read16(reg)
+#define IFCVF_WRITE_REG16(val, reg) rte_write16((val), (reg))
+#define IFCVF_READ_REG32(reg) rte_read32(reg)
+#define IFCVF_WRITE_REG32(val, reg) rte_write32((val), (reg))
+
+typedef struct rte_pci_device PCI_DEV;
+
+#define PCI_READ_CONFIG_BYTE(dev, val, where) \
+ rte_pci_read_config(dev, val, 1, where)
+
+#define PCI_READ_CONFIG_DWORD(dev, val, where) \
+ rte_pci_read_config(dev, val, 4, where)
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef int16_t s16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef int64_t s64;
+typedef uint64_t u64;
+
+static inline int
+PCI_READ_CONFIG_RANGE(PCI_DEV *dev, uint32_t *val, int size, int where)
+{
+ return rte_pci_read_config(dev, val, size, where);
+}
+
+#endif /* _IFCVF_OSDEP_H_ */
diff --git a/drivers/net/ifc/ifcvf_vdpa.c b/drivers/net/ifc/ifcvf_vdpa.c
new file mode 100644
index 00000000..c6627c23
--- /dev/null
+++ b/drivers/net/ifc/ifcvf_vdpa.c
@@ -0,0 +1,792 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <unistd.h>
+#include <pthread.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/epoll.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_bus_pci.h>
+#include <rte_vhost.h>
+#include <rte_vdpa.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include "base/ifcvf.h"
+
+#define DRV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, ifcvf_vdpa_logtype, \
+ "%s(): " fmt "\n", __func__, ##args)
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+static int ifcvf_vdpa_logtype;
+
+struct ifcvf_internal {
+ struct rte_vdpa_dev_addr dev_addr;
+ struct rte_pci_device *pdev;
+ struct ifcvf_hw hw;
+ int vfio_container_fd;
+ int vfio_group_fd;
+ int vfio_dev_fd;
+ pthread_t tid; /* thread for notify relay */
+ int epfd;
+ int vid;
+ int did;
+ uint16_t max_queues;
+ uint64_t features;
+ rte_atomic32_t started;
+ rte_atomic32_t dev_attached;
+ rte_atomic32_t running;
+ rte_spinlock_t lock;
+};
+
+struct internal_list {
+ TAILQ_ENTRY(internal_list) next;
+ struct ifcvf_internal *internal;
+};
+
+TAILQ_HEAD(internal_list_head, internal_list);
+static struct internal_list_head internal_list =
+ TAILQ_HEAD_INITIALIZER(internal_list);
+
+static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static struct internal_list *
+find_internal_resource_by_did(int did)
+{
+ int found = 0;
+ struct internal_list *list;
+
+ pthread_mutex_lock(&internal_list_lock);
+
+ TAILQ_FOREACH(list, &internal_list, next) {
+ if (did == list->internal->did) {
+ found = 1;
+ break;
+ }
+ }
+
+ pthread_mutex_unlock(&internal_list_lock);
+
+ if (!found)
+ return NULL;
+
+ return list;
+}
+
+static struct internal_list *
+find_internal_resource_by_dev(struct rte_pci_device *pdev)
+{
+ int found = 0;
+ struct internal_list *list;
+
+ pthread_mutex_lock(&internal_list_lock);
+
+ TAILQ_FOREACH(list, &internal_list, next) {
+ if (pdev == list->internal->pdev) {
+ found = 1;
+ break;
+ }
+ }
+
+ pthread_mutex_unlock(&internal_list_lock);
+
+ if (!found)
+ return NULL;
+
+ return list;
+}
+
+static int
+ifcvf_vfio_setup(struct ifcvf_internal *internal)
+{
+ struct rte_pci_device *dev = internal->pdev;
+ char devname[RTE_DEV_NAME_MAX_LEN] = {0};
+ int iommu_group_num;
+ int ret = 0;
+ int i;
+
+ internal->vfio_dev_fd = -1;
+ internal->vfio_group_fd = -1;
+ internal->vfio_container_fd = -1;
+
+ rte_pci_device_name(&dev->addr, devname, RTE_DEV_NAME_MAX_LEN);
+ rte_vfio_get_group_num(rte_pci_get_sysfs_path(), devname,
+ &iommu_group_num);
+
+ internal->vfio_container_fd = rte_vfio_container_create();
+ if (internal->vfio_container_fd < 0)
+ return -1;
+
+ internal->vfio_group_fd = rte_vfio_container_group_bind(
+ internal->vfio_container_fd, iommu_group_num);
+ if (internal->vfio_group_fd < 0)
+ goto err;
+
+ if (rte_pci_map_device(dev))
+ goto err;
+
+ internal->vfio_dev_fd = dev->intr_handle.vfio_dev_fd;
+
+ for (i = 0; i < RTE_MIN(PCI_MAX_RESOURCE, IFCVF_PCI_MAX_RESOURCE);
+ i++) {
+ internal->hw.mem_resource[i].addr =
+ internal->pdev->mem_resource[i].addr;
+ internal->hw.mem_resource[i].phys_addr =
+ internal->pdev->mem_resource[i].phys_addr;
+ internal->hw.mem_resource[i].len =
+ internal->pdev->mem_resource[i].len;
+ }
+ ret = ifcvf_init_hw(&internal->hw, internal->pdev);
+
+ return ret;
+
+err:
+ rte_vfio_container_destroy(internal->vfio_container_fd);
+ return -1;
+}
+
+static int
+ifcvf_dma_map(struct ifcvf_internal *internal, int do_map)
+{
+ uint32_t i;
+ int ret;
+ struct rte_vhost_memory *mem = NULL;
+ int vfio_container_fd;
+
+ ret = rte_vhost_get_mem_table(internal->vid, &mem);
+ if (ret < 0) {
+ DRV_LOG(ERR, "failed to get VM memory layout.");
+ goto exit;
+ }
+
+ vfio_container_fd = internal->vfio_container_fd;
+
+ for (i = 0; i < mem->nregions; i++) {
+ struct rte_vhost_mem_region *reg;
+
+ reg = &mem->regions[i];
+ DRV_LOG(INFO, "%s, region %u: HVA 0x%" PRIx64 ", "
+ "GPA 0x%" PRIx64 ", size 0x%" PRIx64 ".",
+ do_map ? "DMA map" : "DMA unmap", i,
+ reg->host_user_addr, reg->guest_phys_addr, reg->size);
+
+ if (do_map) {
+ ret = rte_vfio_container_dma_map(vfio_container_fd,
+ reg->host_user_addr, reg->guest_phys_addr,
+ reg->size);
+ if (ret < 0) {
+ DRV_LOG(ERR, "DMA map failed.");
+ goto exit;
+ }
+ } else {
+ ret = rte_vfio_container_dma_unmap(vfio_container_fd,
+ reg->host_user_addr, reg->guest_phys_addr,
+ reg->size);
+ if (ret < 0) {
+ DRV_LOG(ERR, "DMA unmap failed.");
+ goto exit;
+ }
+ }
+ }
+
+exit:
+ if (mem)
+ free(mem);
+ return ret;
+}
+
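+/*
+ * Translate a vring address (host user virtual address as seen by vhost)
+ * into a guest physical address via the VM memory table; returns 0 when no
+ * region contains the address.
+ */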
+static uint64_t
+qva_to_gpa(int vid, uint64_t qva)
+{
+ struct rte_vhost_memory *mem = NULL;
+ struct rte_vhost_mem_region *reg;
+ uint32_t i;
+ uint64_t gpa = 0;
+
+ if (rte_vhost_get_mem_table(vid, &mem) < 0)
+ goto exit;
+
+ for (i = 0; i < mem->nregions; i++) {
+ reg = &mem->regions[i];
+
+ if (qva >= reg->host_user_addr &&
+ qva < reg->host_user_addr + reg->size) {
+ gpa = qva - reg->host_user_addr + reg->guest_phys_addr;
+ break;
+ }
+ }
+
+exit:
+ if (mem)
+ free(mem);
+ return gpa;
+}
+
+static int
+vdpa_ifcvf_start(struct ifcvf_internal *internal)
+{
+ struct ifcvf_hw *hw = &internal->hw;
+ int i, nr_vring;
+ int vid;
+ struct rte_vhost_vring vq;
+ uint64_t gpa;
+
+ vid = internal->vid;
+ nr_vring = rte_vhost_get_vring_num(vid);
+ rte_vhost_get_negotiated_features(vid, &hw->req_features);
+
+ for (i = 0; i < nr_vring; i++) {
+ rte_vhost_get_vhost_vring(vid, i, &vq);
+ gpa = qva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
+ if (gpa == 0) {
+ DRV_LOG(ERR, "Fail to get GPA for descriptor ring.");
+ return -1;
+ }
+ hw->vring[i].desc = gpa;
+
+ gpa = qva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
+ if (gpa == 0) {
+ DRV_LOG(ERR, "Fail to get GPA for available ring.");
+ return -1;
+ }
+ hw->vring[i].avail = gpa;
+
+ gpa = qva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
+ if (gpa == 0) {
+ DRV_LOG(ERR, "Fail to get GPA for used ring.");
+ return -1;
+ }
+ hw->vring[i].used = gpa;
+
+ hw->vring[i].size = vq.size;
+ rte_vhost_get_vring_base(vid, i, &hw->vring[i].last_avail_idx,
+ &hw->vring[i].last_used_idx);
+ }
+ hw->nr_vring = i;
+
+ return ifcvf_start_hw(&internal->hw);
+}
+
+static void
+vdpa_ifcvf_stop(struct ifcvf_internal *internal)
+{
+ struct ifcvf_hw *hw = &internal->hw;
+ uint32_t i;
+ int vid;
+
+ vid = internal->vid;
+ ifcvf_stop_hw(hw);
+
+ for (i = 0; i < hw->nr_vring; i++)
+ rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
+ hw->vring[i].last_used_idx);
+}
+
+#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
+ sizeof(int) * (IFCVF_MAX_QUEUES * 2 + 1))
+static int
+vdpa_enable_vfio_intr(struct ifcvf_internal *internal)
+{
+ int ret;
+ uint32_t i, nr_vring;
+ char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+ struct vfio_irq_set *irq_set;
+ int *fd_ptr;
+ struct rte_vhost_vring vring;
+
+ nr_vring = rte_vhost_get_vring_num(internal->vid);
+
+ irq_set = (struct vfio_irq_set *)irq_set_buf;
+ irq_set->argsz = sizeof(irq_set_buf);
+ irq_set->count = nr_vring + 1;
+ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+ VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+ irq_set->start = 0;
+ fd_ptr = (int *)&irq_set->data;
+ fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle.fd;
+
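+ /*
+ * Vector 0 carries the device config interrupt through the PCI
+ * device's interrupt handle; vectors 1..nr_vring are bound to each
+ * vring's callfd so the device can signal used-ring updates directly.
+ */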
+ for (i = 0; i < nr_vring; i++) {
+ rte_vhost_get_vhost_vring(internal->vid, i, &vring);
+ fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
+ }
+
+ ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ if (ret) {
+ DRV_LOG(ERR, "Error enabling MSI-X interrupts: %s",
+ strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+vdpa_disable_vfio_intr(struct ifcvf_internal *internal)
+{
+ int ret;
+ char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+ struct vfio_irq_set *irq_set;
+
+ irq_set = (struct vfio_irq_set *)irq_set_buf;
+ irq_set->argsz = sizeof(irq_set_buf);
+ irq_set->count = 0;
+ irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+ irq_set->start = 0;
+
+ ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ if (ret) {
+ DRV_LOG(ERR, "Error disabling MSI-X interrupts: %s",
+ strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+static void *
+notify_relay(void *arg)
+{
+ int i, kickfd, epfd, nfds = 0;
+ uint32_t qid, q_num;
+ struct epoll_event events[IFCVF_MAX_QUEUES * 2];
+ struct epoll_event ev;
+ uint64_t buf;
+ int nbytes;
+ struct rte_vhost_vring vring;
+ struct ifcvf_internal *internal = (struct ifcvf_internal *)arg;
+ struct ifcvf_hw *hw = &internal->hw;
+
+ q_num = rte_vhost_get_vring_num(internal->vid);
+
+ epfd = epoll_create(IFCVF_MAX_QUEUES * 2);
+ if (epfd < 0) {
+ DRV_LOG(ERR, "failed to create epoll instance.");
+ return NULL;
+ }
+ internal->epfd = epfd;
+
+ for (qid = 0; qid < q_num; qid++) {
+ ev.events = EPOLLIN | EPOLLPRI;
+ rte_vhost_get_vhost_vring(internal->vid, qid, &vring);
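+ /* queue id in the low 32 bits, kickfd in the high 32 bits */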
+ ev.data.u64 = qid | (uint64_t)vring.kickfd << 32;
+ if (epoll_ctl(epfd, EPOLL_CTL_ADD, vring.kickfd, &ev) < 0) {
+ DRV_LOG(ERR, "epoll add error: %s", strerror(errno));
+ return NULL;
+ }
+ }
+
+ for (;;) {
+ nfds = epoll_wait(epfd, events, q_num, -1);
+ if (nfds < 0) {
+ if (errno == EINTR)
+ continue;
+ DRV_LOG(ERR, "epoll_wait return fail\n");
+ return NULL;
+ }
+
+ for (i = 0; i < nfds; i++) {
+ qid = events[i].data.u32;
+ kickfd = (uint32_t)(events[i].data.u64 >> 32);
+ do {
+ nbytes = read(kickfd, &buf, 8);
+ if (nbytes < 0) {
+ if (errno == EINTR ||
+ errno == EWOULDBLOCK ||
+ errno == EAGAIN)
+ continue;
+ DRV_LOG(INFO, "Error reading "
+ "kickfd: %s",
+ strerror(errno));
+ }
+ break;
+ } while (1);
+
+ ifcvf_notify_queue(hw, qid);
+ }
+ }
+
+ return NULL;
+}
+
+static int
+setup_notify_relay(struct ifcvf_internal *internal)
+{
+ int ret;
+
+ ret = pthread_create(&internal->tid, NULL, notify_relay,
+ (void *)internal);
+ if (ret) {
+ DRV_LOG(ERR, "failed to create notify relay pthread.");
+ return -1;
+ }
+ return 0;
+}
+
+static int
+unset_notify_relay(struct ifcvf_internal *internal)
+{
+ void *status;
+
+ if (internal->tid) {
+ pthread_cancel(internal->tid);
+ pthread_join(internal->tid, &status);
+ }
+ internal->tid = 0;
+
+ if (internal->epfd >= 0)
+ close(internal->epfd);
+ internal->epfd = -1;
+
+ return 0;
+}
+
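+/*
+ * Bring the datapath up only when the device has been probed (started) and a
+ * vhost port is attached; tear it down again when either condition is
+ * cleared. Protected by internal->lock.
+ */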
+static int
+update_datapath(struct ifcvf_internal *internal)
+{
+ int ret;
+
+ rte_spinlock_lock(&internal->lock);
+
+ if (!rte_atomic32_read(&internal->running) &&
+ (rte_atomic32_read(&internal->started) &&
+ rte_atomic32_read(&internal->dev_attached))) {
+ ret = ifcvf_dma_map(internal, 1);
+ if (ret)
+ goto err;
+
+ ret = vdpa_enable_vfio_intr(internal);
+ if (ret)
+ goto err;
+
+ ret = setup_notify_relay(internal);
+ if (ret)
+ goto err;
+
+ ret = vdpa_ifcvf_start(internal);
+ if (ret)
+ goto err;
+
+ rte_atomic32_set(&internal->running, 1);
+ } else if (rte_atomic32_read(&internal->running) &&
+ (!rte_atomic32_read(&internal->started) ||
+ !rte_atomic32_read(&internal->dev_attached))) {
+ vdpa_ifcvf_stop(internal);
+
+ ret = unset_notify_relay(internal);
+ if (ret)
+ goto err;
+
+ ret = vdpa_disable_vfio_intr(internal);
+ if (ret)
+ goto err;
+
+ ret = ifcvf_dma_map(internal, 0);
+ if (ret)
+ goto err;
+
+ rte_atomic32_set(&internal->running, 0);
+ }
+
+ rte_spinlock_unlock(&internal->lock);
+ return 0;
+err:
+ rte_spinlock_unlock(&internal->lock);
+ return ret;
+}
+
+static int
+ifcvf_dev_config(int vid)
+{
+ int did;
+ struct internal_list *list;
+ struct ifcvf_internal *internal;
+
+ did = rte_vhost_get_vdpa_device_id(vid);
+ list = find_internal_resource_by_did(did);
+ if (list == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d", did);
+ return -1;
+ }
+
+ internal = list->internal;
+ internal->vid = vid;
+ rte_atomic32_set(&internal->dev_attached, 1);
+ update_datapath(internal);
+
+ return 0;
+}
+
+static int
+ifcvf_dev_close(int vid)
+{
+ int did;
+ struct internal_list *list;
+ struct ifcvf_internal *internal;
+
+ did = rte_vhost_get_vdpa_device_id(vid);
+ list = find_internal_resource_by_did(did);
+ if (list == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d", did);
+ return -1;
+ }
+
+ internal = list->internal;
+ rte_atomic32_set(&internal->dev_attached, 0);
+ update_datapath(internal);
+
+ return 0;
+}
+
+static int
+ifcvf_get_vfio_group_fd(int vid)
+{
+ int did;
+ struct internal_list *list;
+
+ did = rte_vhost_get_vdpa_device_id(vid);
+ list = find_internal_resource_by_did(did);
+ if (list == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d", did);
+ return -1;
+ }
+
+ return list->internal->vfio_group_fd;
+}
+
+static int
+ifcvf_get_vfio_device_fd(int vid)
+{
+ int did;
+ struct internal_list *list;
+
+ did = rte_vhost_get_vdpa_device_id(vid);
+ list = find_internal_resource_by_did(did);
+ if (list == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d", did);
+ return -1;
+ }
+
+ return list->internal->vfio_dev_fd;
+}
+
+static int
+ifcvf_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
+{
+ int did;
+ struct internal_list *list;
+ struct ifcvf_internal *internal;
+ struct vfio_region_info reg = { .argsz = sizeof(reg) };
+ int ret;
+
+ did = rte_vhost_get_vdpa_device_id(vid);
+ list = find_internal_resource_by_did(did);
+ if (list == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d", did);
+ return -1;
+ }
+
+ internal = list->internal;
+
+ reg.index = ifcvf_get_notify_region(&internal->hw);
+ ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
+ if (ret) {
+ DRV_LOG(ERR, "Get not get device region info: %s",
+ strerror(errno));
+ return -1;
+ }
+
+ *offset = ifcvf_get_queue_notify_off(&internal->hw, qid) + reg.offset;
+ *size = 0x1000;
+
+ return 0;
+}
+
+static int
+ifcvf_get_queue_num(int did, uint32_t *queue_num)
+{
+ struct internal_list *list;
+
+ list = find_internal_resource_by_did(did);
+ if (list == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d", did);
+ return -1;
+ }
+
+ *queue_num = list->internal->max_queues;
+
+ return 0;
+}
+
+static int
+ifcvf_get_vdpa_features(int did, uint64_t *features)
+{
+ struct internal_list *list;
+
+ list = find_internal_resource_by_did(did);
+ if (list == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d", did);
+ return -1;
+ }
+
+ *features = list->internal->features;
+
+ return 0;
+}
+
+#define VDPA_SUPPORTED_PROTOCOL_FEATURES \
+ (1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK | \
+ 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD)
+static int
+ifcvf_get_protocol_features(int did __rte_unused, uint64_t *features)
+{
+ *features = VDPA_SUPPORTED_PROTOCOL_FEATURES;
+ return 0;
+}
+
+struct rte_vdpa_dev_ops ifcvf_ops = {
+ .get_queue_num = ifcvf_get_queue_num,
+ .get_features = ifcvf_get_vdpa_features,
+ .get_protocol_features = ifcvf_get_protocol_features,
+ .dev_conf = ifcvf_dev_config,
+ .dev_close = ifcvf_dev_close,
+ .set_vring_state = NULL,
+ .set_features = NULL,
+ .migration_done = NULL,
+ .get_vfio_group_fd = ifcvf_get_vfio_group_fd,
+ .get_vfio_device_fd = ifcvf_get_vfio_device_fd,
+ .get_notify_area = ifcvf_get_notify_area,
+};
+
+static int
+ifcvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ uint64_t features;
+ struct ifcvf_internal *internal = NULL;
+ struct internal_list *list = NULL;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ list = rte_zmalloc("ifcvf", sizeof(*list), 0);
+ if (list == NULL)
+ goto error;
+
+ internal = rte_zmalloc("ifcvf", sizeof(*internal), 0);
+ if (internal == NULL)
+ goto error;
+
+ internal->pdev = pci_dev;
+ rte_spinlock_init(&internal->lock);
+ if (ifcvf_vfio_setup(internal) < 0)
+ goto error;
+
+ internal->max_queues = IFCVF_MAX_QUEUES;
+ features = ifcvf_get_features(&internal->hw);
+ internal->features = (features &
+ ~(1ULL << VIRTIO_F_IOMMU_PLATFORM)) |
+ (1ULL << VHOST_USER_F_PROTOCOL_FEATURES);
+
+ internal->dev_addr.pci_addr = pci_dev->addr;
+ internal->dev_addr.type = PCI_ADDR;
+ list->internal = internal;
+
+ pthread_mutex_lock(&internal_list_lock);
+ TAILQ_INSERT_TAIL(&internal_list, list, next);
+ pthread_mutex_unlock(&internal_list_lock);
+
+ internal->did = rte_vdpa_register_device(&internal->dev_addr,
+ &ifcvf_ops);
+ if (internal->did < 0)
+ goto error;
+
+ rte_atomic32_set(&internal->started, 1);
+ update_datapath(internal);
+
+ return 0;
+
+error:
+ rte_free(list);
+ rte_free(internal);
+ return -1;
+}
+
+static int
+ifcvf_pci_remove(struct rte_pci_device *pci_dev)
+{
+ struct ifcvf_internal *internal;
+ struct internal_list *list;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ list = find_internal_resource_by_dev(pci_dev);
+ if (list == NULL) {
+ DRV_LOG(ERR, "Invalid device: %s", pci_dev->name);
+ return -1;
+ }
+
+ internal = list->internal;
+ rte_atomic32_set(&internal->started, 0);
+ update_datapath(internal);
+
+ rte_pci_unmap_device(internal->pdev);
+ rte_vfio_container_destroy(internal->vfio_container_fd);
+ rte_vdpa_unregister_device(internal->did);
+
+ pthread_mutex_lock(&internal_list_lock);
+ TAILQ_REMOVE(&internal_list, list, next);
+ pthread_mutex_unlock(&internal_list_lock);
+
+ rte_free(list);
+ rte_free(internal);
+
+ return 0;
+}
+
+/*
+ * IFCVF has the same vendor ID and device ID as the virtio net PCI device;
+ * it is identified by its specific subsystem vendor ID and device ID.
+ */
+static const struct rte_pci_id pci_id_ifcvf_map[] = {
+ { .class_id = RTE_CLASS_ANY_ID,
+ .vendor_id = IFCVF_VENDOR_ID,
+ .device_id = IFCVF_DEVICE_ID,
+ .subsystem_vendor_id = IFCVF_SUBSYS_VENDOR_ID,
+ .subsystem_device_id = IFCVF_SUBSYS_DEVICE_ID,
+ },
+
+ { .vendor_id = 0, /* sentinel */
+ },
+};
+
+static struct rte_pci_driver rte_ifcvf_vdpa = {
+ .id_table = pci_id_ifcvf_map,
+ .drv_flags = 0,
+ .probe = ifcvf_pci_probe,
+ .remove = ifcvf_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_ifcvf, rte_ifcvf_vdpa);
+RTE_PMD_REGISTER_PCI_TABLE(net_ifcvf, pci_id_ifcvf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ifcvf, "* vfio-pci");
+
+RTE_INIT(ifcvf_vdpa_init_log);
+static void
+ifcvf_vdpa_init_log(void)
+{
+ ifcvf_vdpa_logtype = rte_log_register("pmd.net.ifcvf_vdpa");
+ if (ifcvf_vdpa_logtype >= 0)
+ rte_log_set_level(ifcvf_vdpa_logtype, RTE_LOG_NOTICE);
+}
diff --git a/drivers/net/ifc/rte_ifcvf_version.map b/drivers/net/ifc/rte_ifcvf_version.map
new file mode 100644
index 00000000..9b9ab1a4
--- /dev/null
+++ b/drivers/net/ifc/rte_ifcvf_version.map
@@ -0,0 +1,4 @@
+DPDK_18.05 {
+
+ local: *;
+};
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index d0804fc5..7b6af353 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -20,9 +20,10 @@ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
#
# CFLAGS for icc
#
-CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259
+CFLAGS_BASE_DRIVER = -diag-disable 174 -diag-disable 593 -diag-disable 869
+CFLAGS_BASE_DRIVER += -diag-disable 981 -diag-disable 2259
-CFLAGS_ixgbe_rxtx.o += -wd3656
+CFLAGS_ixgbe_rxtx.o += -diag-disable 3656
else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
#
@@ -103,6 +104,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ipsec.c
endif
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += rte_pmd_ixgbe.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_tm.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_vf_representor.c
# install this header file
SYMLINK-$(CONFIG_RTE_LIBRTE_IXGBE_PMD)-include := rte_pmd_ixgbe.h
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 44832585..87d2ad09 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -20,7 +20,6 @@
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
-#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_eal.h>
@@ -60,9 +59,6 @@
*/
#define IXGBE_FC_LO 0x40
-/* Default minimum inter-interrupt interval for EITR configuration */
-#define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT 0x79E
-
/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE 0x680
@@ -101,8 +97,6 @@
#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
-#define IXGBE_HKEY_MAX_INDEX 10
-
/* Additional timesync values. */
#define NSEC_PER_SEC 1000000000L
#define IXGBE_INCVAL_10GB 0x66666666
@@ -118,7 +112,6 @@
#define IXGBE_VT_CTL_POOLING_MODE_MASK 0x00030000
#define IXGBE_VT_CTL_POOLING_MODE_ETAG 0x00010000
-#define DEFAULT_ETAG_ETYPE 0x893f
#define IXGBE_ETAG_ETYPE 0x00005084
#define IXGBE_ETAG_ETYPE_MASK 0x0000ffff
#define IXGBE_ETAG_ETYPE_VALID 0x80000000
@@ -133,7 +126,7 @@
#define IXGBE_EXVET_VET_EXT_SHIFT 16
#define IXGBE_DMATXCTL_VT_MASK 0xFFFF0000
-static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
@@ -196,6 +189,9 @@ static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
uint16_t queue, bool on);
static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
int on);
+static void ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
+ int mask);
+static int ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
@@ -228,7 +224,7 @@ static void ixgbe_dev_interrupt_delayed_handler(void *param);
static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
-static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
+static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
static bool is_device_supported(struct rte_eth_dev *dev,
@@ -244,8 +240,8 @@ static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
static void ixgbevf_dev_close(struct rte_eth_dev *dev);
static int ixgbevf_dev_reset(struct rte_eth_dev *dev);
-static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
-static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
+static void ixgbevf_intr_disable(struct rte_eth_dev *dev);
+static void ixgbevf_intr_enable(struct rte_eth_dev *dev);
static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
@@ -253,6 +249,7 @@ static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
uint16_t queue, int on);
+static int ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
@@ -286,7 +283,7 @@ static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
uint32_t index, uint32_t pool);
static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
-static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
+static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
struct rte_eth_syn_filter *filter);
@@ -328,6 +325,11 @@ static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
struct rte_dev_eeprom_info *eeprom);
+static int ixgbe_get_module_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo);
+static int ixgbe_get_module_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info);
+
static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
static int ixgbevf_get_regs(struct rte_eth_dev *dev,
struct rte_dev_reg_info *regs);
@@ -565,6 +567,8 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.get_eeprom_length = ixgbe_get_eeprom_length,
.get_eeprom = ixgbe_get_eeprom,
.set_eeprom = ixgbe_set_eeprom,
+ .get_module_info = ixgbe_get_module_info,
+ .get_module_eeprom = ixgbe_get_module_eeprom,
.get_dcb_info = ixgbe_dev_get_dcb_info,
.timesync_adjust_time = ixgbe_timesync_adjust_time,
.timesync_read_time = ixgbe_timesync_read_time,
@@ -787,58 +791,6 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
#define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \
sizeof(rte_ixgbevf_stats_strings[0]))
-/**
- * Atomically reads the link status information from global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = link;
- struct rte_eth_link *src = &(dev->data->dev_link);
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
-/**
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &(dev->data->dev_link);
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
/*
* This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
*/
@@ -1096,7 +1048,7 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
* It returns 0 on success.
*/
static int
-eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
+eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
@@ -1339,6 +1291,8 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw;
+ int retries = 0;
+ int ret;
PMD_INIT_FUNC_TRACE();
@@ -1359,8 +1313,20 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
/* disable uio intr before callback unregister */
rte_intr_disable(intr_handle);
- rte_intr_callback_unregister(intr_handle,
- ixgbe_dev_interrupt_handler, eth_dev);
+
+ do {
+ ret = rte_intr_callback_unregister(intr_handle,
+ ixgbe_dev_interrupt_handler, eth_dev);
+ if (ret >= 0) {
+ break;
+ } else if (ret != -EAGAIN) {
+ PMD_INIT_LOG(ERR,
+ "intr callback unregister failed: %d",
+ ret);
+ return ret;
+ }
+ rte_delay_ms(100);
+ } while (retries++ < (10 + IXGBE_LINK_UP_TIME));
/* uninitialize PF if max_vfs not zero */
ixgbe_pf_host_uninit(eth_dev);
@@ -1522,7 +1488,7 @@ static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
}
l2_tn_info->e_tag_en = FALSE;
l2_tn_info->e_tag_fwd_en = FALSE;
- l2_tn_info->e_tag_ether_type = DEFAULT_ETAG_ETYPE;
+ l2_tn_info->e_tag_ether_type = ETHER_TYPE_ETAG;
return 0;
}
@@ -1641,7 +1607,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
ixgbevf_dev_stats_reset(eth_dev);
/* Disable the interrupts for VF */
- ixgbevf_intr_disable(hw);
+ ixgbevf_intr_disable(eth_dev);
hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
diag = hw->mac.ops.reset_hw(hw);
@@ -1710,7 +1676,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
rte_intr_callback_register(intr_handle,
ixgbevf_dev_interrupt_handler, eth_dev);
rte_intr_enable(intr_handle);
- ixgbevf_intr_enable(hw);
+ ixgbevf_intr_enable(eth_dev);
PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
eth_dev->data->port_id, pci_dev->id.vendor_id,
@@ -1743,7 +1709,7 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
eth_dev->tx_pkt_burst = NULL;
/* Disable the interrupts for VF */
- ixgbevf_intr_disable(hw);
+ ixgbevf_intr_disable(eth_dev);
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
@@ -1755,16 +1721,81 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
return 0;
}
-static int eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
- struct rte_pci_device *pci_dev)
-{
- return rte_eth_dev_pci_generic_probe(pci_dev,
- sizeof(struct ixgbe_adapter), eth_ixgbe_dev_init);
+static int
+eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_dev *pf_ethdev;
+ struct rte_eth_devargs eth_da;
+ int i, retval;
+
+ if (pci_dev->device.devargs) {
+ retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
+ &eth_da);
+ if (retval)
+ return retval;
+ } else
+ memset(&eth_da, 0, sizeof(eth_da));
+
+ retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
+ sizeof(struct ixgbe_adapter),
+ eth_dev_pci_specific_init, pci_dev,
+ eth_ixgbe_dev_init, NULL);
+
+ if (retval || eth_da.nb_representor_ports < 1)
+ return retval;
+
+ pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
+ if (pf_ethdev == NULL)
+ return -ENODEV;
+
+ /* probe VF representor ports */
+ for (i = 0; i < eth_da.nb_representor_ports; i++) {
+ struct ixgbe_vf_info *vfinfo;
+ struct ixgbe_vf_representor representor;
+
+ vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(
+ pf_ethdev->data->dev_private);
+ if (vfinfo == NULL) {
+ PMD_DRV_LOG(ERR,
+ "no virtual functions supported by PF");
+ break;
+ }
+
+ representor.vf_id = eth_da.representor_ports[i];
+ representor.switch_domain_id = vfinfo->switch_domain_id;
+ representor.pf_ethdev = pf_ethdev;
+
+ /* representor port net_bdf_port */
+ snprintf(name, sizeof(name), "net_%s_representor_%d",
+ pci_dev->device.name,
+ eth_da.representor_ports[i]);
+
+ retval = rte_eth_dev_create(&pci_dev->device, name,
+ sizeof(struct ixgbe_vf_representor), NULL, NULL,
+ ixgbe_vf_representor_init, &representor);
+
+ if (retval)
+ PMD_DRV_LOG(ERR, "failed to create ixgbe vf "
+ "representor %s.", name);
+ }
+
+ return 0;
}
static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbe_dev_uninit);
+ struct rte_eth_dev *ethdev;
+
+ ethdev = rte_eth_dev_allocated(pci_dev->device.name);
+ if (!ethdev)
+ return -ENODEV;
+
+ if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
+ return rte_eth_dev_destroy(ethdev, ixgbe_vf_representor_uninit);
+ else
+ return rte_eth_dev_destroy(ethdev, eth_ixgbe_dev_uninit);
}
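With the probe rewritten this way, each requested VF representor becomes its own ethdev named net_<pci>_representor_<vf>. A hedged usage sketch of finding such a port back from an application, assuming the PCI device name the representor devargs were passed for (the function name is illustrative, not part of the patch):

#include <stdio.h>
#include <rte_ethdev.h>

/* Illustrative lookup of a representor port created by the probe above. */
static int
find_vf_representor_port(const char *pci_name, int vf_id, uint16_t *port_id)
{
	char name[RTE_ETH_NAME_MAX_LEN];

	snprintf(name, sizeof(name), "net_%s_representor_%d",
		 pci_name, vf_id);

	return rte_eth_dev_get_port_by_name(name, port_id);
}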
static struct rte_pci_driver rte_ixgbe_pmd = {
@@ -1947,10 +1978,13 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
rxq = dev->data->rx_queues[queue];
- if (on)
+ if (on) {
rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
- else
+ rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ } else {
rxq->vlan_flags = PKT_RX_VLAN;
+ rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
}
static void
@@ -2001,64 +2035,6 @@ ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}
-void
-ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
-{
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t ctrl;
- uint16_t i;
- struct ixgbe_rx_queue *rxq;
-
- PMD_INIT_FUNC_TRACE();
-
- if (hw->mac.type == ixgbe_mac_82598EB) {
- ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- ctrl &= ~IXGBE_VLNCTRL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
- } else {
- /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
- for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- ctrl &= ~IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
-
- /* record those setting for HW strip per queue */
- ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
- }
- }
-}
-
-void
-ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
-{
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t ctrl;
- uint16_t i;
- struct ixgbe_rx_queue *rxq;
-
- PMD_INIT_FUNC_TRACE();
-
- if (hw->mac.type == ixgbe_mac_82598EB) {
- ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- ctrl |= IXGBE_VLNCTRL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
- } else {
- /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
- for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- ctrl |= IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
-
- /* record those setting for HW strip per queue */
- ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
- }
- }
-}
-
static void
ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
@@ -2114,25 +2090,93 @@ ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
*/
}
-static int
-ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+void
+ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ uint32_t ctrl;
+ uint16_t i;
+ struct ixgbe_rx_queue *rxq;
+ bool on;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ ctrl |= IXGBE_VLNCTRL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
+ } else {
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ ctrl &= ~IXGBE_VLNCTRL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
+ }
+ } else {
+ /*
+ * Other 10G NIC, the VLAN strip can be set up
+ * per queue in RXDCTL
+ */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+ ctrl |= IXGBE_RXDCTL_VME;
+ on = TRUE;
+ } else {
+ ctrl &= ~IXGBE_RXDCTL_VME;
+ on = FALSE;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
+
+ /* record those setting for HW strip per queue */
+ ixgbe_vlan_hw_strip_bitmap_set(dev, i, on);
+ }
+ }
+}
+
+static void
+ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
+ uint16_t i;
+ struct rte_eth_rxmode *rxmode;
+ struct ixgbe_rx_queue *rxq;
+
if (mask & ETH_VLAN_STRIP_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
- ixgbe_vlan_hw_strip_enable_all(dev);
+ rxmode = &dev->data->dev_conf.rxmode;
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
else
- ixgbe_vlan_hw_strip_disable_all(dev);
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
+ }
+}
+
+static int
+ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
+{
+ struct rte_eth_rxmode *rxmode;
+ rxmode = &dev->data->dev_conf.rxmode;
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ ixgbe_vlan_hw_strip_config(dev);
}
if (mask & ETH_VLAN_FILTER_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
ixgbe_vlan_hw_filter_enable(dev);
else
ixgbe_vlan_hw_filter_disable(dev);
}
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
ixgbe_vlan_hw_extend_enable(dev);
else
ixgbe_vlan_hw_extend_disable(dev);
@@ -2141,6 +2185,16 @@ ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
return 0;
}
+static int
+ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ ixgbe_config_vlan_strip_on_all_queues(dev, mask);
+
+ ixgbe_vlan_offload_config(dev, mask);
+
+ return 0;
+}
+
static void
ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
@@ -2289,11 +2343,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
const struct rte_eth_dcb_rx_conf *conf;
- if (nb_rx_q != IXGBE_DCB_NB_QUEUES) {
- PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.",
- IXGBE_DCB_NB_QUEUES);
- return -EINVAL;
- }
conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
if (!(conf->nb_tcs == ETH_4_TCS ||
conf->nb_tcs == ETH_8_TCS)) {
@@ -2307,11 +2356,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
const struct rte_eth_dcb_tx_conf *conf;
- if (nb_tx_q != IXGBE_DCB_NB_QUEUES) {
- PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.",
- IXGBE_DCB_NB_QUEUES);
- return -EINVAL;
- }
conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
if (!(conf->nb_tcs == ETH_4_TCS ||
conf->nb_tcs == ETH_8_TCS)) {
@@ -2480,6 +2524,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
uint32_t intr_vector = 0;
int err, link_up = 0, negotiate = 0;
uint32_t speed = 0;
+ uint32_t allowed_speeds = 0;
int mask = 0;
int status;
uint16_t vf, idx;
@@ -2561,7 +2606,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
ETH_VLAN_EXTEND_MASK;
- err = ixgbe_vlan_offload_set(dev, mask);
+ err = ixgbe_vlan_offload_config(dev, mask);
if (err) {
PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
goto error;
@@ -2628,9 +2673,21 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
if (err)
goto error;
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_2_5G | ETH_LINK_SPEED_5G |
+ ETH_LINK_SPEED_10G;
+ break;
+ default:
+ allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_10G;
+ }
+
link_speeds = &dev->data->dev_conf.link_speeds;
- if (*link_speeds & ~(ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_10G)) {
+ if (*link_speeds & ~allowed_speeds) {
PMD_INIT_LOG(ERR, "Invalid link setting");
goto error;
}
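The speed check above reduces to a plain bitmask validation: any requested bit outside the per-MAC allowed set rejects the configuration before link setup is attempted. In isolation (a minimal sketch, not the driver code):

#include <errno.h>
#include <stdint.h>

/* Reject a link_speeds request containing unsupported speed bits. */
static int
validate_link_speeds(uint32_t requested, uint32_t allowed)
{
	if (requested & ~allowed)
		return -EINVAL;
	return 0;
}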
@@ -2656,6 +2713,10 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
} else {
if (*link_speeds & ETH_LINK_SPEED_10G)
speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (*link_speeds & ETH_LINK_SPEED_5G)
+ speed |= IXGBE_LINK_SPEED_5GB_FULL;
+ if (*link_speeds & ETH_LINK_SPEED_2_5G)
+ speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
if (*link_speeds & ETH_LINK_SPEED_1G)
speed |= IXGBE_LINK_SPEED_1GB_FULL;
if (*link_speeds & ETH_LINK_SPEED_100M)
@@ -2666,6 +2727,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
if (err)
goto error;
+ ixgbe_dev_link_update(dev, 0);
+
skip_link_setup:
if (rte_intr_allow_others(intr_handle)) {
@@ -2757,7 +2820,7 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
/* Clear recorded link status */
memset(&link, 0, sizeof(link));
- rte_ixgbe_dev_atomic_write_link_status(dev, &link);
+ rte_eth_linkstatus_set(dev, &link);
if (!rte_intr_allow_others(intr_handle))
/* resume to the default handler */
@@ -2881,7 +2944,7 @@ ixgbe_dev_reset(struct rte_eth_dev *dev)
if (ret)
return ret;
- ret = eth_ixgbe_dev_init(dev);
+ ret = eth_ixgbe_dev_init(dev, NULL);
return ret;
}
@@ -3625,7 +3688,6 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
- dev_info->pci_dev = pci_dev;
dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
@@ -3647,54 +3709,11 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
else
dev_info->max_vmdq_pools = ETH_64_POOLS;
dev_info->vmdq_queue_num = dev_info->max_rx_queues;
- dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
-
- /*
- * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
- * mode.
- */
- if ((hw->mac.type == ixgbe_mac_82599EB ||
- hw->mac.type == ixgbe_mac_X540) &&
- !RTE_ETH_DEV_SRIOV(dev).active)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
-
- if (hw->mac.type == ixgbe_mac_82599EB ||
- hw->mac.type == ixgbe_mac_X540)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
-
- if (hw->mac.type == ixgbe_mac_X550 ||
- hw->mac.type == ixgbe_mac_X550EM_x ||
- hw->mac.type == ixgbe_mac_X550EM_a)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
-
- dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
-
- if (hw->mac.type == ixgbe_mac_82599EB ||
- hw->mac.type == ixgbe_mac_X540)
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
-
- if (hw->mac.type == ixgbe_mac_X550 ||
- hw->mac.type == ixgbe_mac_X550EM_x ||
- hw->mac.type == ixgbe_mac_X550EM_a)
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
-
-#ifdef RTE_LIBRTE_SECURITY
- if (dev->security_ctx) {
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
- }
-#endif
+ dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
+ dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
+ dev_info->rx_queue_offload_capa);
+ dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
+ dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
@@ -3704,6 +3723,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3714,8 +3734,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
- .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
+ .offloads = 0,
};
dev_info->rx_desc_lim = rx_desc_lim;
@@ -3784,7 +3803,6 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->pci_dev = pci_dev;
dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
@@ -3796,17 +3814,11 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
dev_info->max_vmdq_pools = ETH_16_POOLS;
else
dev_info->max_vmdq_pools = ETH_64_POOLS;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
- dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
+ dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
+ dev_info->rx_queue_offload_capa);
+ dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
+ dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
@@ -3816,6 +3828,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
},
.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3826,8 +3839,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
},
.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
- .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
+ .offloads = 0,
};
dev_info->rx_desc_lim = rx_desc_lim;
@@ -3863,7 +3875,7 @@ ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
* before the link status is correct
*/
- if (mac->type == ixgbe_mac_82599_vf) {
+ if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) {
int i;
for (i = 0; i < 5; i++) {
@@ -3941,12 +3953,12 @@ out:
}
/* return 0 means link status changed, -1 means not changed */
-static int
+int
ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
int wait_to_complete, int vf)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_eth_link link, old;
+ struct rte_eth_link link;
ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
@@ -3956,12 +3968,11 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
int wait = 1;
bool autoneg = false;
+ memset(&link, 0, sizeof(link));
link.link_status = ETH_LINK_DOWN;
- link.link_speed = 0;
+ link.link_speed = ETH_SPEED_NUM_NONE;
link.link_duplex = ETH_LINK_HALF_DUPLEX;
link.link_autoneg = ETH_LINK_AUTONEG;
- memset(&old, 0, sizeof(old));
- rte_ixgbe_dev_atomic_read_link_status(dev, &old);
hw->mac.get_link_status = true;
@@ -3985,19 +3996,14 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
if (diag != 0) {
link.link_speed = ETH_SPEED_NUM_100M;
link.link_duplex = ETH_LINK_FULL_DUPLEX;
- rte_ixgbe_dev_atomic_write_link_status(dev, &link);
- if (link.link_status == old.link_status)
- return -1;
- return 0;
+ return rte_eth_linkstatus_set(dev, &link);
}
if (link_up == 0) {
- rte_ixgbe_dev_atomic_write_link_status(dev, &link);
intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
- if (link.link_status == old.link_status)
- return -1;
- return 0;
+ return rte_eth_linkstatus_set(dev, &link);
}
+
intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
link.link_status = ETH_LINK_UP;
link.link_duplex = ETH_LINK_FULL_DUPLEX;
@@ -4029,12 +4035,8 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
link.link_speed = ETH_SPEED_NUM_10G;
break;
}
- rte_ixgbe_dev_atomic_write_link_status(dev, &link);
- if (link.link_status == old.link_status)
- return -1;
-
- return 0;
+ return rte_eth_linkstatus_set(dev, &link);
}
static int
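rte_eth_linkstatus_set() replaces the driver-private atomic link read/write helpers: the ethdev layer performs the atomic update and reports whether the status actually changed, which matches the return convention link_update callbacks use. A minimal sketch built on it, with the hardware read stubbed out (this is an assumption-laden illustration, not the ixgbe implementation):

#include <string.h>
#include <rte_ethdev_driver.h>

static int
example_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	/* Values below stand in for what would be read from the PHY/MAC. */
	link.link_status = ETH_LINK_UP;
	link.link_speed = ETH_SPEED_NUM_10G;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = ETH_LINK_AUTONEG;

	/* Returns 0 if the status changed, -1 if it was already current. */
	return rte_eth_linkstatus_set(dev, &link);
}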
@@ -4233,8 +4235,8 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_eth_link link;
- memset(&link, 0, sizeof(link));
- rte_ixgbe_dev_atomic_read_link_status(dev, &link);
+ rte_eth_linkstatus_get(dev, &link);
+
if (link.link_status) {
PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
(int)(dev->data->port_id),
@@ -4269,7 +4271,6 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
int64_t timeout;
- struct rte_eth_link link;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -4286,9 +4287,10 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
}
if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
+ struct rte_eth_link link;
+
/* get the link status before link update, for predicting later */
- memset(&link, 0, sizeof(link));
- rte_ixgbe_dev_atomic_read_link_status(dev, &link);
+ rte_eth_linkstatus_get(dev, &link);
ixgbe_dev_link_update(dev, 0);
@@ -4853,14 +4855,15 @@ ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
ixgbe_clear_rar(hw, index);
}
-static void
+static int
ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
ixgbe_remove_rar(dev, 0);
-
ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
+
+ return 0;
}
static bool
@@ -4909,10 +4912,12 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
/* switch to jumbo mode if needed */
if (frame_size > ETHER_MAX_LEN) {
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
} else {
- dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
}
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
@@ -4932,19 +4937,32 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
* Virtual Function operations
*/
static void
-ixgbevf_intr_disable(struct ixgbe_hw *hw)
+ixgbevf_intr_disable(struct rte_eth_dev *dev)
{
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
PMD_INIT_FUNC_TRACE();
/* Clear interrupt mask to stop from interrupts being generated */
IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
IXGBE_WRITE_FLUSH(hw);
+
+ /* Clear mask value. */
+ intr->mask = 0;
}
static void
-ixgbevf_intr_enable(struct ixgbe_hw *hw)
+ixgbevf_intr_enable(struct rte_eth_dev *dev)
{
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
PMD_INIT_FUNC_TRACE();
/* VF enable interrupt autoclean */
@@ -4953,6 +4971,9 @@ ixgbevf_intr_enable(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
IXGBE_WRITE_FLUSH(hw);
+
+ /* Save IXGBE_VTEIMS value to mask. */
+ intr->mask = IXGBE_VF_IRQ_ENABLE_MASK;
}
static int
@@ -4970,14 +4991,14 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
* Keep the persistent behavior the same as Host PF
*/
#ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
- if (!conf->rxmode.hw_strip_crc) {
+ if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
- conf->rxmode.hw_strip_crc = 1;
+ conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
#else
- if (conf->rxmode.hw_strip_crc) {
+ if (conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
- conf->rxmode.hw_strip_crc = 0;
+ conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
}
#endif
@@ -5030,7 +5051,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
/* Set HW strip */
mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
ETH_VLAN_EXTEND_MASK;
- err = ixgbevf_vlan_offload_set(dev, mask);
+ err = ixgbevf_vlan_offload_config(dev, mask);
if (err) {
PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
ixgbe_dev_clear_queues(dev);
@@ -5039,6 +5060,8 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
ixgbevf_dev_rxtx_start(dev);
+ ixgbevf_dev_link_update(dev, 0);
+
/* check and configure queue intr-vector mapping */
if (rte_intr_cap_multiple(intr_handle) &&
dev->data->dev_conf.intr_conf.rxq) {
@@ -5074,7 +5097,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
rte_intr_enable(intr_handle);
/* Re-enable interrupt for VF */
- ixgbevf_intr_enable(hw);
+ ixgbevf_intr_enable(dev);
return 0;
}
@@ -5088,7 +5111,7 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
- ixgbevf_intr_disable(hw);
+ ixgbevf_intr_disable(dev);
hw->adapter_stopped = 1;
ixgbe_stop_adapter(hw);
@@ -5226,24 +5249,34 @@ ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
}
static int
-ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_rx_queue *rxq;
uint16_t i;
int on = 0;
/* VF function only support hw strip feature, others are not support */
if (mask & ETH_VLAN_STRIP_MASK) {
- on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
-
- for (i = 0; i < hw->mac.max_rx_queues; i++)
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ on = !!(rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
ixgbevf_vlan_strip_queue_set(dev, i, on);
+ }
}
return 0;
}
+static int
+ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ ixgbe_config_vlan_strip_on_all_queues(dev, mask);
+
+ ixgbevf_vlan_offload_config(dev, mask);
+
+ return 0;
+}
+
int
ixgbe_vt_check(struct ixgbe_hw *hw)
{
@@ -5580,17 +5613,17 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
- uint32_t mask;
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t vec = IXGBE_MISC_VEC_ID;
- mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
if (rte_intr_allow_others(intr_handle))
vec = IXGBE_RX_VEC_START;
- mask |= (1 << vec);
+ intr->mask |= (1 << vec);
RTE_SET_USED(queue_id);
- IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask);
rte_intr_enable(intr_handle);
@@ -5600,19 +5633,19 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
static int
ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- uint32_t mask;
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t vec = IXGBE_MISC_VEC_ID;
- mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
if (rte_intr_allow_others(intr_handle))
vec = IXGBE_RX_VEC_START;
- mask &= ~(1 << vec);
+ intr->mask &= ~(1 << vec);
RTE_SET_USED(queue_id);
- IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask);
return 0;
}
@@ -5778,6 +5811,13 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
if (vector_idx < base + intr_handle->nb_efd - 1)
vector_idx++;
}
+
+ /* As the RX queue settings above show, all queues use vector 0.
+ * Set only the ITR value of IXGBE_MISC_VEC_ID.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_VTEITR(IXGBE_MISC_VEC_ID),
+ IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
+ | IXGBE_EITR_CNT_WDIS);
}
/**
@@ -5799,8 +5839,12 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
/* won't configure msix register if no mapping is done
* between intr vector and event fd
+ * but if MSI-X has been enabled already, need to configure
+ * auto clean, auto mask and throttling.
*/
- if (!rte_intr_dp_is_en(intr_handle))
+ gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+ if (!rte_intr_dp_is_en(intr_handle) &&
+ !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT)))
return;
if (rte_intr_allow_others(intr_handle))
@@ -5824,30 +5868,34 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
/* Populate the IVAR table and set the ITR values to the
* corresponding register.
*/
- for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
- queue_id++) {
- /* by default, 1:1 mapping */
- ixgbe_set_ivar_map(hw, 0, queue_id, vec);
- intr_handle->intr_vec[queue_id] = vec;
- if (vec < base + intr_handle->nb_efd - 1)
- vec++;
- }
+ if (rte_intr_dp_is_en(intr_handle)) {
+ for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
+ queue_id++) {
+ /* by default, 1:1 mapping */
+ ixgbe_set_ivar_map(hw, 0, queue_id, vec);
+ intr_handle->intr_vec[queue_id] = vec;
+ if (vec < base + intr_handle->nb_efd - 1)
+ vec++;
+ }
- switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
- IXGBE_MISC_VEC_ID);
- break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
- break;
- default:
- break;
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ixgbe_set_ivar_map(hw, -1,
+ IXGBE_IVAR_OTHER_CAUSES_INDEX,
+ IXGBE_MISC_VEC_ID);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
+ break;
+ default:
+ break;
+ }
}
IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
- IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF);
+ IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
+ | IXGBE_EITR_CNT_WDIS);
/* set up to autoclear timer, and the vectors */
mask = IXGBE_EIMS_ENABLE_MASK;
@@ -5863,6 +5911,7 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
uint16_t queue_idx, uint16_t tx_rate)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_rxmode *rxmode;
uint32_t rf_dec, rf_int;
uint32_t bcnrc_val;
uint16_t link_speed = dev->data->dev_link.link_speed;
@@ -5884,14 +5933,14 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
bcnrc_val = 0;
}
+ rxmode = &dev->data->dev_conf.rxmode;
/*
* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
* register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
* set as 0x4.
*/
- if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
- (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
- IXGBE_MAX_JUMBO_FRAME_SIZE))
+ if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
+ (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE))
IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
IXGBE_MMW_SIZE_JUMBO_FRAME);
else
@@ -5983,12 +6032,14 @@ ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
}
}
-static void
+static int
ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
+
+ return 0;
}
int
@@ -6238,7 +6289,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
/* refuse mtu that requires the support of scattered packets when this
* feature has not been enabled before.
*/
- if (!rx_conf->enable_scatter &&
+ if (!(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) &&
(max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
return -EINVAL;
@@ -6812,9 +6863,8 @@ ixgbe_start_timecounters(struct rte_eth_dev *dev)
uint32_t shift = 0;
/* Get current link speed. */
- memset(&link, 0, sizeof(link));
ixgbe_dev_link_update(dev, 1);
- rte_ixgbe_dev_atomic_read_link_status(dev, &link);
+ rte_eth_linkstatus_get(dev, &link);
switch (link.link_speed) {
case ETH_SPEED_NUM_100M:
@@ -7166,6 +7216,78 @@ ixgbe_set_eeprom(struct rte_eth_dev *dev,
return eeprom->ops.write_buffer(hw, first, length, data);
}
+static int
+ixgbe_get_module_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t status;
+ uint8_t sff8472_rev, addr_mode;
+ bool page_swap = false;
+
+ /* Check whether we support SFF-8472 or not */
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_SFF_8472_COMP,
+ &sff8472_rev);
+ if (status != 0)
+ return -EIO;
+
+ /* addressing mode is not supported */
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_SFF_8472_SWAP,
+ &addr_mode);
+ if (status != 0)
+ return -EIO;
+
+ if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
+ PMD_DRV_LOG(ERR,
+ "Address change required to access page 0xA2, "
+ "but not supported. Please report the module "
+ "type to the driver maintainers.");
+ page_swap = true;
+ }
+
+ if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
+ /* We have a SFP, but it does not support SFF-8472 */
+ modinfo->type = RTE_ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
+ } else {
+ /* We have a SFP which supports a revision of SFF-8472. */
+ modinfo->type = RTE_ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_get_module_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t status = IXGBE_ERR_PHY_ADDR_INVALID;
+ uint8_t databyte = 0xFF;
+ uint8_t *data = info->data;
+ uint32_t i = 0;
+
+ if (info->length == 0)
+ return -EINVAL;
+
+ for (i = info->offset; i < info->offset + info->length; i++) {
+ if (i < RTE_ETH_MODULE_SFF_8079_LEN)
+ status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
+ else
+ status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
+
+ if (status != 0)
+ return -EIO;
+
+ data[i - info->offset] = databyte;
+ }
+
+ return 0;
+}
+
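These two dev_ops plug into the generic rte_eth_dev_get_module_info()/rte_eth_dev_get_module_eeprom() ethdev API introduced in the same release. A hedged application-side sketch of reading the whole SFP EEPROM (error handling trimmed; the function name is illustrative):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <rte_ethdev.h>

static int
dump_sfp_eeprom(uint16_t port_id)
{
	struct rte_eth_dev_module_info modinfo;
	struct rte_dev_eeprom_info eeprom;
	uint8_t *buf;
	int ret;

	/* Ask the PMD which SFF layout and EEPROM size the module exposes. */
	ret = rte_eth_dev_get_module_info(port_id, &modinfo);
	if (ret != 0)
		return ret;

	buf = malloc(modinfo.eeprom_len);
	if (buf == NULL)
		return -ENOMEM;

	memset(&eeprom, 0, sizeof(eeprom));
	eeprom.offset = 0;
	eeprom.length = modinfo.eeprom_len;
	eeprom.data = buf;

	ret = rte_eth_dev_get_module_eeprom(port_id, &eeprom);
	if (ret == 0)
		printf("port %u: read %u bytes of module EEPROM (type %u)\n",
		       (unsigned int)port_id, eeprom.length, modinfo.type);

	free(buf);
	return ret;
}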
uint16_t
ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
switch (mac_type) {
@@ -8169,7 +8291,7 @@ ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
- ixgbevf_intr_disable(hw);
+ ixgbevf_intr_disable(dev);
/* read-on-clear nic registers here */
eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
@@ -8186,7 +8308,6 @@ ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
static int
ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
{
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
@@ -8195,7 +8316,7 @@ ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
intr->flags &= ~IXGBE_FLAG_MAILBOX;
}
- ixgbevf_intr_enable(hw);
+ ixgbevf_intr_enable(dev);
return 0;
}
@@ -8334,7 +8455,7 @@ ixgbe_rss_filter_restore(struct rte_eth_dev *dev)
struct ixgbe_filter_info *filter_info =
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- if (filter_info->rss_info.num)
+ if (filter_info->rss_info.conf.queue_num)
ixgbe_config_rss_filter(dev,
&filter_info->rss_info, TRUE);
}
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index c56d6524..e42ec30d 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -4,6 +4,9 @@
#ifndef _IXGBE_ETHDEV_H_
#define _IXGBE_ETHDEV_H_
+
+#include <stdint.h>
+
#include "base/ixgbe_type.h"
#include "base/ixgbe_dcb.h"
#include "base/ixgbe_dcb_82599.h"
@@ -12,6 +15,7 @@
#ifdef RTE_LIBRTE_SECURITY
#include "ixgbe_ipsec.h"
#endif
+#include <rte_flow.h>
#include <rte_time.h>
#include <rte_hash.h>
#include <rte_pci.h>
@@ -39,6 +43,7 @@
#define IXGBE_EXTENDED_VLAN (uint32_t)(1 << 26) /* EXTENDED VLAN ENABLE */
#define IXGBE_VFTA_SIZE 128
#define IXGBE_VLAN_TAG_SIZE 4
+#define IXGBE_HKEY_MAX_INDEX 10
#define IXGBE_MAX_RX_QUEUE_NUM 128
#define IXGBE_MAX_INTR_QUEUE_NUM 15
#define IXGBE_VMDQ_DCB_NB_QUEUES IXGBE_MAX_RX_QUEUE_NUM
@@ -57,6 +62,7 @@
(((us) * 1000 / IXGBE_EITR_INTERVAL_UNIT_NS << IXGBE_EITR_ITR_INT_SHIFT) & \
IXGBE_EITR_ITR_INT_MASK)
+#define IXGBE_QUEUE_ITR_INTERVAL_DEFAULT 500 /* 500us */
/* Loopback operation modes */
/* 82599 specific loopback operation types */
@@ -196,8 +202,8 @@ struct ixgbe_hw_fdir_info {
};
struct ixgbe_rte_flow_rss_conf {
- struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
- uint16_t num; /**< Number of entries in queue[]. */
+ struct rte_flow_action_rss conf; /**< RSS parameters. */
+ uint8_t key[IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */
uint16_t queue[IXGBE_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. */
};
@@ -253,6 +259,7 @@ struct ixgbe_vf_info {
uint16_t vlan_count;
uint8_t spoofchk_enabled;
uint8_t api_version;
+ uint16_t switch_domain_id;
};
/*
@@ -480,6 +487,15 @@ struct ixgbe_adapter {
struct ixgbe_tm_conf tm_conf;
};
+struct ixgbe_vf_representor {
+ uint16_t vf_id;
+ uint16_t switch_domain_id;
+ struct rte_eth_dev *pf_ethdev;
+};
+
+int ixgbe_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params);
+int ixgbe_vf_representor_uninit(struct rte_eth_dev *ethdev);
+
#define IXGBE_DEV_PRIVATE_TO_HW(adapter)\
(&((struct ixgbe_adapter *)adapter)->hw)
@@ -652,6 +668,10 @@ int ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
void ixgbe_configure_dcb(struct rte_eth_dev *dev);
+int
+ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
+ int wait_to_complete, int vf);
+
/*
* misc function prototypes
*/
@@ -659,9 +679,7 @@ void ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev);
void ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev);
-void ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev);
-
-void ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev);
+void ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev);
void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev);
@@ -698,6 +716,10 @@ void ixgbe_tm_conf_init(struct rte_eth_dev *dev);
void ixgbe_tm_conf_uninit(struct rte_eth_dev *dev);
int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t tx_rate);
+int ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in);
+int ixgbe_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with);
int ixgbe_config_rss_filter(struct rte_eth_dev *dev,
struct ixgbe_rte_flow_rss_conf *conf, bool add);
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index dcbfb38b..eb0644c8 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -264,8 +264,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
}
/* Skip Ethernet */
if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
- eth_spec = (const struct rte_flow_item_eth *)item->spec;
- eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ eth_spec = item->spec;
+ eth_mask = item->mask;
/*Not supported last point for range*/
if (item->last) {
rte_flow_error_set(error,
@@ -298,8 +298,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
}
if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
- vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
- vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
/*Not supported last point for range*/
if (item->last) {
rte_flow_error_set(error,
@@ -346,7 +346,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+ ipv4_mask = item->mask;
/**
* Only support src & dst addresses, protocol,
* others should be masked.
@@ -368,7 +368,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
filter->src_ip_mask = ipv4_mask->hdr.src_addr;
filter->proto_mask = ipv4_mask->hdr.next_proto_id;
- ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+ ipv4_spec = item->spec;
filter->dst_ip = ipv4_spec->hdr.dst_addr;
filter->src_ip = ipv4_spec->hdr.src_addr;
filter->proto = ipv4_spec->hdr.next_proto_id;
@@ -413,7 +413,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
}
if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
- tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ tcp_mask = item->mask;
/**
* Only support src & dst ports, tcp flags,
@@ -447,12 +447,12 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ tcp_spec = item->spec;
filter->dst_port = tcp_spec->hdr.dst_port;
filter->src_port = tcp_spec->hdr.src_port;
filter->tcp_flags = tcp_spec->hdr.tcp_flags;
} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
- udp_mask = (const struct rte_flow_item_udp *)item->mask;
+ udp_mask = item->mask;
/**
* Only support src & dst ports,
@@ -471,11 +471,11 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
filter->dst_port_mask = udp_mask->hdr.dst_port;
filter->src_port_mask = udp_mask->hdr.src_port;
- udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ udp_spec = item->spec;
filter->dst_port = udp_spec->hdr.dst_port;
filter->src_port = udp_spec->hdr.src_port;
} else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
- sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
+ sctp_mask = item->mask;
/**
* Only support src & dst ports,
@@ -494,7 +494,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
filter->dst_port_mask = sctp_mask->hdr.dst_port;
filter->src_port_mask = sctp_mask->hdr.src_port;
- sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
+ sctp_spec = item->spec;
filter->dst_port = sctp_spec->hdr.dst_port;
filter->src_port = sctp_spec->hdr.src_port;
} else {
@@ -557,6 +557,15 @@ action:
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
if (attr->priority > 0xFFFF) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
@@ -699,8 +708,8 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- eth_spec = (const struct rte_flow_item_eth *)item->spec;
- eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ eth_spec = item->spec;
+ eth_mask = item->mask;
/* Mask bits of source MAC address must be full of 0.
* Mask bits of destination MAC address must be full
@@ -787,6 +796,14 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
}
/* Not supported */
+ if (attr->transfer) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
if (attr->priority) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
@@ -1000,8 +1017,8 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
- tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ tcp_spec = item->spec;
+ tcp_mask = item->mask;
if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
tcp_mask->hdr.src_port ||
tcp_mask->hdr.dst_port ||
@@ -1078,6 +1095,15 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
/* Support 2 priorities, the lowest or highest. */
if (!attr->priority) {
filter->hig_pri = 0;
@@ -1198,8 +1224,8 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
- e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
- e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
+ e_tag_spec = item->spec;
+ e_tag_mask = item->mask;
/* Only care about GRP and E cid base. */
if (e_tag_mask->epcp_edei_in_ecid_b ||
@@ -1250,6 +1276,15 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
}
/* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ /* not supported */
if (attr->priority) {
memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
@@ -1354,6 +1389,15 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
}
/* not supported */
+ if (attr->transfer) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ /* not supported */
if (attr->priority) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -1447,12 +1491,9 @@ static inline uint8_t signature_match(const struct rte_flow_item pattern[])
break;
if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
- spec =
- (const struct rte_flow_item_fuzzy *)item->spec;
- last =
- (const struct rte_flow_item_fuzzy *)item->last;
- mask =
- (const struct rte_flow_item_fuzzy *)item->mask;
+ spec = item->spec;
+ last = item->last;
+ mask = item->mask;
if (!spec || !mask)
return 0;
@@ -1632,7 +1673,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
if (item->spec) {
rule->b_spec = TRUE;
- eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_spec = item->spec;
/* Get the dst MAC. */
for (j = 0; j < ETHER_ADDR_LEN; j++) {
@@ -1645,7 +1686,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
if (item->mask) {
rule->b_mask = TRUE;
- eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ eth_mask = item->mask;
/* Ether type should be masked. */
if (eth_mask->type ||
@@ -1725,8 +1766,8 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
return -rte_errno;
}
- vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
- vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
@@ -1772,8 +1813,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
return -rte_errno;
}
rule->b_mask = TRUE;
- ipv4_mask =
- (const struct rte_flow_item_ipv4 *)item->mask;
+ ipv4_mask = item->mask;
if (ipv4_mask->hdr.version_ihl ||
ipv4_mask->hdr.type_of_service ||
ipv4_mask->hdr.total_length ||
@@ -1793,8 +1833,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
if (item->spec) {
rule->b_spec = TRUE;
- ipv4_spec =
- (const struct rte_flow_item_ipv4 *)item->spec;
+ ipv4_spec = item->spec;
rule->ixgbe_fdir.formatted.dst_ip[0] =
ipv4_spec->hdr.dst_addr;
rule->ixgbe_fdir.formatted.src_ip[0] =
@@ -1844,8 +1883,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
}
rule->b_mask = TRUE;
- ipv6_mask =
- (const struct rte_flow_item_ipv6 *)item->mask;
+ ipv6_mask = item->mask;
if (ipv6_mask->hdr.vtc_flow ||
ipv6_mask->hdr.payload_len ||
ipv6_mask->hdr.proto ||
@@ -1885,8 +1923,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
if (item->spec) {
rule->b_spec = TRUE;
- ipv6_spec =
- (const struct rte_flow_item_ipv6 *)item->spec;
+ ipv6_spec = item->spec;
rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
ipv6_spec->hdr.src_addr, 16);
rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
@@ -1938,7 +1975,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
return -rte_errno;
}
rule->b_mask = TRUE;
- tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ tcp_mask = item->mask;
if (tcp_mask->hdr.sent_seq ||
tcp_mask->hdr.recv_ack ||
tcp_mask->hdr.data_off ||
@@ -1957,7 +1994,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
if (item->spec) {
rule->b_spec = TRUE;
- tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ tcp_spec = item->spec;
rule->ixgbe_fdir.formatted.src_port =
tcp_spec->hdr.src_port;
rule->ixgbe_fdir.formatted.dst_port =
@@ -2003,7 +2040,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
return -rte_errno;
}
rule->b_mask = TRUE;
- udp_mask = (const struct rte_flow_item_udp *)item->mask;
+ udp_mask = item->mask;
if (udp_mask->hdr.dgram_len ||
udp_mask->hdr.dgram_cksum) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2017,7 +2054,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
if (item->spec) {
rule->b_spec = TRUE;
- udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ udp_spec = item->spec;
rule->ixgbe_fdir.formatted.src_port =
udp_spec->hdr.src_port;
rule->ixgbe_fdir.formatted.dst_port =
@@ -2068,8 +2105,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
return -rte_errno;
}
rule->b_mask = TRUE;
- sctp_mask =
- (const struct rte_flow_item_sctp *)item->mask;
+ sctp_mask = item->mask;
if (sctp_mask->hdr.tag ||
sctp_mask->hdr.cksum) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2083,8 +2119,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
if (item->spec) {
rule->b_spec = TRUE;
- sctp_spec =
- (const struct rte_flow_item_sctp *)item->spec;
+ sctp_spec = item->spec;
rule->ixgbe_fdir.formatted.src_port =
sctp_spec->hdr.src_port;
rule->ixgbe_fdir.formatted.dst_port =
@@ -2092,8 +2127,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
}
/* others even sctp port is not supported */
} else {
- sctp_mask =
- (const struct rte_flow_item_sctp *)item->mask;
+ sctp_mask = item->mask;
if (sctp_mask &&
(sctp_mask->hdr.src_port ||
sctp_mask->hdr.dst_port ||
@@ -2136,7 +2170,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
return -rte_errno;
}
- raw_mask = (const struct rte_flow_item_raw *)item->mask;
+ raw_mask = item->mask;
/* check mask */
if (raw_mask->relative != 0x1 ||
@@ -2152,7 +2186,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
return -rte_errno;
}
- raw_spec = (const struct rte_flow_item_raw *)item->spec;
+ raw_spec = item->spec;
/* check spec */
if (raw_spec->relative != 0 ||
@@ -2425,8 +2459,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
/* Tunnel type is always meaningful. */
rule->mask.tunnel_type_mask = 1;
- vxlan_mask =
- (const struct rte_flow_item_vxlan *)item->mask;
+ vxlan_mask = item->mask;
if (vxlan_mask->flags) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -2452,8 +2485,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
if (item->spec) {
rule->b_spec = TRUE;
- vxlan_spec = (const struct rte_flow_item_vxlan *)
- item->spec;
+ vxlan_spec = item->spec;
rte_memcpy(((uint8_t *)
&rule->ixgbe_fdir.formatted.tni_vni + 1),
vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
@@ -2490,8 +2522,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
/* Tunnel type is always meaningful. */
rule->mask.tunnel_type_mask = 1;
- nvgre_mask =
- (const struct rte_flow_item_nvgre *)item->mask;
+ nvgre_mask = item->mask;
if (nvgre_mask->flow_id) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -2534,8 +2565,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
if (item->spec) {
rule->b_spec = TRUE;
- nvgre_spec =
- (const struct rte_flow_item_nvgre *)item->spec;
+ nvgre_spec = item->spec;
if (nvgre_spec->c_k_s_rsvd0_ver !=
rte_cpu_to_be_16(0x2000) &&
nvgre_mask->c_k_s_rsvd0_ver) {
@@ -2591,7 +2621,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
return -rte_errno;
}
rule->b_mask = TRUE;
- eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ eth_mask = item->mask;
/* Ether type should be masked. */
if (eth_mask->type) {
@@ -2632,7 +2662,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
if (item->spec) {
rule->b_spec = TRUE;
- eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_spec = item->spec;
/* Get the dst MAC. */
for (j = 0; j < ETHER_ADDR_LEN; j++) {
@@ -2671,8 +2701,8 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
return -rte_errno;
}
- vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
- vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
@@ -2775,7 +2805,7 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
rss = (const struct rte_flow_action_rss *)act->conf;
- if (!rss || !rss->num) {
+ if (!rss || !rss->queue_num) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
@@ -2783,7 +2813,7 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
- for (n = 0; n < rss->num; n++) {
+ for (n = 0; n < rss->queue_num; n++) {
if (rss->queue[n] >= dev->data->nb_rx_queues) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -2792,14 +2822,27 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
}
- if (rss->rss_conf)
- rss_conf->rss_conf = *rss->rss_conf;
- else
- rss_conf->rss_conf.rss_hf = IXGBE_RSS_OFFLOAD_ALL;
- for (n = 0; n < rss->num; ++n)
- rss_conf->queue[n] = rss->queue[n];
- rss_conf->num = rss->num;
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "non-default RSS hash functions are not supported");
+ if (rss->level)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "a nonzero RSS encapsulation level is not supported");
+ if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS hash key must be exactly 40 bytes");
+ if (rss->queue_num > RTE_DIM(rss_conf->queue))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "too many queues for RSS context");
+ if (ixgbe_rss_conf_init(rss_conf, rss))
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS context initialization failure");
/* check if the next not void item is END */
act = next_no_void_action(actions, act);
@@ -2830,6 +2873,15 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
if (attr->priority > 0xFFFF) {
memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
rte_flow_error_set(error, EINVAL,
@@ -2848,7 +2900,7 @@ ixgbe_clear_rss_filter(struct rte_eth_dev *dev)
struct ixgbe_filter_info *filter_info =
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- if (filter_info->rss_info.num)
+ if (filter_info->rss_info.conf.queue_num)
ixgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
}
@@ -3167,9 +3219,8 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR, "failed to allocate memory");
goto out;
}
- rte_memcpy(&rss_filter_ptr->filter_info,
- &rss_conf,
- sizeof(struct ixgbe_rte_flow_rss_conf));
+ ixgbe_rss_conf_init(&rss_filter_ptr->filter_info,
+ &rss_conf.conf);
TAILQ_INSERT_TAIL(&filter_rss_list,
rss_filter_ptr, entries);
flow->rule = rss_filter_ptr;
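For reference, the rte_flow_action_rss layout the parser above now validates: the hash function must be the default, the encapsulation level zero, any explicit key exactly 40 bytes, and the queue list within the device limit. A minimal action definition under those constraints (the queue list and RSS types below are illustrative):

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

static const uint16_t rss_queues[] = { 0, 1, 2, 3 };

static const struct rte_flow_action_rss rss_action = {
	.func = RTE_ETH_HASH_FUNCTION_DEFAULT,	/* only the default is accepted */
	.level = 0,				/* outer-most encapsulation */
	.types = ETH_RSS_IP,
	.key_len = 0,				/* 0 = keep the driver default key */
	.key = NULL,
	.queue_num = RTE_DIM(rss_queues),
	.queue = rss_queues,
};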
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
index 176ec0fd..de7ed367 100644
--- a/drivers/net/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -598,13 +598,18 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t reg;
+ uint64_t rx_offloads;
+ uint64_t tx_offloads;
+
+ rx_offloads = dev->data->dev_conf.rxmode.offloads;
+ tx_offloads = dev->data->dev_conf.txmode.offloads;
/* sanity checks */
- if (dev->data->dev_conf.rxmode.enable_lro) {
+ if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
return -1;
}
- if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
+ if (!(rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
return -1;
}
@@ -624,7 +629,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+ if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
if (reg != 0) {
@@ -632,7 +637,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
return -1;
}
}
- if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+ if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL,
IXGBE_SECTXCTRL_STORE_FORWARD);
reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index ea997371..4d199c80 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -90,6 +90,8 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
if (*vfinfo == NULL)
rte_panic("Cannot allocate memory for private VF data\n");
+ rte_eth_switch_domain_alloc(&(*vfinfo)->switch_domain_id);
+
memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info));
memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
hw->mac.mc_filter_type = 0;
@@ -122,6 +124,7 @@ void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
{
struct ixgbe_vf_info **vfinfo;
uint16_t vf_num;
+ int ret;
PMD_INIT_FUNC_TRACE();
@@ -132,6 +135,10 @@ void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = 0;
RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = 0;
+ ret = rte_eth_switch_domain_free((*vfinfo)->switch_domain_id);
+ if (ret)
+ PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
+
vf_num = dev_num_vf(eth_dev);
if (vf_num == 0)
return;
@@ -329,10 +336,7 @@ set_rx_mode(struct rte_eth_dev *dev)
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
- ixgbe_vlan_hw_strip_enable_all(dev);
- else
- ixgbe_vlan_hw_strip_disable_all(dev);
+ ixgbe_vlan_hw_strip_config(dev);
}
static inline void
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 6c582b4b..3e13d26a 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2379,7 +2379,7 @@ void __attribute__((cold))
ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
{
/* Use a simple Tx queue (no offloads, no multi segs) if possible */
- if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
+ if ((txq->offloads == 0) &&
#ifdef RTE_LIBRTE_SECURITY
!(txq->using_ipsec) &&
#endif
@@ -2398,9 +2398,8 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
} else {
PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
PMD_INIT_LOG(DEBUG,
- " - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
- (unsigned long)txq->txq_flags,
- (unsigned long)IXGBE_SIMPLE_FLAGS);
+ " - offloads = 0x%" PRIx64,
+ txq->offloads);
PMD_INIT_LOG(DEBUG,
" - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
(unsigned long)txq->tx_rs_thresh,
@@ -2410,6 +2409,45 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
}
}
+uint64_t
+ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+uint64_t
+ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
+{
+ uint64_t tx_offload_capa;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS;
+
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540)
+ tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_X550EM_a)
+ tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIBRTE_SECURITY
+ if (dev->security_ctx)
+ tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+#endif
+ return tx_offload_capa;
+}
+
int __attribute__((cold))
ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -2421,10 +2459,13 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
struct ixgbe_tx_queue *txq;
struct ixgbe_hw *hw;
uint16_t tx_rs_thresh, tx_free_thresh;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
/*
* Validate number of transmit descriptors.
* It must not exceed hardware maximum, and must be multiple
@@ -2550,7 +2591,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
txq->port_id = dev->data->port_id;
- txq->txq_flags = tx_conf->txq_flags;
+ txq->offloads = offloads;
txq->ops = &def_txq_ops;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
#ifdef RTE_LIBRTE_SECURITY
@@ -2769,6 +2810,81 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
#endif
}
+static int
+ixgbe_is_vf(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599_vf:
+ case ixgbe_mac_X540_vf:
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ case ixgbe_mac_X550EM_a_vf:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+uint64_t
+ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
+{
+ uint64_t offloads = 0;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->mac.type != ixgbe_mac_82598EB)
+ offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+
+ return offloads;
+}
+
+uint64_t
+ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
+{
+ uint64_t offloads;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_SCATTER;
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+
+ if (ixgbe_is_vf(dev) == 0)
+ offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_VLAN_EXTEND);
+
+ /*
+ * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
+ * mode.
+ */
+ if ((hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540) &&
+ !RTE_ETH_DEV_SRIOV(dev).active)
+ offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540)
+ offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_X550EM_a)
+ offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIBRTE_SECURITY
+ if (dev->security_ctx)
+ offloads |= DEV_RX_OFFLOAD_SECURITY;
+#endif
+
+ return offloads;
+}
+
int __attribute__((cold))
ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -2783,10 +2899,13 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t len;
struct ixgbe_adapter *adapter =
(struct ixgbe_adapter *)dev->data->dev_private;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
/*
* Validate number of receive descriptors.
* It must not exceed hardware maximum, and must be multiple
@@ -2816,10 +2935,11 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
- 0 : ETHER_CRC_LEN);
+ rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
rxq->drop_en = rx_conf->rx_drop_en;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+ rxq->offloads = offloads;
/*
* The packet type in RX descriptor is different for different NICs.
@@ -4574,7 +4694,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
rsc_capable = true;
- if (!rsc_capable && rx_conf->enable_lro) {
+ if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
"support it");
return -EINVAL;
@@ -4582,7 +4702,8 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
/* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
- if (!rx_conf->hw_strip_crc && rx_conf->enable_lro) {
+ if (!(rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) &&
+ (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
/*
* According to chapter of 4.6.7.2.1 of the Spec Rev.
* 3.0 RSC configuration requires HW CRC stripping being
@@ -4596,7 +4717,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
/* RFCTL configuration */
rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
- if ((rsc_capable) && (rx_conf->enable_lro))
+ if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
/*
* Since NFS packets coalescing is not supported - clear
* RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
@@ -4609,7 +4730,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
/* If LRO hasn't been requested - we are done here. */
- if (!rx_conf->enable_lro)
+ if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
return 0;
/* Set RDRXCTL.RSCACKC bit */
@@ -4666,7 +4787,8 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
* at most 500us latency for a single RSC aggregation.
*/
eitr &= ~IXGBE_EITR_ITR_INT_MASK;
- eitr |= IXGBE_EITR_INTERVAL_US(500) | IXGBE_EITR_CNT_WDIS;
+ eitr |= IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT);
+ eitr |= IXGBE_EITR_CNT_WDIS;
IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);
@@ -4729,7 +4851,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
* Configure CRC stripping, if any.
*/
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- if (rx_conf->hw_strip_crc)
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
else
hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
@@ -4737,7 +4859,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
/*
* Configure jumbo frame support, if any.
*/
- if (rx_conf->jumbo_frame == 1) {
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
maxfrs &= 0x0000FFFF;
@@ -4757,6 +4879,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+ /*
+ * Assume no header split and no VLAN strip support
+ * on any Rx queue first.
+ */
+ rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
/* Setup RX queues */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
@@ -4765,7 +4892,8 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
* Reset crc_len in case it was changed after queue setup by a
* call to configure.
*/
- rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN;
+ rxq->crc_len = (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) ?
+ 0 : ETHER_CRC_LEN;
/* Setup the Base and Length of the Rx Descriptor Rings */
bus_addr = rxq->rx_ring_phys_addr;
@@ -4779,28 +4907,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
/* Configure the SRRCTL register */
-#ifdef RTE_HEADER_SPLIT_ENABLE
- /*
- * Configure Header Split
- */
- if (rx_conf->header_split) {
- if (hw->mac.type == ixgbe_mac_82599EB) {
- /* Must setup the PSRTYPE register */
- uint32_t psrtype;
-
- psrtype = IXGBE_PSRTYPE_TCPHDR |
- IXGBE_PSRTYPE_UDPHDR |
- IXGBE_PSRTYPE_IPV4HDR |
- IXGBE_PSRTYPE_IPV6HDR;
- IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
- }
- srrctl = ((rx_conf->split_hdr_size <<
- IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
- IXGBE_SRRCTL_BSIZEHDR_MASK);
- srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
- } else
-#endif
- srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
/* Set if packets are dropped when no descriptors available */
if (rxq->drop_en)
@@ -4826,9 +4933,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
2 * IXGBE_VLAN_TAG_SIZE > buf_size)
dev->data->scattered_rx = 1;
+ if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
}
- if (rx_conf->enable_scatter)
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
dev->data->scattered_rx = 1;
/*
@@ -4843,7 +4952,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
*/
rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
rxcsum |= IXGBE_RXCSUM_PCSD;
- if (rx_conf->hw_ip_checksum)
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
rxcsum |= IXGBE_RXCSUM_IPPCSE;
else
rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
@@ -4853,7 +4962,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
if (hw->mac.type == ixgbe_mac_82599EB ||
hw->mac.type == ixgbe_mac_X540) {
rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
- if (rx_conf->hw_strip_crc)
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
else
rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
@@ -5259,6 +5368,7 @@ ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
qinfo->conf.rx_drop_en = rxq->drop_en;
qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+ qinfo->conf.offloads = rxq->offloads;
}
void
@@ -5277,7 +5387,7 @@ ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
- qinfo->conf.txq_flags = txq->txq_flags;
+ qinfo->conf.offloads = txq->offloads;
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
@@ -5289,6 +5399,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
struct ixgbe_rx_queue *rxq;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
uint64_t bus_addr;
uint32_t srrctl, psrtype = 0;
uint16_t buf_size;
@@ -5328,6 +5439,11 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
ixgbevf_rlpml_set_vf(hw,
(uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ /*
+ * Assume no header split and no VLAN strip support
+ * on any Rx queue first .
+ */
+ rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
/* Setup RX queues */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
@@ -5351,18 +5467,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
/* Configure the SRRCTL register */
-#ifdef RTE_HEADER_SPLIT_ENABLE
- /*
- * Configure Header Split
- */
- if (dev->data->dev_conf.rxmode.header_split) {
- srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
- IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
- IXGBE_SRRCTL_BSIZEHDR_MASK);
- srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
- } else
-#endif
- srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
/* Set if packets are dropped when no descriptors available */
if (rxq->drop_en)
@@ -5387,24 +5492,18 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
IXGBE_SRRCTL_BSIZEPKT_SHIFT);
- if (dev->data->dev_conf.rxmode.enable_scatter ||
+ if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
/* It adds dual VLAN length for supporting dual VLAN */
- (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ (rxmode->max_rx_pkt_len +
2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->data->scattered_rx = 1;
}
- }
-#ifdef RTE_HEADER_SPLIT_ENABLE
- if (dev->data->dev_conf.rxmode.header_split)
- /* Must setup the PSRTYPE register */
- psrtype = IXGBE_PSRTYPE_TCPHDR |
- IXGBE_PSRTYPE_UDPHDR |
- IXGBE_PSRTYPE_IPV4HDR |
- IXGBE_PSRTYPE_IPV6HDR;
-#endif
+ if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
/* Set RQPL for VF RSS according to max Rx queue */
psrtype |= (dev->data->nb_rx_queues >> 1) <<
@@ -5522,6 +5621,40 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
}
int
+ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in)
+{
+ if (in->key_len > RTE_DIM(out->key) ||
+ in->queue_num > RTE_DIM(out->queue))
+ return -EINVAL;
+ out->conf = (struct rte_flow_action_rss){
+ .func = in->func,
+ .level = in->level,
+ .types = in->types,
+ .key_len = in->key_len,
+ .queue_num = in->queue_num,
+ .key = memcpy(out->key, in->key, in->key_len),
+ .queue = memcpy(out->queue, in->queue,
+ sizeof(*in->queue) * in->queue_num),
+ };
+ return 0;
+}
+
+int
+ixgbe_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with)
+{
+ return (comp->func == with->func &&
+ comp->level == with->level &&
+ comp->types == with->types &&
+ comp->key_len == with->key_len &&
+ comp->queue_num == with->queue_num &&
+ !memcmp(comp->key, with->key, with->key_len) &&
+ !memcmp(comp->queue, with->queue,
+ sizeof(*with->queue) * with->queue_num));
+}
+
+int
ixgbe_config_rss_filter(struct rte_eth_dev *dev,
struct ixgbe_rte_flow_rss_conf *conf, bool add)
{
@@ -5531,7 +5664,12 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev,
uint16_t j;
uint16_t sp_reta_size;
uint32_t reta_reg;
- struct rte_eth_rss_conf rss_conf = conf->rss_conf;
+ struct rte_eth_rss_conf rss_conf = {
+ .rss_key = conf->conf.key_len ?
+ (void *)(uintptr_t)conf->conf.key : NULL,
+ .rss_key_len = conf->conf.key_len,
+ .rss_hf = conf->conf.types,
+ };
struct ixgbe_filter_info *filter_info =
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
@@ -5541,8 +5679,8 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev,
sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
if (!add) {
- if (memcmp(conf, &filter_info->rss_info,
- sizeof(struct ixgbe_rte_flow_rss_conf)) == 0) {
+ if (ixgbe_action_rss_same(&filter_info->rss_info.conf,
+ &conf->conf)) {
ixgbe_rss_disable(dev);
memset(&filter_info->rss_info, 0,
sizeof(struct ixgbe_rte_flow_rss_conf));
@@ -5551,7 +5689,7 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev,
return -EINVAL;
}
- if (filter_info->rss_info.num)
+ if (filter_info->rss_info.conf.queue_num)
return -EINVAL;
/* Fill in redirection table
* The byte-swap is needed because NIC registers are in
@@ -5561,9 +5699,9 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev,
for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
- if (j == conf->num)
+ if (j == conf->conf.queue_num)
j = 0;
- reta = (reta << 8) | conf->queue[j];
+ reta = (reta << 8) | conf->conf.queue[j];
if ((i & 3) == 3)
IXGBE_WRITE_REG(hw, reta_reg,
rte_bswap32(reta));
@@ -5580,8 +5718,8 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev,
rss_conf.rss_key = rss_intel_key; /* Default hash key */
ixgbe_hw_rss_hash_set(hw, &rss_conf);
- rte_memcpy(&filter_info->rss_info,
- conf, sizeof(struct ixgbe_rte_flow_rss_conf));
+ if (ixgbe_rss_conf_init(&filter_info->rss_info, &conf->conf))
+ return -EINVAL;
return 0;
}
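
The ixgbe_rxtx.c changes above retire txq_flags: queue setup now works from a single offloads bitmap, built by ORing the queue-level request with the port-level one from dev_conf. A hedged application-side sketch of the same convention; the port id, descriptor counts and mempool are placeholders:

    #include <rte_ethdev.h>

    static int
    example_port_setup(uint16_t port_id, struct rte_mempool *mb_pool)
    {
            struct rte_eth_conf conf = {
                    /* Port-level offloads apply to every queue. */
                    .rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM |
                                       DEV_RX_OFFLOAD_CRC_STRIP,
                    .txmode.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
                                       DEV_TX_OFFLOAD_TCP_CKSUM,
            };
            struct rte_eth_rxconf rxconf = {
                    /* Queue-level offloads are ORed with the port-level ones,
                     * mirroring ixgbe_dev_rx_queue_setup() above.
                     */
                    .offloads = DEV_RX_OFFLOAD_SCATTER,
            };
            int ret;

            ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
            if (ret < 0)
                    return ret;
            ret = rte_eth_rx_queue_setup(port_id, 0, 512,
                            rte_eth_dev_socket_id(port_id), &rxconf, mb_pool);
            if (ret < 0)
                    return ret;
            return rte_eth_tx_queue_setup(port_id, 0, 512,
                            rte_eth_dev_socket_id(port_id), NULL);
    }
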
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 69c718bc..39378f75 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -129,6 +129,7 @@ struct ixgbe_rx_queue {
uint8_t rx_deferred_start; /**< not in global dev start. */
/** flags to set in mbuf when a vlan is detected. */
uint64_t vlan_flags;
+ uint64_t offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
struct rte_mbuf fake_mbuf;
/** hold packets to return to application */
@@ -221,7 +222,7 @@ struct ixgbe_tx_queue {
uint8_t pthresh; /**< Prefetch threshold register. */
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold reg. */
- uint32_t txq_flags; /**< Holds flags for this TXq */
+ uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
uint32_t ctx_curr; /**< Hardware context states. */
/** Hardware context0 history. */
struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
@@ -240,20 +241,6 @@ struct ixgbe_txq_ops {
};
/*
- * The "simple" TX queue functions require that the following
- * flags are set when the TX queue is configured:
- * - ETH_TXQ_FLAGS_NOMULTSEGS
- * - ETH_TXQ_FLAGS_NOVLANOFFL
- * - ETH_TXQ_FLAGS_NOXSUMSCTP
- * - ETH_TXQ_FLAGS_NOXSUMUDP
- * - ETH_TXQ_FLAGS_NOXSUMTCP
- * and that the RS bit threshold (tx_rs_thresh) is at least equal to
- * RTE_PMD_IXGBE_TX_MAX_BURST.
- */
-#define IXGBE_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
- ETH_TXQ_FLAGS_NOOFFLOADS)
-
-/*
* Populate descriptors with the following info:
* 1.) buffer_addr = phys_addr + headroom
* 2.) cmd_type_len = DCMD_DTYP_FLAGS | pkt_len
@@ -305,6 +292,11 @@ extern const uint32_t ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX];
uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
-
#endif /* RTE_IXGBE_INC_VECTOR */
+
+uint64_t ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev);
+uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
+uint64_t ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev);
+uint64_t ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev);
+
#endif /* _IXGBE_RXTX_H_ */
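
The four getters declared above split the reported capabilities into per-queue and per-port sets. The matching dev_info changes live in ixgbe_ethdev.c rather than in this hunk, but a dev_info callback built on them would be expected to report queue capabilities as a subset of the port ones, roughly as sketched here (hedged, illustrative function name):

    static void
    example_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
    {
            dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
            dev_info->rx_offload_capa = ixgbe_get_rx_port_offloads(dev) |
                                        dev_info->rx_queue_offload_capa;
            dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
            dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev) |
                                        dev_info->tx_queue_offload_capa;
    }
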
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index 414840a2..a97c2718 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -278,17 +278,12 @@ static inline int
ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
- struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
/* no fdir support */
if (fconf->mode != RTE_FDIR_MODE_NONE)
return -1;
- /* no header split support */
- if (rxmode->header_split == 1)
- return -1;
-
return 0;
#else
RTE_SET_USED(dev);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
index e0f9998f..edb13835 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -515,7 +515,7 @@ ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
/* no csum error report support */
- if (rxmode->hw_ip_checksum == 1)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
return -1;
return ixgbe_rx_vec_dev_conf_condition_check_default(dev);
diff --git a/drivers/net/ixgbe/ixgbe_vf_representor.c b/drivers/net/ixgbe/ixgbe_vf_representor.c
new file mode 100644
index 00000000..db516d99
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_vf_representor.c
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation.
+ */
+
+#include <rte_ethdev.h>
+#include <rte_pci.h>
+#include <rte_malloc.h>
+
+#include "base/ixgbe_type.h"
+#include "base/ixgbe_vf.h"
+#include "ixgbe_ethdev.h"
+#include "ixgbe_rxtx.h"
+#include "rte_pmd_ixgbe.h"
+
+
+static int
+ixgbe_vf_representor_link_update(struct rte_eth_dev *ethdev,
+ int wait_to_complete)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+ return ixgbe_dev_link_update_share(representor->pf_ethdev,
+ wait_to_complete, 0);
+}
+
+static int
+ixgbe_vf_representor_mac_addr_set(struct rte_eth_dev *ethdev,
+ struct ether_addr *mac_addr)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+ return rte_pmd_ixgbe_set_vf_mac_addr(
+ representor->pf_ethdev->data->port_id,
+ representor->vf_id, mac_addr);
+}
+
+static void
+ixgbe_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(
+ representor->pf_ethdev->data->dev_private);
+
+ dev_info->device = representor->pf_ethdev->device;
+
+ dev_info->min_rx_bufsize = 1024;
+ /**< Minimum size of RX buffer. */
+ dev_info->max_rx_pktlen = 9728;
+ /**< Maximum configurable length of RX pkt. */
+ dev_info->max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
+ /**< Maximum number of RX queues. */
+ dev_info->max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
+ /**< Maximum number of TX queues. */
+
+ dev_info->max_mac_addrs = hw->mac.num_rar_entries;
+ /**< Maximum number of MAC addresses. */
+
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ /**< Device RX offload capabilities. */
+
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
+ /**< Device TX offload capabilities. */
+
+ dev_info->speed_capa =
+ representor->pf_ethdev->data->dev_link.link_speed;
+ /**< Supported speeds bitmap (ETH_LINK_SPEED_). */
+
+ dev_info->switch_info.name =
+ representor->pf_ethdev->device->name;
+ dev_info->switch_info.domain_id = representor->switch_domain_id;
+ dev_info->switch_info.port_id = representor->vf_id;
+}
+
+static int ixgbe_vf_representor_dev_configure(
+ __rte_unused struct rte_eth_dev *dev)
+{
+ return 0;
+}
+
+static int ixgbe_vf_representor_rx_queue_setup(
+ __rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t rx_queue_id,
+ __rte_unused uint16_t nb_rx_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
+ __rte_unused struct rte_mempool *mb_pool)
+{
+ return 0;
+}
+
+static int ixgbe_vf_representor_tx_queue_setup(
+ __rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t rx_queue_id,
+ __rte_unused uint16_t nb_rx_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_txconf *tx_conf)
+{
+ return 0;
+}
+
+static int ixgbe_vf_representor_dev_start(__rte_unused struct rte_eth_dev *dev)
+{
+ return 0;
+}
+
+static void ixgbe_vf_representor_dev_stop(__rte_unused struct rte_eth_dev *dev)
+{
+}
+
+static int
+ixgbe_vf_representor_vlan_filter_set(struct rte_eth_dev *ethdev,
+ uint16_t vlan_id, int on)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+ uint64_t vf_mask = 1ULL << representor->vf_id;
+
+ return rte_pmd_ixgbe_set_vf_vlan_filter(
+ representor->pf_ethdev->data->port_id, vlan_id, vf_mask, on);
+}
+
+static void
+ixgbe_vf_representor_vlan_strip_queue_set(struct rte_eth_dev *ethdev,
+ __rte_unused uint16_t rx_queue_id, int on)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_ixgbe_set_vf_vlan_stripq(representor->pf_ethdev->data->port_id,
+ representor->vf_id, on);
+}
+
+struct eth_dev_ops ixgbe_vf_representor_dev_ops = {
+ .dev_infos_get = ixgbe_vf_representor_dev_infos_get,
+
+ .dev_start = ixgbe_vf_representor_dev_start,
+ .dev_configure = ixgbe_vf_representor_dev_configure,
+ .dev_stop = ixgbe_vf_representor_dev_stop,
+
+ .rx_queue_setup = ixgbe_vf_representor_rx_queue_setup,
+ .tx_queue_setup = ixgbe_vf_representor_tx_queue_setup,
+
+ .link_update = ixgbe_vf_representor_link_update,
+
+ .vlan_filter_set = ixgbe_vf_representor_vlan_filter_set,
+ .vlan_strip_queue_set = ixgbe_vf_representor_vlan_strip_queue_set,
+
+ .mac_addr_set = ixgbe_vf_representor_mac_addr_set,
+};
+
+static uint16_t
+ixgbe_vf_representor_rx_burst(__rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+static uint16_t
+ixgbe_vf_representor_tx_burst(__rte_unused void *tx_queue,
+ __rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+int
+ixgbe_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+ struct ixgbe_vf_info *vf_data;
+ struct rte_pci_device *pci_dev;
+ struct rte_eth_link *link;
+
+ if (!representor)
+ return -ENOMEM;
+
+ representor->vf_id =
+ ((struct ixgbe_vf_representor *)init_params)->vf_id;
+ representor->switch_domain_id =
+ ((struct ixgbe_vf_representor *)init_params)->switch_domain_id;
+ representor->pf_ethdev =
+ ((struct ixgbe_vf_representor *)init_params)->pf_ethdev;
+
+ pci_dev = RTE_ETH_DEV_TO_PCI(representor->pf_ethdev);
+
+ if (representor->vf_id >= pci_dev->max_vfs)
+ return -ENODEV;
+
+ ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+ /* Set representor device ops */
+ ethdev->dev_ops = &ixgbe_vf_representor_dev_ops;
+
+ /* No data-path, but need stub Rx/Tx functions to avoid crash
+ * when testing with the likes of testpmd.
+ */
+ ethdev->rx_pkt_burst = ixgbe_vf_representor_rx_burst;
+ ethdev->tx_pkt_burst = ixgbe_vf_representor_tx_burst;
+
+ /* Setting the number of queues allocated to the VF */
+ ethdev->data->nb_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
+ ethdev->data->nb_tx_queues = IXGBE_VF_MAX_RX_QUEUES;
+
+ /* Reference VF mac address from PF data structure */
+ vf_data = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(
+ representor->pf_ethdev->data->dev_private);
+
+ ethdev->data->mac_addrs = (struct ether_addr *)
+ vf_data[representor->vf_id].vf_mac_addresses;
+
+ /* Link state. Inherited from PF */
+ link = &representor->pf_ethdev->data->dev_link;
+
+ ethdev->data->dev_link.link_speed = link->link_speed;
+ ethdev->data->dev_link.link_duplex = link->link_duplex;
+ ethdev->data->dev_link.link_status = link->link_status;
+ ethdev->data->dev_link.link_autoneg = link->link_autoneg;
+
+ return 0;
+}
+
+int
+ixgbe_vf_representor_uninit(struct rte_eth_dev *ethdev __rte_unused)
+{
+ return 0;
+}
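
ixgbe_vf_representor_dev_infos_get() above publishes the representor's identity through dev_info->switch_info. A hedged sketch of reading that back from an application to tell representor ports apart; only the switch_info fields used by the hunk are assumed:

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    example_print_switch_info(uint16_t port_id)
    {
            struct rte_eth_dev_info info;

            rte_eth_dev_info_get(port_id, &info);
            /* For an ixgbe VF representor, switch port_id is the VF id and
             * domain_id matches the domain allocated by the parent PF.
             */
            printf("port %u: switch %s, domain %u, switch port %u\n",
                   port_id,
                   info.switch_info.name ? info.switch_info.name : "n/a",
                   info.switch_info.domain_id, info.switch_info.port_id);
    }
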
diff --git a/drivers/net/ixgbe/meson.build b/drivers/net/ixgbe/meson.build
index 60af0bae..02d5ef5e 100644
--- a/drivers/net/ixgbe/meson.build
+++ b/drivers/net/ixgbe/meson.build
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
+version = 2
+
cflags += ['-DRTE_LIBRTE_IXGBE_BYPASS']
subdir('base')
@@ -17,6 +19,7 @@ sources = files(
'ixgbe_pf.c',
'ixgbe_rxtx.c',
'ixgbe_tm.c',
+ 'ixgbe_vf_representor.c',
'rte_pmd_ixgbe.c'
)
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.c b/drivers/net/ixgbe/rte_pmd_ixgbe.c
index d8ca8ca3..3a874f9a 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.c
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.c
@@ -5,6 +5,7 @@
#include <rte_ethdev_driver.h>
#include "base/ixgbe_api.h"
+#include "base/ixgbe_x550.h"
#include "ixgbe_ethdev.h"
#include "rte_pmd_ixgbe.h"
@@ -880,6 +881,34 @@ rte_pmd_ixgbe_set_tc_bw_alloc(uint16_t port,
return 0;
}
+int __rte_experimental
+rte_pmd_ixgbe_upd_fctrl_sbp(uint16_t port, int enable)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t fctrl;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+ dev = &rte_eth_devices[port];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!hw)
+ return -ENOTSUP;
+
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+
+ /* If 'enable' set the SBP bit else clear it */
+ if (enable)
+ fctrl |= IXGBE_FCTRL_SBP;
+ else
+ fctrl &= ~(IXGBE_FCTRL_SBP);
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+ return 0;
+}
+
#ifdef RTE_LIBRTE_IXGBE_BYPASS
int
rte_pmd_ixgbe_bypass_init(uint16_t port_id)
@@ -1012,3 +1041,204 @@ rte_pmd_ixgbe_bypass_wd_reset(uint16_t port_id)
return ixgbe_bypass_wd_reset(dev);
}
#endif
+
+/**
+ * rte_pmd_ixgbe_acquire_swfw - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore and gets the shared phy token as needed
+ */
+STATIC s32 rte_pmd_ixgbe_acquire_swfw(struct ixgbe_hw *hw, u32 mask)
+{
+ int retries = FW_PHY_TOKEN_RETRIES;
+ s32 status = IXGBE_SUCCESS;
+
+ while (--retries) {
+ status = ixgbe_acquire_swfw_semaphore(hw, mask);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Get SWFW sem failed, Status = %d\n",
+ status);
+ return status;
+ }
+ status = ixgbe_get_phy_token(hw);
+ if (status == IXGBE_SUCCESS)
+ return IXGBE_SUCCESS;
+
+ if (status == IXGBE_ERR_TOKEN_RETRY)
+ PMD_DRV_LOG(ERR, "Get PHY token failed, Status = %d\n",
+ status);
+
+ ixgbe_release_swfw_semaphore(hw, mask);
+ if (status != IXGBE_ERR_TOKEN_RETRY) {
+ PMD_DRV_LOG(ERR,
+ "Retry get PHY token failed, Status=%d\n",
+ status);
+ return status;
+ }
+ }
+ PMD_DRV_LOG(ERR, "swfw acquisition retries failed!: PHY ID = 0x%08X\n",
+ hw->phy.id);
+ return status;
+}
+
+/**
+ * rte_pmd_ixgbe_release_swfw - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore and puts the shared phy token as needed
+ */
+STATIC void rte_pmd_ixgbe_release_swfw(struct ixgbe_hw *hw, u32 mask)
+{
+ ixgbe_put_phy_token(hw);
+ ixgbe_release_swfw_semaphore(hw, mask);
+}
+
+int __rte_experimental
+rte_pmd_ixgbe_mdio_lock(uint16_t port)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ u32 swfw_mask;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+ dev = &rte_eth_devices[port];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!hw)
+ return -ENOTSUP;
+
+ if (hw->bus.lan_id)
+ swfw_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ swfw_mask = IXGBE_GSSR_PHY0_SM;
+
+ if (rte_pmd_ixgbe_acquire_swfw(hw, swfw_mask))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ return IXGBE_SUCCESS;
+}
+
+int __rte_experimental
+rte_pmd_ixgbe_mdio_unlock(uint16_t port)
+{
+ struct rte_eth_dev *dev;
+ struct ixgbe_hw *hw;
+ u32 swfw_mask;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!hw)
+ return -ENOTSUP;
+
+ if (hw->bus.lan_id)
+ swfw_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ swfw_mask = IXGBE_GSSR_PHY0_SM;
+
+ rte_pmd_ixgbe_release_swfw(hw, swfw_mask);
+
+ return IXGBE_SUCCESS;
+}
+
+int __rte_experimental
+rte_pmd_ixgbe_mdio_unlocked_read(uint16_t port, uint32_t reg_addr,
+ uint32_t dev_type, uint16_t *phy_data)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ u32 i, data, command;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+ dev = &rte_eth_devices[port];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!hw)
+ return -ENOTSUP;
+
+ /* Setup and write the read command */
+ command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (dev_type << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC |
+ IXGBE_MSCA_MDI_COMMAND;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /* Check every 10 usec to see if the access completed.
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usec_delay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if (!(command & IXGBE_MSCA_MDI_COMMAND))
+ break;
+ }
+ if (command & IXGBE_MSCA_MDI_COMMAND)
+ return IXGBE_ERR_PHY;
+
+ /* Read operation is complete. Get the data from MSRWD */
+ data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+ data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
+ *phy_data = (u16)data;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_pmd_ixgbe_mdio_unlocked_write(uint16_t port, uint32_t reg_addr,
+ uint32_t dev_type, uint16_t phy_data)
+{
+ struct ixgbe_hw *hw;
+ u32 i, command;
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+ dev = &rte_eth_devices[port];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!hw)
+ return -ENOTSUP;
+
+ /* Put the data in the MDI single read and write data register */
+ IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
+
+ /* Setup and write the write command */
+ command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (dev_type << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
+ IXGBE_MSCA_MDI_COMMAND;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /* Check every 10 usec to see if the access completed.
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usec_delay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if (!(command & IXGBE_MSCA_MDI_COMMAND))
+ break;
+ }
+ if (command & IXGBE_MSCA_MDI_COMMAND) {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "PHY write cmd didn't complete\n");
+ return IXGBE_ERR_PHY;
+ }
+ return 0;
+}
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h
index 11a9f334..72a941f9 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.h
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h
@@ -573,6 +573,77 @@ int rte_pmd_ixgbe_bypass_wd_timeout_show(uint16_t port, uint32_t *wd_timeout);
*/
int rte_pmd_ixgbe_bypass_wd_reset(uint16_t port);
+/**
+ * Acquire swfw semaphore lock for MDIO access
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-ENODEV) if *port* invalid.
+ * - (IXGBE_ERR_SWFW_SYNC) If sw/fw semaphore acquisition failed
+ */
+int __rte_experimental
+rte_pmd_ixgbe_mdio_lock(uint16_t port);
+
+/**
+ * Release swfw semaphore lock used for MDIO access
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-ENODEV) if *port* invalid.
+ */
+int __rte_experimental
+rte_pmd_ixgbe_mdio_unlock(uint16_t port);
+
+/**
+ * Read a PHY register using MDIO without taking the MDIO lock.
+ * The lock must be acquired separately before calling this API.
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param reg_addr
+ * 32 bit PHY Register
+ * @param dev_type
+ * Used to define device base address
+ * @param phy_data
+ * Pointer for reading PHY register data
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-ENODEV) if *port* invalid.
+ * - (IXGBE_ERR_PHY) If PHY read command failed
+ */
+int __rte_experimental
+rte_pmd_ixgbe_mdio_unlocked_read(uint16_t port, uint32_t reg_addr,
+ uint32_t dev_type, uint16_t *phy_data);
+
+/**
+ * Write data to a PHY register using MDIO without taking the MDIO lock.
+ * The lock must be acquired separately before calling this API.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param reg_addr
+ * 32 bit PHY Register
+ * @param dev_type
+ * Used to define device base address
+ * @param phy_data
+ * Data to write to PHY register
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-ENODEV) if *port* invalid.
+ * - (IXGBE_ERR_PHY) If PHY write command failed
+ */
+int __rte_experimental
+rte_pmd_ixgbe_mdio_unlocked_write(uint16_t port, uint32_t reg_addr,
+ uint32_t dev_type, uint16_t phy_data);
/**
* Response sent back to ixgbe driver from user app after callback
@@ -637,4 +708,17 @@ enum {
((x) > RTE_PMD_IXGBE_BYPASS_TMT_OFF && \
(x) < RTE_PMD_IXGBE_BYPASS_TMT_NUM))
+/**
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param enable
+ * 0 to disable and nonzero to enable 'SBP' bit in FCTRL register
+ * to receive all packets
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int __rte_experimental
+rte_pmd_ixgbe_upd_fctrl_sbp(uint16_t port, int enable);
#endif /* _PMD_IXGBE_H_ */
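
The MDIO helpers declared above form a lock/access/unlock sequence: the unlocked read and write calls assume the caller already holds the SWFW semaphore. A hedged usage sketch; the register address and device type arguments are placeholders chosen by the caller, and callers need ALLOW_EXPERIMENTAL_API since the version.map hunk below places the symbols in the EXPERIMENTAL block:

    #include <rte_pmd_ixgbe.h>

    static int
    example_phy_read(uint16_t port, uint32_t reg_addr, uint32_t dev_type,
                     uint16_t *value)
    {
            int ret;

            ret = rte_pmd_ixgbe_mdio_lock(port);
            if (ret)
                    return ret;

            ret = rte_pmd_ixgbe_mdio_unlocked_read(port, reg_addr, dev_type, value);

            /* Always release the semaphore, even when the read failed. */
            rte_pmd_ixgbe_mdio_unlock(port);
            return ret;
    }
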
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
index bf776742..c814f96d 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
@@ -52,3 +52,13 @@ DPDK_17.08 {
rte_pmd_ixgbe_bypass_wd_timeout_show;
rte_pmd_ixgbe_bypass_wd_timeout_store;
} DPDK_17.05;
+
+EXPERIMENTAL {
+ global:
+
+ rte_pmd_ixgbe_mdio_lock;
+ rte_pmd_ixgbe_mdio_unlock;
+ rte_pmd_ixgbe_mdio_unlocked_read;
+ rte_pmd_ixgbe_mdio_unlocked_write;
+ rte_pmd_ixgbe_upd_fctrl_sbp;
+};
diff --git a/drivers/net/kni/Makefile b/drivers/net/kni/Makefile
index 01eaef05..562e8d2d 100644
--- a/drivers/net/kni/Makefile
+++ b/drivers/net/kni/Makefile
@@ -10,6 +10,7 @@ LIB = librte_pmd_kni.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
LDLIBS += -lpthread
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_kni
diff --git a/drivers/net/kni/rte_eth_kni.c b/drivers/net/kni/rte_eth_kni.c
index dc4e65f5..ab63ea42 100644
--- a/drivers/net/kni/rte_eth_kni.c
+++ b/drivers/net/kni/rte_eth_kni.c
@@ -61,10 +61,15 @@ static const struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
.link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_AUTONEG,
+ .link_autoneg = ETH_LINK_FIXED,
};
static int is_kni_initialized;
+static int eth_kni_logtype;
+
+#define PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, eth_kni_logtype, \
+ "%s(): " fmt "\n", __func__, ##args)
static uint16_t
eth_kni_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
@@ -126,8 +131,8 @@ eth_kni_start(struct rte_eth_dev *dev)
internals->kni = rte_kni_alloc(mb_pool, &conf, NULL);
if (internals->kni == NULL) {
- RTE_LOG(ERR, PMD,
- "Fail to create kni interface for port: %d\n",
+ PMD_LOG(ERR,
+ "Fail to create kni interface for port: %d",
port_id);
return -1;
}
@@ -149,11 +154,12 @@ eth_kni_dev_start(struct rte_eth_dev *dev)
}
if (internals->no_request_thread == 0) {
- ret = pthread_create(&internals->thread, NULL,
+ ret = rte_ctrl_thread_create(&internals->thread,
+ "kni_handle_req", NULL,
kni_handle_request, internals);
if (ret) {
- RTE_LOG(ERR, PMD,
- "Fail to create kni request thread\n");
+ PMD_LOG(ERR,
+ "Fail to create kni request thread");
return -1;
}
}
@@ -174,11 +180,11 @@ eth_kni_dev_stop(struct rte_eth_dev *dev)
ret = pthread_cancel(internals->thread);
if (ret)
- RTE_LOG(ERR, PMD, "Can't cancel the thread\n");
+ PMD_LOG(ERR, "Can't cancel the thread");
ret = pthread_join(internals->thread, NULL);
if (ret)
- RTE_LOG(ERR, PMD, "Can't join the thread\n");
+ PMD_LOG(ERR, "Can't join the thread");
internals->stop_thread = 0;
}
@@ -201,7 +207,6 @@ eth_kni_dev_info(struct rte_eth_dev *dev __rte_unused,
dev_info->max_rx_queues = KNI_MAX_QUEUE_PER_PORT;
dev_info->max_tx_queues = KNI_MAX_QUEUE_PER_PORT;
dev_info->min_rx_bufsize = 0;
- dev_info->pci_dev = NULL;
}
static int
@@ -337,25 +342,17 @@ eth_kni_create(struct rte_vdev_device *vdev,
struct pmd_internals *internals;
struct rte_eth_dev_data *data;
struct rte_eth_dev *eth_dev;
- const char *name;
- RTE_LOG(INFO, PMD, "Creating kni ethdev on numa socket %u\n",
+ PMD_LOG(INFO, "Creating kni ethdev on numa socket %u",
numa_node);
- name = rte_vdev_device_name(vdev);
- data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
- if (data == NULL)
- return NULL;
-
/* reserve an ethdev entry */
eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*internals));
- if (eth_dev == NULL) {
- rte_free(data);
+ if (!eth_dev)
return NULL;
- }
internals = eth_dev->data->dev_private;
- rte_memcpy(data, eth_dev->data, sizeof(*data));
+ data = eth_dev->data;
data->nb_rx_queues = 1;
data->nb_tx_queues = 1;
data->dev_link = pmd_link;
@@ -363,7 +360,6 @@ eth_kni_create(struct rte_vdev_device *vdev,
eth_random_addr(internals->eth_addr.addr_bytes);
- eth_dev->data = data;
eth_dev->dev_ops = &eth_kni_ops;
internals->no_request_thread = args->no_request_thread;
@@ -412,7 +408,20 @@ eth_kni_probe(struct rte_vdev_device *vdev)
name = rte_vdev_device_name(vdev);
params = rte_vdev_device_args(vdev);
- RTE_LOG(INFO, PMD, "Initializing eth_kni for %s\n", name);
+ PMD_LOG(INFO, "Initializing eth_kni for %s", name);
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(params) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ PMD_LOG(ERR, "Failed to probe %s", name);
+ return -1;
+ }
+ /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->dev_ops = &eth_kni_ops;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
ret = eth_kni_kvargs_process(&args, params);
if (ret < 0)
@@ -429,6 +438,7 @@ eth_kni_probe(struct rte_vdev_device *vdev)
eth_dev->rx_pkt_burst = eth_kni_rx;
eth_dev->tx_pkt_burst = eth_kni_tx;
+ rte_eth_dev_probing_finish(eth_dev);
return 0;
kni_uninit:
@@ -446,7 +456,7 @@ eth_kni_remove(struct rte_vdev_device *vdev)
const char *name;
name = rte_vdev_device_name(vdev);
- RTE_LOG(INFO, PMD, "Un-Initializing eth_kni for %s\n", name);
+ PMD_LOG(INFO, "Un-Initializing eth_kni for %s", name);
/* find the ethdev entry */
eth_dev = rte_eth_dev_allocated(name);
@@ -459,7 +469,6 @@ eth_kni_remove(struct rte_vdev_device *vdev)
rte_kni_release(internals->kni);
rte_free(internals);
- rte_free(eth_dev->data);
rte_eth_dev_release_port(eth_dev);
@@ -477,3 +486,12 @@ static struct rte_vdev_driver eth_kni_drv = {
RTE_PMD_REGISTER_VDEV(net_kni, eth_kni_drv);
RTE_PMD_REGISTER_PARAM_STRING(net_kni, ETH_KNI_NO_REQUEST_THREAD_ARG "=<int>");
+
+RTE_INIT(eth_kni_init_log);
+static void
+eth_kni_init_log(void)
+{
+ eth_kni_logtype = rte_log_register("pmd.net.kni");
+ if (eth_kni_logtype >= 0)
+ rte_log_set_level(eth_kni_logtype, RTE_LOG_NOTICE);
+}
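
The rte_eth_kni.c changes above replace the global RTE_LOG(..., PMD, ...) calls with a driver-specific dynamic logtype registered from a constructor. A hedged sketch of the same pattern for a hypothetical PMD; the logtype name is illustrative, and the level can then typically be tuned at run time through the EAL --log-level option:

    #include <rte_log.h>

    static int example_pmd_logtype;

    #define EXAMPLE_PMD_LOG(level, fmt, args...) \
            rte_log(RTE_LOG_ ## level, example_pmd_logtype, \
                    "%s(): " fmt "\n", __func__, ##args)

    RTE_INIT(example_pmd_init_log);
    static void
    example_pmd_init_log(void)
    {
            example_pmd_logtype = rte_log_register("pmd.net.example");
            if (example_pmd_logtype >= 0)
                    rte_log_set_level(example_pmd_logtype, RTE_LOG_NOTICE);
    }
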
diff --git a/drivers/net/liquidio/lio_ethdev.c b/drivers/net/liquidio/lio_ethdev.c
index e1a20cd6..64b1b86c 100644
--- a/drivers/net/liquidio/lio_ethdev.c
+++ b/drivers/net/liquidio/lio_ethdev.c
@@ -373,8 +373,6 @@ lio_dev_info_get(struct rte_eth_dev *eth_dev,
struct lio_device *lio_dev = LIO_DEV(eth_dev);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
- devinfo->pci_dev = pci_dev;
-
switch (pci_dev->id.subsystem_device_id) {
/* CN23xx 10G cards */
case PCI_SUBSYS_DEV_ID_CN2350_210:
@@ -478,9 +476,11 @@ lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
}
if (frame_len > ETHER_MAX_LEN)
- eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ eth_dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_len;
eth_dev->data->mtu = mtu;
@@ -904,32 +904,6 @@ lio_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, int on)
return 0;
}
-/**
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param eth_dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-lio_dev_atomic_write_link_status(struct rte_eth_dev *eth_dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &eth_dev->data->dev_link;
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
static uint64_t
lio_hweight64(uint64_t w)
{
@@ -949,23 +923,19 @@ lio_dev_link_update(struct rte_eth_dev *eth_dev,
int wait_to_complete __rte_unused)
{
struct lio_device *lio_dev = LIO_DEV(eth_dev);
- struct rte_eth_link link, old;
+ struct rte_eth_link link;
/* Initialize */
+ memset(&link, 0, sizeof(link));
link.link_status = ETH_LINK_DOWN;
link.link_speed = ETH_SPEED_NUM_NONE;
link.link_duplex = ETH_LINK_HALF_DUPLEX;
link.link_autoneg = ETH_LINK_AUTONEG;
- memset(&old, 0, sizeof(old));
/* Return what we found */
if (lio_dev->linfo.link.s.link_up == 0) {
/* Interface is down */
- if (lio_dev_atomic_write_link_status(eth_dev, &link))
- return -1;
- if (link.link_status == old.link_status)
- return -1;
- return 0;
+ return rte_eth_linkstatus_set(eth_dev, &link);
}
link.link_status = ETH_LINK_UP; /* Interface is up */
@@ -982,13 +952,7 @@ lio_dev_link_update(struct rte_eth_dev *eth_dev,
link.link_duplex = ETH_LINK_HALF_DUPLEX;
}
- if (lio_dev_atomic_write_link_status(eth_dev, &link))
- return -1;
-
- if (link.link_status == old.link_status)
- return -1;
-
- return 0;
+ return rte_eth_linkstatus_set(eth_dev, &link);
}
/**
@@ -1441,6 +1405,11 @@ lio_dev_start(struct rte_eth_dev *eth_dev)
/* Configure RSS if device configured with multiple RX queues. */
lio_dev_mq_rx_configure(eth_dev);
+ /* Before updating the link info,
+ * must set linfo.link.link_status64 to 0.
+ */
+ lio_dev->linfo.link.link_status64 = 0;
+
/* start polling for lsc */
ret = rte_eal_alarm_set(LIO_LSC_TIMEOUT,
lio_sync_link_state_check,
@@ -2146,19 +2115,8 @@ static int
lio_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
- struct rte_eth_dev *eth_dev;
- int ret;
-
- eth_dev = rte_eth_dev_pci_allocate(pci_dev,
- sizeof(struct lio_device));
- if (eth_dev == NULL)
- return -ENOMEM;
-
- ret = lio_eth_dev_init(eth_dev);
- if (ret)
- rte_eth_dev_pci_release(eth_dev);
-
- return ret;
+ return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct lio_device),
+ lio_eth_dev_init);
}
static int
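
The liquidio hunks above drop the driver-local atomic link writer in favour of the shared rte_eth_linkstatus_set() helper. A hedged sketch of a link_update callback built around it; example_read_hw_link() is a hypothetical hardware query, everything else follows the pattern in the diff:

    #include <string.h>
    #include <rte_ethdev_driver.h>

    /* Hypothetical helper: returns nonzero when the hardware reports link up. */
    static int example_read_hw_link(struct rte_eth_dev *dev);

    static int
    example_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
    {
            struct rte_eth_link link;

            memset(&link, 0, sizeof(link));
            link.link_status = ETH_LINK_DOWN;
            link.link_speed = ETH_SPEED_NUM_NONE;
            link.link_duplex = ETH_LINK_HALF_DUPLEX;
            link.link_autoneg = ETH_LINK_AUTONEG;

            if (example_read_hw_link(dev)) {
                    link.link_status = ETH_LINK_UP;
                    link.link_speed = ETH_SPEED_NUM_10G;
                    link.link_duplex = ETH_LINK_FULL_DUPLEX;
            }

            /* Stores the new state atomically and reports whether it changed,
             * replacing the hand-rolled cmpset removed above.
             */
            return rte_eth_linkstatus_set(dev, &link);
    }
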
diff --git a/drivers/net/meson.build b/drivers/net/meson.build
index 704cbe3c..b7d00a04 100644
--- a/drivers/net/meson.build
+++ b/drivers/net/meson.build
@@ -1,10 +1,10 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-drivers = ['af_packet', 'bonding',
- 'e1000', 'fm10k', 'i40e', 'ixgbe',
- 'null', 'octeontx', 'pcap', 'ring',
- 'sfc', 'thunderx']
+drivers = ['af_packet', 'axgbe', 'bonding', 'dpaa', 'dpaa2',
+ 'e1000', 'enic', 'fm10k', 'i40e', 'ixgbe',
+ 'mvpp2', 'null', 'octeontx', 'pcap', 'ring',
+ 'sfc', 'thunderx', 'virtio']
std_deps = ['ethdev', 'kvargs'] # 'ethdev' also pulls in mbuf, net, eal etc
std_deps += ['bus_pci'] # very many PMDs depend on PCI, so make std
std_deps += ['bus_vdev'] # same with vdev bus
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index cc800493..73f9d405 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -1,33 +1,6 @@
-# BSD LICENSE
-#
+# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2012 6WIND S.A.
-# Copyright 2012 Mellanox
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of 6WIND S.A. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# Copyright 2012 Mellanox Technologies, Ltd
include $(RTE_SDK)/mk/rte.vars.mk
@@ -64,6 +37,7 @@ CFLAGS += -D_BSD_SOURCE
CFLAGS += -D_DEFAULT_SOURCE
CFLAGS += -D_XOPEN_SOURCE=600
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DLOPEN_DEPS),y)
CFLAGS += -DMLX4_GLUE='"$(LIB_GLUE)"'
CFLAGS += -DMLX4_GLUE_VERSION='"$(LIB_GLUE_VERSION)"'
@@ -95,10 +69,6 @@ else
CFLAGS += -DNDEBUG -UPEDANTIC
endif
-ifdef CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE
-CFLAGS += -DMLX4_PMD_TX_MP_CACHE=$(CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE)
-endif
-
include $(RTE_SDK)/mk/rte.lib.mk
# Generate and clean-up mlx4_autoconf.h.
@@ -132,8 +102,13 @@ ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DLOPEN_DEPS),y)
$(LIB): $(LIB_GLUE)
+ifeq ($(LINK_USING_CC),1)
+GLUE_LDFLAGS := $(call linkerprefix,$(LDFLAGS))
+else
+GLUE_LDFLAGS := $(LDFLAGS)
+endif
$(LIB_GLUE): mlx4_glue.o
- $Q $(LD) $(LDFLAGS) $(EXTRA_LDFLAGS) \
+ $Q $(LD) $(GLUE_LDFLAGS) $(EXTRA_LDFLAGS) \
-Wl,-h,$(LIB_GLUE) \
-s -shared -o $@ $< -libverbs -lmlx4
diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index ee93dafe..a29814b3 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2012 6WIND S.A.
- * Copyright 2012 Mellanox
+ * Copyright 2012 Mellanox Technologies, Ltd
*/
/**
@@ -44,9 +44,15 @@
#include "mlx4.h"
#include "mlx4_glue.h"
#include "mlx4_flow.h"
+#include "mlx4_mr.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"
+struct mlx4_dev_list mlx4_mem_event_cb_list =
+ LIST_HEAD_INITIALIZER(mlx4_mem_event_cb_list);
+
+rte_rwlock_t mlx4_mem_event_rwlock = RTE_RWLOCK_INITIALIZER;
+
/** Configuration structure for device arguments. */
struct mlx4_conf {
struct {
@@ -61,6 +67,8 @@ const char *pmd_mlx4_init_params[] = {
NULL,
};
+static void mlx4_dev_stop(struct rte_eth_dev *dev);
+
/**
* DPDK callback for Ethernet device configuration.
*
@@ -123,6 +131,9 @@ mlx4_dev_start(struct rte_eth_dev *dev)
(void *)dev, strerror(-ret));
goto err;
}
+#ifndef NDEBUG
+ mlx4_mr_dump_dev(dev);
+#endif
ret = mlx4_rxq_intr_enable(priv);
if (ret) {
ERROR("%p: interrupt handler installation failed",
@@ -143,8 +154,7 @@ mlx4_dev_start(struct rte_eth_dev *dev)
dev->rx_pkt_burst = mlx4_rx_burst;
return 0;
err:
- /* Rollback. */
- priv->started = 0;
+ mlx4_dev_stop(dev);
return ret;
}
@@ -194,10 +204,12 @@ mlx4_dev_close(struct rte_eth_dev *dev)
dev->tx_pkt_burst = mlx4_tx_burst_removed;
rte_wmb();
mlx4_flow_clean(priv);
+ mlx4_rss_deinit(priv);
for (i = 0; i != dev->data->nb_rx_queues; ++i)
mlx4_rx_queue_release(dev->data->rx_queues[i]);
for (i = 0; i != dev->data->nb_tx_queues; ++i)
mlx4_tx_queue_release(dev->data->tx_queues[i]);
+ mlx4_mr_release(dev);
if (priv->pd != NULL) {
assert(priv->ctx != NULL);
claim_zero(mlx4_glue->dealloc_pd(priv->pd));
@@ -385,6 +397,99 @@ free_kvlist:
return ret;
}
+/**
+ * Interpret RSS capabilities reported by device.
+ *
+ * This function returns the set of usable Verbs RSS hash fields, kernel
+ * quirks taken into account.
+ *
+ * @param ctx
+ * Verbs context.
+ * @param pd
+ * Verbs protection domain.
+ * @param device_attr_ex
+ * Extended device attributes to interpret.
+ *
+ * @return
+ * Usable RSS hash fields mask in Verbs format.
+ */
+static uint64_t
+mlx4_hw_rss_sup(struct ibv_context *ctx, struct ibv_pd *pd,
+ struct ibv_device_attr_ex *device_attr_ex)
+{
+ uint64_t hw_rss_sup = device_attr_ex->rss_caps.rx_hash_fields_mask;
+ struct ibv_cq *cq = NULL;
+ struct ibv_wq *wq = NULL;
+ struct ibv_rwq_ind_table *ind = NULL;
+ struct ibv_qp *qp = NULL;
+
+ if (!hw_rss_sup) {
+ WARN("no RSS capabilities reported; disabling support for UDP"
+ " RSS and inner VXLAN RSS");
+ return IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 |
+ IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6 |
+ IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP;
+ }
+ if (!(hw_rss_sup & IBV_RX_HASH_INNER))
+ return hw_rss_sup;
+ /*
+ * Although reported as supported, missing code in some Linux
+ * versions (v4.15, v4.16) prevents the creation of hash QPs with
+ * inner capability.
+ *
+ * There is no choice but to attempt to instantiate a temporary RSS
+ * context in order to confirm its support.
+ */
+ cq = mlx4_glue->create_cq(ctx, 1, NULL, NULL, 0);
+ wq = cq ? mlx4_glue->create_wq
+ (ctx,
+ &(struct ibv_wq_init_attr){
+ .wq_type = IBV_WQT_RQ,
+ .max_wr = 1,
+ .max_sge = 1,
+ .pd = pd,
+ .cq = cq,
+ }) : NULL;
+ ind = wq ? mlx4_glue->create_rwq_ind_table
+ (ctx,
+ &(struct ibv_rwq_ind_table_init_attr){
+ .log_ind_tbl_size = 0,
+ .ind_tbl = &wq,
+ .comp_mask = 0,
+ }) : NULL;
+ qp = ind ? mlx4_glue->create_qp_ex
+ (ctx,
+ &(struct ibv_qp_init_attr_ex){
+ .comp_mask =
+ (IBV_QP_INIT_ATTR_PD |
+ IBV_QP_INIT_ATTR_RX_HASH |
+ IBV_QP_INIT_ATTR_IND_TABLE),
+ .qp_type = IBV_QPT_RAW_PACKET,
+ .pd = pd,
+ .rwq_ind_tbl = ind,
+ .rx_hash_conf = {
+ .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
+ .rx_hash_key_len = MLX4_RSS_HASH_KEY_SIZE,
+ .rx_hash_key = mlx4_rss_hash_key_default,
+ .rx_hash_fields_mask = hw_rss_sup,
+ },
+ }) : NULL;
+ if (!qp) {
+ WARN("disabling unusable inner RSS capability due to kernel"
+ " quirk");
+ hw_rss_sup &= ~IBV_RX_HASH_INNER;
+ } else {
+ claim_zero(mlx4_glue->destroy_qp(qp));
+ }
+ if (ind)
+ claim_zero(mlx4_glue->destroy_rwq_ind_table(ind));
+ if (wq)
+ claim_zero(mlx4_glue->destroy_wq(wq));
+ if (cq)
+ claim_zero(mlx4_glue->destroy_cq(cq));
+ return hw_rss_sup;
+}
+
static struct rte_pci_driver mlx4_driver;
/**
@@ -562,22 +667,15 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
(device_attr.vendor_part_id ==
PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO);
DEBUG("L2 tunnel checksum offloads are %ssupported",
- (priv->hw_csum_l2tun ? "" : "not "));
- priv->hw_rss_sup = device_attr_ex.rss_caps.rx_hash_fields_mask;
- if (!priv->hw_rss_sup) {
- WARN("no RSS capabilities reported; disabling support"
- " for UDP RSS and inner VXLAN RSS");
- /* Fake support for all possible RSS hash fields. */
- priv->hw_rss_sup = ~UINT64_C(0);
- priv->hw_rss_sup = mlx4_conv_rss_hf(priv, -1);
- /* Filter out known unsupported fields. */
- priv->hw_rss_sup &=
- ~(uint64_t)(IBV_RX_HASH_SRC_PORT_UDP |
- IBV_RX_HASH_DST_PORT_UDP |
- IBV_RX_HASH_INNER);
- }
+ priv->hw_csum_l2tun ? "" : "not ");
+ priv->hw_rss_sup = mlx4_hw_rss_sup(priv->ctx, priv->pd,
+ &device_attr_ex);
DEBUG("supported RSS hash fields mask: %016" PRIx64,
priv->hw_rss_sup);
+ priv->hw_fcs_strip = !!(device_attr_ex.raw_packet_caps &
+ IBV_RAW_PACKET_CAP_SCATTER_FCS);
+ DEBUG("FCS stripping toggling is %ssupported",
+ priv->hw_fcs_strip ? "" : "not ");
/* Configure the first MAC address by default. */
if (mlx4_get_mac(priv, &mac.addr_bytes)) {
ERROR("cannot get MAC address, is mlx4_en loaded?"
@@ -649,6 +747,24 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
/* Update link status once if waiting for LSC. */
if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
mlx4_link_update(eth_dev, 0);
+ /*
+ * Once the device is added to the list of memory event
+ * callback, its global MR cache table cannot be expanded
+ * on the fly because of deadlock. If it overflows, lookup
+ * should be done by searching MR list linearly, which is slow.
+ */
+ err = mlx4_mr_btree_init(&priv->mr.cache,
+ MLX4_MR_BTREE_CACHE_N * 2,
+ eth_dev->device->numa_node);
+ if (err) {
+ /* rte_errno is already set. */
+ goto port_error;
+ }
+ /* Add device to memory callback list. */
+ rte_rwlock_write_lock(&mlx4_mem_event_rwlock);
+ LIST_INSERT_HEAD(&mlx4_mem_event_cb_list, priv, mem_event_cb);
+ rte_rwlock_write_unlock(&mlx4_mem_event_rwlock);
+ rte_eth_dev_probing_finish(eth_dev);
continue;
port_error:
rte_free(priv);
@@ -708,11 +824,53 @@ static struct rte_pci_driver mlx4_driver = {
#ifdef RTE_LIBRTE_MLX4_DLOPEN_DEPS
/**
+ * Suffix RTE_EAL_PMD_PATH with "-glue".
+ *
+ * This function performs a sanity check on RTE_EAL_PMD_PATH before
+ * suffixing its last component.
+ *
+ * @param buf[out]
+ * Output buffer, should be large enough otherwise NULL is returned.
+ * @param size
+ * Size of @p out.
+ *
+ * @return
+ * Pointer to @p buf or @p NULL in case suffix cannot be appended.
+ */
+static char *
+mlx4_glue_path(char *buf, size_t size)
+{
+ static const char *const bad[] = { "/", ".", "..", NULL };
+ const char *path = RTE_EAL_PMD_PATH;
+ size_t len = strlen(path);
+ size_t off;
+ int i;
+
+ while (len && path[len - 1] == '/')
+ --len;
+ for (off = len; off && path[off - 1] != '/'; --off)
+ ;
+ for (i = 0; bad[i]; ++i)
+ if (!strncmp(path + off, bad[i], (int)(len - off)))
+ goto error;
+ i = snprintf(buf, size, "%.*s-glue", (int)len, path);
+ if (i == -1 || (size_t)i >= size)
+ goto error;
+ return buf;
+error:
+ ERROR("unable to append \"-glue\" to last component of"
+ " RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"),"
+ " please re-configure DPDK");
+ return NULL;
+}
+
+/**
* Initialization routine for run-time dependency on rdma-core.
*/
static int
mlx4_glue_init(void)
{
+ char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")];
const char *path[] = {
/*
* A basic security check is necessary before trusting
@@ -720,7 +878,13 @@ mlx4_glue_init(void)
*/
(geteuid() == getuid() && getegid() == getgid() ?
getenv("MLX4_GLUE_PATH") : NULL),
- RTE_EAL_PMD_PATH,
+ /*
+ * When RTE_EAL_PMD_PATH is set, use its glue-suffixed
+ * variant, otherwise let dlopen() look up libraries on its
+ * own.
+ */
+ (*RTE_EAL_PMD_PATH ?
+ mlx4_glue_path(glue_path, sizeof(glue_path)) : ""),
};
unsigned int i = 0;
void *handle = NULL;
@@ -828,6 +992,8 @@ rte_mlx4_pmd_init(void)
}
mlx4_glue->fork_init();
rte_pci_register(&mlx4_driver);
+ rte_mem_event_callback_register("MLX4_MEM_EVENT_CB",
+ mlx4_mr_mem_event_cb, NULL);
}
RTE_PMD_EXPORT_NAME(net_mlx4, __COUNTER__);
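
rte_mlx4_pmd_init() above registers mlx4_mr_mem_event_cb so the new MR machinery can invalidate cached translations when EAL allocates or frees hugepage memory. A hedged sketch of the same registration pattern, assuming the callback signature implied by the NULL user argument passed in the hunk; all example_* names are illustrative:

    #include <rte_common.h>
    #include <rte_memory.h>

    static void
    example_mem_event_cb(enum rte_mem_event event_type, const void *addr,
                         size_t len, void *arg __rte_unused)
    {
            if (event_type == RTE_MEM_EVENT_FREE) {
                    /* Drop any cached translations covering [addr, addr + len). */
                    RTE_SET_USED(addr);
                    RTE_SET_USED(len);
            }
    }

    static void
    example_register_mem_event_cb(void)
    {
            rte_mem_event_callback_register("EXAMPLE_MEM_EVENT_CB",
                                            example_mem_event_cb, NULL);
    }
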
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 19c8a223..300cb4d7 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2012 6WIND S.A.
- * Copyright 2012 Mellanox
+ * Copyright 2012 Mellanox Technologies, Ltd
*/
#ifndef RTE_PMD_MLX4_H_
@@ -23,7 +23,9 @@
#include <rte_ether.h>
#include <rte_interrupts.h>
#include <rte_mempool.h>
-#include <rte_spinlock.h>
+#include <rte_rwlock.h>
+
+#include "mlx4_mr.h"
#ifndef IBV_RX_HASH_INNER
/** This is not necessarily defined by supported RDMA core versions. */
@@ -42,17 +44,6 @@
/** Fixed RSS hash key size in bytes. Cannot be modified. */
#define MLX4_RSS_HASH_KEY_SIZE 40
-/**
- * Maximum number of cached Memory Pools (MPs) per TX queue. Each RTE MP
- * from which buffers are to be transmitted will have to be mapped by this
- * driver to their own Memory Region (MR). This is a slow operation.
- *
- * This value is always 1 for RX queues.
- */
-#ifndef MLX4_PMD_TX_MP_CACHE
-#define MLX4_PMD_TX_MP_CACHE 8
-#endif
-
/** Interrupt alarm timeout value in microseconds. */
#define MLX4_INTR_ALARM_TIMEOUT 100000
@@ -78,20 +69,12 @@ struct rxq;
struct txq;
struct rte_flow;
-/** Memory region descriptor. */
-struct mlx4_mr {
- LIST_ENTRY(mlx4_mr) next; /**< Next entry in list. */
- uintptr_t start; /**< Base address for memory region. */
- uintptr_t end; /**< End address for memory region. */
- uint32_t lkey; /**< L_Key extracted from @p mr. */
- uint32_t refcnt; /**< Reference count for this object. */
- struct priv *priv; /**< Back pointer to private data. */
- struct ibv_mr *mr; /**< Memory region associated with @p mp. */
- struct rte_mempool *mp; /**< Target memory pool (mempool). */
-};
+LIST_HEAD(mlx4_dev_list, priv);
+LIST_HEAD(mlx4_mr_list, mlx4_mr);
/** Private data structure. */
struct priv {
+ LIST_ENTRY(priv) mem_event_cb; /* Called by memory event callback. */
struct rte_eth_dev *dev; /**< Ethernet device. */
struct ibv_context *ctx; /**< Verbs context. */
struct ibv_device_attr device_attr; /**< Device properties. */
@@ -103,15 +86,22 @@ struct priv {
uint32_t vf:1; /**< This is a VF device. */
uint32_t intr_alarm:1; /**< An interrupt alarm is scheduled. */
uint32_t isolated:1; /**< Toggle isolated mode. */
+ uint32_t rss_init:1; /**< Common RSS context is initialized. */
uint32_t hw_csum:1; /**< Checksum offload is supported. */
uint32_t hw_csum_l2tun:1; /**< Checksum support for L2 tunnels. */
+ uint32_t hw_fcs_strip:1; /**< FCS stripping toggling is supported. */
uint64_t hw_rss_sup; /**< Supported RSS hash fields (Verbs format). */
struct rte_intr_handle intr_handle; /**< Port interrupt handle. */
struct mlx4_drop *drop; /**< Shared resources for drop flow rules. */
+ struct {
+ uint32_t dev_gen; /* Generation number to flush local caches. */
+ rte_rwlock_t rwlock; /* MR Lock. */
+ struct mlx4_mr_btree cache; /* Global MR cache table. */
+ struct mlx4_mr_list mr_list; /* Registered MR list. */
+ struct mlx4_mr_list mr_free_list; /* Freed MR list. */
+ } mr;
LIST_HEAD(, mlx4_rss) rss; /**< Shared targets for Rx flow rules. */
LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */
- LIST_HEAD(, mlx4_mr) mr; /**< Registered memory regions. */
- rte_spinlock_t mr_lock; /**< Lock for @p mr access. */
struct ether_addr mac[MLX4_MAX_MAC_ADDRESSES];
/**< Configured MAC addresses. Unused entries are zeroed. */
};
@@ -131,7 +121,7 @@ void mlx4_allmulticast_disable(struct rte_eth_dev *dev);
void mlx4_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
int mlx4_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, uint32_t vmdq);
-void mlx4_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr);
+int mlx4_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr);
int mlx4_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
int mlx4_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
void mlx4_stats_reset(struct rte_eth_dev *dev);
@@ -154,11 +144,4 @@ void mlx4_rxq_intr_disable(struct priv *priv);
int mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx);
int mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx);
-/* mlx4_mr.c */
-
-struct mlx4_mr *mlx4_mr_get(struct priv *priv, struct rte_mempool *mp);
-void mlx4_mr_put(struct mlx4_mr *mr);
-uint32_t mlx4_txq_add_mr(struct txq *txq, struct rte_mempool *mp,
- uint32_t i);
-
#endif /* RTE_PMD_MLX4_H_ */
diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
index 3bc69273..30deb3ef 100644
--- a/drivers/net/mlx4/mlx4_ethdev.c
+++ b/drivers/net/mlx4/mlx4_ethdev.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
/**
@@ -39,6 +39,7 @@
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_pci.h>
+#include <rte_string_fns.h>
#include "mlx4.h"
#include "mlx4_flow.h"
@@ -120,7 +121,7 @@ try_dev_id:
goto try_dev_id;
dev_port_prev = dev_port;
if (dev_port == (priv->port - 1u))
- snprintf(match, sizeof(match), "%s", name);
+ strlcpy(match, name, sizeof(match));
}
closedir(dir);
if (match[0] == '\0') {
@@ -132,167 +133,6 @@ try_dev_id:
}
/**
- * Read from sysfs entry.
- *
- * @param[in] priv
- * Pointer to private structure.
- * @param[in] entry
- * Entry name relative to sysfs path.
- * @param[out] buf
- * Data output buffer.
- * @param size
- * Buffer size.
- *
- * @return
- * Number of bytes read on success, negative errno value otherwise and
- * rte_errno is set.
- */
-static int
-mlx4_sysfs_read(const struct priv *priv, const char *entry,
- char *buf, size_t size)
-{
- char ifname[IF_NAMESIZE];
- FILE *file;
- int ret;
-
- ret = mlx4_get_ifname(priv, &ifname);
- if (ret)
- return ret;
-
- MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path,
- ifname, entry);
-
- file = fopen(path, "rb");
- if (file == NULL) {
- rte_errno = errno;
- return -rte_errno;
- }
- ret = fread(buf, 1, size, file);
- if ((size_t)ret < size && ferror(file)) {
- rte_errno = EIO;
- ret = -rte_errno;
- } else {
- ret = size;
- }
- fclose(file);
- return ret;
-}
-
-/**
- * Write to sysfs entry.
- *
- * @param[in] priv
- * Pointer to private structure.
- * @param[in] entry
- * Entry name relative to sysfs path.
- * @param[in] buf
- * Data buffer.
- * @param size
- * Buffer size.
- *
- * @return
- * Number of bytes written on success, negative errno value otherwise and
- * rte_errno is set.
- */
-static int
-mlx4_sysfs_write(const struct priv *priv, const char *entry,
- char *buf, size_t size)
-{
- char ifname[IF_NAMESIZE];
- FILE *file;
- int ret;
-
- ret = mlx4_get_ifname(priv, &ifname);
- if (ret)
- return ret;
-
- MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path,
- ifname, entry);
-
- file = fopen(path, "wb");
- if (file == NULL) {
- rte_errno = errno;
- return -rte_errno;
- }
- ret = fwrite(buf, 1, size, file);
- if ((size_t)ret < size || ferror(file)) {
- rte_errno = EIO;
- ret = -rte_errno;
- } else {
- ret = size;
- }
- fclose(file);
- return ret;
-}
-
-/**
- * Get unsigned long sysfs property.
- *
- * @param priv
- * Pointer to private structure.
- * @param[in] name
- * Entry name relative to sysfs path.
- * @param[out] value
- * Value output buffer.
- *
- * @return
- * 0 on success, negative errno value otherwise and rte_errno is set.
- */
-static int
-mlx4_get_sysfs_ulong(struct priv *priv, const char *name, unsigned long *value)
-{
- int ret;
- unsigned long value_ret;
- char value_str[32];
-
- ret = mlx4_sysfs_read(priv, name, value_str, (sizeof(value_str) - 1));
- if (ret < 0) {
- DEBUG("cannot read %s value from sysfs: %s",
- name, strerror(rte_errno));
- return ret;
- }
- value_str[ret] = '\0';
- errno = 0;
- value_ret = strtoul(value_str, NULL, 0);
- if (errno) {
- rte_errno = errno;
- DEBUG("invalid %s value `%s': %s", name, value_str,
- strerror(rte_errno));
- return -rte_errno;
- }
- *value = value_ret;
- return 0;
-}
-
-/**
- * Set unsigned long sysfs property.
- *
- * @param priv
- * Pointer to private structure.
- * @param[in] name
- * Entry name relative to sysfs path.
- * @param value
- * Value to set.
- *
- * @return
- * 0 on success, negative errno value otherwise and rte_errno is set.
- */
-static int
-mlx4_set_sysfs_ulong(struct priv *priv, const char *name, unsigned long value)
-{
- int ret;
- MKSTR(value_str, "%lu", value);
-
- ret = mlx4_sysfs_write(priv, name, value_str, (sizeof(value_str) - 1));
- if (ret < 0) {
- DEBUG("cannot write %s `%s' (%lu) to sysfs: %s",
- name, value_str, value, strerror(rte_errno));
- return ret;
- }
- return 0;
-}
-
-/**
* Perform ifreq ioctl() on associated Ethernet device.
*
* @param[in] priv
@@ -361,12 +201,12 @@ mlx4_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN])
int
mlx4_mtu_get(struct priv *priv, uint16_t *mtu)
{
- unsigned long ulong_mtu = 0;
- int ret = mlx4_get_sysfs_ulong(priv, "mtu", &ulong_mtu);
+ struct ifreq request;
+ int ret = mlx4_ifreq(priv, SIOCGIFMTU, &request);
if (ret)
return ret;
- *mtu = ulong_mtu;
+ *mtu = request.ifr_mtu;
return 0;
}
@@ -385,20 +225,13 @@ int
mlx4_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
struct priv *priv = dev->data->dev_private;
- uint16_t new_mtu;
- int ret = mlx4_set_sysfs_ulong(priv, "mtu", mtu);
+ struct ifreq request = { .ifr_mtu = mtu, };
+ int ret = mlx4_ifreq(priv, SIOCSIFMTU, &request);
if (ret)
return ret;
- ret = mlx4_mtu_get(priv, &new_mtu);
- if (ret)
- return ret;
- if (new_mtu == mtu) {
- priv->mtu = mtu;
- return 0;
- }
- rte_errno = EINVAL;
- return -rte_errno;
+ priv->mtu = mtu;
+ return 0;
}
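The two hunks above replace the old sysfs reads/writes with SIOCGIFMTU/SIOCSIFMTU ioctls issued through mlx4_ifreq(). Below is a standalone sketch of that kernel interface, independent of the driver; the interface name "lo" is only an example:

#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

/* Query the MTU of a network interface through SIOCGIFMTU. */
static int
get_mtu(const char *ifname, int *mtu)
{
	struct ifreq req;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int ret;

	if (fd < 0)
		return -1;
	memset(&req, 0, sizeof(req));
	strncpy(req.ifr_name, ifname, IFNAMSIZ - 1);
	ret = ioctl(fd, SIOCGIFMTU, &req);
	if (ret == 0)
		*mtu = req.ifr_mtu;
	close(fd);
	return ret;
}

int
main(void)
{
	int mtu;

	if (get_mtu("lo", &mtu) == 0)
		printf("MTU: %d\n", mtu);
	return 0;
}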
/**
@@ -417,14 +250,14 @@ mlx4_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
static int
mlx4_set_flags(struct priv *priv, unsigned int keep, unsigned int flags)
{
- unsigned long tmp = 0;
- int ret = mlx4_get_sysfs_ulong(priv, "flags", &tmp);
+ struct ifreq request;
+ int ret = mlx4_ifreq(priv, SIOCGIFFLAGS, &request);
if (ret)
return ret;
- tmp &= keep;
- tmp |= (flags & (~keep));
- return mlx4_set_sysfs_ulong(priv, "flags", tmp);
+ request.ifr_flags &= keep;
+ request.ifr_flags |= flags & ~keep;
+ return mlx4_ifreq(priv, SIOCSIFFLAGS, &request);
}
/**
@@ -701,11 +534,14 @@ mlx4_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
* Pointer to Ethernet device structure.
* @param mac_addr
* MAC address to register.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
*/
-void
+int
mlx4_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
- mlx4_mac_addr_add(dev, mac_addr, 0, 0);
+ return mlx4_mac_addr_add(dev, mac_addr, 0, 0);
}
/**
@@ -723,7 +559,6 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
unsigned int max;
char ifname[IF_NAMESIZE];
- info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
/* FIXME: we should ask the device for these values. */
info->min_rx_bufsize = 32;
info->max_rx_pktlen = 65536;
@@ -752,6 +587,7 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
ETH_LINK_SPEED_20G |
ETH_LINK_SPEED_40G |
ETH_LINK_SPEED_56G;
+ info->flow_type_rss_offloads = mlx4_conv_rss_types(priv, 0, 1);
}
/**
@@ -878,7 +714,7 @@ mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete)
}
link_speed = ethtool_cmd_speed(&edata);
if (link_speed == -1)
- dev_link.link_speed = 0;
+ dev_link.link_speed = ETH_SPEED_NUM_NONE;
else
dev_link.link_speed = link_speed;
dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c
index 2d55bfe0..b40e7e5c 100644
--- a/drivers/net/mlx4/mlx4_flow.c
+++ b/drivers/net/mlx4/mlx4_flow.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
/**
@@ -76,69 +76,94 @@ struct mlx4_drop {
};
/**
- * Convert DPDK RSS hash fields to their Verbs equivalent.
+ * Convert supported RSS hash field types between DPDK and Verbs formats.
*
- * This function returns the supported (default) set when @p rss_hf has
- * special value (uint64_t)-1.
+ * This function returns the supported (default) set when @p types has
+ * special value 0.
*
* @param priv
* Pointer to private structure.
- * @param rss_hf
- * Hash fields in DPDK format (see struct rte_eth_rss_conf).
+ * @param types
+ * Depending on @p verbs_to_dpdk, hash types in either DPDK (see struct
+ * rte_eth_rss_conf) or Verbs format.
+ * @param verbs_to_dpdk
+ * A zero value converts @p types from DPDK to Verbs, a nonzero value
+ * performs the reverse operation.
*
* @return
- * A valid Verbs RSS hash fields mask for mlx4 on success, (uint64_t)-1
- * otherwise and rte_errno is set.
+ * Converted RSS hash fields on success, (uint64_t)-1 otherwise and
+ * rte_errno is set.
*/
uint64_t
-mlx4_conv_rss_hf(struct priv *priv, uint64_t rss_hf)
+mlx4_conv_rss_types(struct priv *priv, uint64_t types, int verbs_to_dpdk)
{
- enum { IPV4, IPV6, TCP, UDP, };
- const uint64_t in[] = {
- [IPV4] = (ETH_RSS_IPV4 |
- ETH_RSS_FRAG_IPV4 |
- ETH_RSS_NONFRAG_IPV4_TCP |
- ETH_RSS_NONFRAG_IPV4_UDP |
- ETH_RSS_NONFRAG_IPV4_OTHER),
- [IPV6] = (ETH_RSS_IPV6 |
- ETH_RSS_FRAG_IPV6 |
- ETH_RSS_NONFRAG_IPV6_TCP |
- ETH_RSS_NONFRAG_IPV6_UDP |
- ETH_RSS_NONFRAG_IPV6_OTHER |
- ETH_RSS_IPV6_EX |
- ETH_RSS_IPV6_TCP_EX |
- ETH_RSS_IPV6_UDP_EX),
- [TCP] = (ETH_RSS_NONFRAG_IPV4_TCP |
- ETH_RSS_NONFRAG_IPV6_TCP |
- ETH_RSS_IPV6_TCP_EX),
- [UDP] = (ETH_RSS_NONFRAG_IPV4_UDP |
- ETH_RSS_NONFRAG_IPV6_UDP |
- ETH_RSS_IPV6_UDP_EX),
+ enum {
+ INNER,
+ IPV4, IPV4_1, IPV4_2, IPV6, IPV6_1, IPV6_2, IPV6_3,
+ TCP, UDP,
+ IPV4_TCP, IPV4_UDP, IPV6_TCP, IPV6_TCP_1, IPV6_UDP, IPV6_UDP_1,
};
- const uint64_t out[RTE_DIM(in)] = {
- [IPV4] = IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4,
- [IPV6] = IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6,
- [TCP] = IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP,
- [UDP] = IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP,
+ enum {
+ VERBS_IPV4 = IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4,
+ VERBS_IPV6 = IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6,
+ VERBS_TCP = IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP,
+ VERBS_UDP = IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP,
};
+ static const uint64_t dpdk[] = {
+ [INNER] = 0,
+ [IPV4] = ETH_RSS_IPV4,
+ [IPV4_1] = ETH_RSS_FRAG_IPV4,
+ [IPV4_2] = ETH_RSS_NONFRAG_IPV4_OTHER,
+ [IPV6] = ETH_RSS_IPV6,
+ [IPV6_1] = ETH_RSS_FRAG_IPV6,
+ [IPV6_2] = ETH_RSS_NONFRAG_IPV6_OTHER,
+ [IPV6_3] = ETH_RSS_IPV6_EX,
+ [TCP] = 0,
+ [UDP] = 0,
+ [IPV4_TCP] = ETH_RSS_NONFRAG_IPV4_TCP,
+ [IPV4_UDP] = ETH_RSS_NONFRAG_IPV4_UDP,
+ [IPV6_TCP] = ETH_RSS_NONFRAG_IPV6_TCP,
+ [IPV6_TCP_1] = ETH_RSS_IPV6_TCP_EX,
+ [IPV6_UDP] = ETH_RSS_NONFRAG_IPV6_UDP,
+ [IPV6_UDP_1] = ETH_RSS_IPV6_UDP_EX,
+ };
+ static const uint64_t verbs[RTE_DIM(dpdk)] = {
+ [INNER] = IBV_RX_HASH_INNER,
+ [IPV4] = VERBS_IPV4,
+ [IPV4_1] = VERBS_IPV4,
+ [IPV4_2] = VERBS_IPV4,
+ [IPV6] = VERBS_IPV6,
+ [IPV6_1] = VERBS_IPV6,
+ [IPV6_2] = VERBS_IPV6,
+ [IPV6_3] = VERBS_IPV6,
+ [TCP] = VERBS_TCP,
+ [UDP] = VERBS_UDP,
+ [IPV4_TCP] = VERBS_IPV4 | VERBS_TCP,
+ [IPV4_UDP] = VERBS_IPV4 | VERBS_UDP,
+ [IPV6_TCP] = VERBS_IPV6 | VERBS_TCP,
+ [IPV6_TCP_1] = VERBS_IPV6 | VERBS_TCP,
+ [IPV6_UDP] = VERBS_IPV6 | VERBS_UDP,
+ [IPV6_UDP_1] = VERBS_IPV6 | VERBS_UDP,
+ };
+ const uint64_t *in = verbs_to_dpdk ? verbs : dpdk;
+ const uint64_t *out = verbs_to_dpdk ? dpdk : verbs;
uint64_t seen = 0;
uint64_t conv = 0;
unsigned int i;
- for (i = 0; i != RTE_DIM(in); ++i)
- if (rss_hf & in[i]) {
- seen |= rss_hf & in[i];
+ if (!types) {
+ if (!verbs_to_dpdk)
+ return priv->hw_rss_sup;
+ types = priv->hw_rss_sup;
+ }
+ for (i = 0; i != RTE_DIM(dpdk); ++i)
+ if (in[i] && (types & in[i]) == in[i]) {
+ seen |= types & in[i];
conv |= out[i];
}
- if ((conv & priv->hw_rss_sup) == conv) {
- if (rss_hf == (uint64_t)-1) {
- /* Include inner RSS by default if supported. */
- conv |= priv->hw_rss_sup & IBV_RX_HASH_INNER;
- return conv;
- }
- if (!(rss_hf & ~seen))
- return conv;
- }
+ if ((verbs_to_dpdk || (conv & priv->hw_rss_sup) == conv) &&
+ !(types & ~seen))
+ return conv;
rte_errno = ENOTSUP;
return (uint64_t)-1;
}
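mlx4_conv_rss_types() above is a table-driven conversion: two parallel arrays map each supported hash type between the DPDK and Verbs domains, and any input bit without a mapping makes the whole conversion fail. A minimal sketch of the same pattern follows, with made-up flag values rather than the real ETH_RSS_ or IBV_RX_HASH_ constants:

#include <stdint.h>
#include <stdio.h>

#define N 3

/*
 * Parallel lookup tables; index i of one domain maps to index i of the
 * other. Values are illustrative only, not the real DPDK or Verbs bits.
 */
static const uint64_t dom_a[N] = { 0x1, 0x2, 0x4 };
static const uint64_t dom_b[N] = { 0x10, 0x20, 0x40 };

/*
 * Convert a bitmask from domain A to domain B. Returns UINT64_MAX when
 * some input bits have no mapping, mirroring the ENOTSUP path above.
 */
static uint64_t
conv(uint64_t types)
{
	uint64_t seen = 0;
	uint64_t out = 0;
	unsigned int i;

	for (i = 0; i != N; ++i)
		if (dom_a[i] && (types & dom_a[i]) == dom_a[i]) {
			seen |= types & dom_a[i];
			out |= dom_b[i];
		}
	return (types & ~seen) ? UINT64_MAX : out;
}

int
main(void)
{
	printf("0x%llx\n", (unsigned long long)conv(0x1 | 0x4)); /* 0x50 */
	printf("0x%llx\n", (unsigned long long)conv(0x8)); /* unmapped bit */
	return 0;
}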
@@ -362,6 +387,9 @@ error:
* Additional mlx4-specific constraints on supported fields:
*
* - No support for partial masks.
+ * - Due to HW/FW limitation, flow rule priority is not taken into account
+ *   when matching UDP destination ports, doing so is therefore only supported
+ * at the highest priority level (0).
*
* @param[in, out] flow
* Flow rule handle to update.
@@ -393,6 +421,11 @@ mlx4_flow_merge_udp(struct rte_flow *flow,
msg = "mlx4 does not support matching partial UDP fields";
goto error;
}
+ if (mask && mask->hdr.dst_port && flow->priority) {
+ msg = "combining UDP destination port matching with a nonzero"
+ " priority level is not supported";
+ goto error;
+ }
if (!flow->ibv_attr)
return 0;
++flow->ibv_attr->num_of_specs;
@@ -637,6 +670,7 @@ mlx4_flow_prepare(struct priv *priv,
struct rte_flow temp = { .ibv_attr_size = sizeof(*temp.ibv_attr) };
struct rte_flow *flow = &temp;
const char *msg = NULL;
+ int overlap;
if (attr->group)
return rte_flow_error_set
@@ -651,12 +685,18 @@ mlx4_flow_prepare(struct priv *priv,
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
NULL, "egress is not supported");
+ if (attr->transfer)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ NULL, "transfer is not supported");
if (!attr->ingress)
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
NULL, "only ingress is supported");
fill:
+ overlap = 0;
proc = mlx4_flow_proc_item_list;
+ flow->priority = attr->priority;
/* Go over pattern. */
for (item = pattern; item->type; ++item) {
const struct mlx4_flow_proc_item *next = NULL;
@@ -702,14 +742,24 @@ fill:
}
/* Go over actions list. */
for (action = actions; action->type; ++action) {
+ /* This one may appear anywhere multiple times. */
+ if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
+ continue;
+ /* Fate-deciding actions may appear exactly once. */
+ if (overlap) {
+ msg = "cannot combine several fate-deciding actions,"
+ " choose between DROP, QUEUE or RSS";
+ goto exit_action_not_supported;
+ }
+ overlap = 1;
switch (action->type) {
const struct rte_flow_action_queue *queue;
const struct rte_flow_action_rss *rss;
- const struct rte_eth_rss_conf *rss_conf;
+ const uint8_t *rss_key;
+ uint32_t rss_key_len;
+ uint64_t fields;
unsigned int i;
- case RTE_FLOW_ACTION_TYPE_VOID:
- continue;
case RTE_FLOW_ACTION_TYPE_DROP:
flow->drop = 1;
break;
@@ -736,54 +786,68 @@ fill:
break;
rss = action->conf;
/* Default RSS configuration if none is provided. */
- rss_conf =
- rss->rss_conf ?
- rss->rss_conf :
- &(struct rte_eth_rss_conf){
- .rss_key = mlx4_rss_hash_key_default,
- .rss_key_len = MLX4_RSS_HASH_KEY_SIZE,
- .rss_hf = -1,
- };
+ if (rss->key_len) {
+ rss_key = rss->key;
+ rss_key_len = rss->key_len;
+ } else {
+ rss_key = mlx4_rss_hash_key_default;
+ rss_key_len = MLX4_RSS_HASH_KEY_SIZE;
+ }
/* Sanity checks. */
- for (i = 0; i < rss->num; ++i)
+ for (i = 0; i < rss->queue_num; ++i)
if (rss->queue[i] >=
priv->dev->data->nb_rx_queues)
break;
- if (i != rss->num) {
+ if (i != rss->queue_num) {
msg = "queue index target beyond number of"
" configured Rx queues";
goto exit_action_not_supported;
}
- if (!rte_is_power_of_2(rss->num)) {
+ if (!rte_is_power_of_2(rss->queue_num)) {
msg = "for RSS, mlx4 requires the number of"
" queues to be a power of two";
goto exit_action_not_supported;
}
- if (rss_conf->rss_key_len !=
- sizeof(flow->rss->key)) {
+ if (rss_key_len != sizeof(flow->rss->key)) {
msg = "mlx4 supports exactly one RSS hash key"
" length: "
MLX4_STR_EXPAND(MLX4_RSS_HASH_KEY_SIZE);
goto exit_action_not_supported;
}
- for (i = 1; i < rss->num; ++i)
+ for (i = 1; i < rss->queue_num; ++i)
if (rss->queue[i] - rss->queue[i - 1] != 1)
break;
- if (i != rss->num) {
+ if (i != rss->queue_num) {
msg = "mlx4 requires RSS contexts to use"
" consecutive queue indices only";
goto exit_action_not_supported;
}
- if (rss->queue[0] % rss->num) {
+ if (rss->queue[0] % rss->queue_num) {
msg = "mlx4 requires the first queue of a RSS"
" context to be aligned on a multiple"
" of the context size";
goto exit_action_not_supported;
}
+ if (rss->func &&
+ rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
+ msg = "the only supported RSS hash function"
+ " is Toeplitz";
+ goto exit_action_not_supported;
+ }
+ if (rss->level) {
+ msg = "a nonzero RSS encapsulation level is"
+ " not supported";
+ goto exit_action_not_supported;
+ }
+ rte_errno = 0;
+ fields = mlx4_conv_rss_types(priv, rss->types, 0);
+ if (fields == (uint64_t)-1 && rte_errno) {
+ msg = "unsupported RSS hash type requested";
+ goto exit_action_not_supported;
+ }
flow->rss = mlx4_rss_get
- (priv,
- mlx4_conv_rss_hf(priv, rss_conf->rss_hf),
- rss_conf->rss_key, rss->num, rss->queue);
+ (priv, fields, rss_key, rss->queue_num,
+ rss->queue);
if (!flow->rss) {
msg = "either invalid parameters or not enough"
" resources for additional multi-queue"
@@ -795,10 +859,9 @@ fill:
goto exit_action_not_supported;
}
}
- if (!flow->rss && !flow->drop)
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "no valid action");
+ /* When fate is unknown, drop traffic. */
+ if (!overlap)
+ flow->drop = 1;
/* Validation ends here. */
if (!addr) {
if (flow->rss)
@@ -820,11 +883,14 @@ fill:
},
};
- if (!mlx4_zmallocv(__func__, vec, RTE_DIM(vec)))
+ if (!mlx4_zmallocv(__func__, vec, RTE_DIM(vec))) {
+ if (temp.rss)
+ mlx4_rss_put(temp.rss);
return rte_flow_error_set
(error, -rte_errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"flow rule handle allocation failure");
+ }
/* Most fields will be updated by second pass. */
*flow = (struct rte_flow){
.ibv_attr = temp.ibv_attr,
@@ -1264,14 +1330,20 @@ mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error)
*/
uint32_t queues =
rte_align32pow2(priv->dev->data->nb_rx_queues + 1) >> 1;
- alignas(struct rte_flow_action_rss) uint8_t rss_conf_data
- [offsetof(struct rte_flow_action_rss, queue) +
- sizeof(((struct rte_flow_action_rss *)0)->queue[0]) * queues];
- struct rte_flow_action_rss *rss_conf = (void *)rss_conf_data;
+ uint16_t queue[queues];
+ struct rte_flow_action_rss action_rss = {
+ .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
+ .level = 0,
+ .types = 0,
+ .key_len = MLX4_RSS_HASH_KEY_SIZE,
+ .queue_num = queues,
+ .key = mlx4_rss_hash_key_default,
+ .queue = queue,
+ };
struct rte_flow_action actions[] = {
{
.type = RTE_FLOW_ACTION_TYPE_RSS,
- .conf = rss_conf,
+ .conf = &action_rss,
},
{
.type = RTE_FLOW_ACTION_TYPE_END,
@@ -1293,12 +1365,8 @@ mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error)
if (!queues)
goto error;
/* Prepare default RSS configuration. */
- *rss_conf = (struct rte_flow_action_rss){
- .rss_conf = NULL, /* Rely on default fallback settings. */
- .num = queues,
- };
for (i = 0; i != queues; ++i)
- rss_conf->queue[i] = i;
+ queue[i] = i;
/*
* Set up VLAN item if filtering is enabled and at least one VLAN
* filter is configured.
@@ -1357,7 +1425,7 @@ next_vlan:
if (j != sizeof(mac->addr_bytes))
continue;
if (flow->rss->queues != queues ||
- memcmp(flow->rss->queue_id, rss_conf->queue,
+ memcmp(flow->rss->queue_id, action_rss.queue,
queues * sizeof(flow->rss->queue_id[0])))
continue;
break;
@@ -1397,7 +1465,7 @@ next_vlan:
if (flow && flow->internal) {
assert(flow->rss);
if (flow->rss->queues != queues ||
- memcmp(flow->rss->queue_id, rss_conf->queue,
+ memcmp(flow->rss->queue_id, action_rss.queue,
queues * sizeof(flow->rss->queue_id[0])))
flow = NULL;
}
diff --git a/drivers/net/mlx4/mlx4_flow.h b/drivers/net/mlx4/mlx4_flow.h
index 00188a65..2917ebe9 100644
--- a/drivers/net/mlx4/mlx4_flow.h
+++ b/drivers/net/mlx4/mlx4_flow.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef RTE_PMD_MLX4_FLOW_H_
@@ -42,12 +42,14 @@ struct rte_flow {
uint32_t promisc:1; /**< This rule matches everything. */
uint32_t allmulti:1; /**< This rule matches all multicast traffic. */
uint32_t drop:1; /**< This rule drops packets. */
+ uint32_t priority; /**< Flow rule priority. */
struct mlx4_rss *rss; /**< Rx target. */
};
/* mlx4_flow.c */
-uint64_t mlx4_conv_rss_hf(struct priv *priv, uint64_t rss_hf);
+uint64_t mlx4_conv_rss_types(struct priv *priv, uint64_t types,
+ int verbs_to_dpdk);
int mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error);
void mlx4_flow_clean(struct priv *priv);
int mlx4_filter_ctrl(struct rte_eth_dev *dev,
diff --git a/drivers/net/mlx4/mlx4_glue.c b/drivers/net/mlx4/mlx4_glue.c
index 3b79d320..67b3bfac 100644
--- a/drivers/net/mlx4/mlx4_glue.c
+++ b/drivers/net/mlx4/mlx4_glue.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2018 6WIND S.A.
- * Copyright 2018 Mellanox
+ * Copyright 2018 Mellanox Technologies, Ltd
*/
#include <stddef.h>
diff --git a/drivers/net/mlx4/mlx4_glue.h b/drivers/net/mlx4/mlx4_glue.h
index 368f906b..668ca867 100644
--- a/drivers/net/mlx4/mlx4_glue.h
+++ b/drivers/net/mlx4/mlx4_glue.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2018 6WIND S.A.
- * Copyright 2018 Mellanox
+ * Copyright 2018 Mellanox Technologies, Ltd
*/
#ifndef MLX4_GLUE_H_
diff --git a/drivers/net/mlx4/mlx4_intr.c b/drivers/net/mlx4/mlx4_intr.c
index 2141992e..eeb982a0 100644
--- a/drivers/net/mlx4/mlx4_intr.c
+++ b/drivers/net/mlx4/mlx4_intr.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
/**
diff --git a/drivers/net/mlx4/mlx4_mr.c b/drivers/net/mlx4/mlx4_mr.c
index 9a1e4de3..d23d3c61 100644
--- a/drivers/net/mlx4/mlx4_mr.c
+++ b/drivers/net/mlx4/mlx4_mr.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
/**
@@ -30,237 +30,1152 @@
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_mempool.h>
-#include <rte_spinlock.h>
+#include <rte_rwlock.h>
#include "mlx4_glue.h"
+#include "mlx4_mr.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"
-struct mlx4_check_mempool_data {
+struct mr_find_contig_memsegs_data {
+ uintptr_t addr;
+ uintptr_t start;
+ uintptr_t end;
+ const struct rte_memseg_list *msl;
+};
+
+struct mr_update_mp_data {
+ struct rte_eth_dev *dev;
+ struct mlx4_mr_ctrl *mr_ctrl;
int ret;
- char *start;
- char *end;
};
/**
- * Called by mlx4_check_mempool() when iterating the memory chunks.
- *
- * @param[in] mp
- * Pointer to memory pool (unused).
- * @param[in, out] data
- * Pointer to shared buffer with mlx4_check_mempool().
- * @param[in] memhdr
- * Pointer to mempool chunk header.
- * @param mem_idx
- * Mempool element index (unused).
+ * Expand B-tree table to a given size. Can't be called while holding
+ * memory_hotplug_lock or priv->mr.rwlock due to rte_realloc().
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ * @param n
+ * Number of entries for expansion.
+ *
+ * @return
+ * 0 on success, -1 on failure.
*/
-static void
-mlx4_check_mempool_cb(struct rte_mempool *mp, void *opaque,
- struct rte_mempool_memhdr *memhdr,
- unsigned int mem_idx)
+static int
+mr_btree_expand(struct mlx4_mr_btree *bt, int n)
+{
+ void *mem;
+ int ret = 0;
+
+ if (n <= bt->size)
+ return ret;
+ /*
+ * The downside of directly using rte_realloc() is that SOCKET_ID_ANY
+ * is used internally if there is no room to expand in place. Because
+ * this is a rare case on a very slow path, it is acceptable.
+ * cache_bh[] is initially given more than enough space, so once it has
+ * been expanded, further expansion should not be needed again.
+ */
+ mem = rte_realloc(bt->table, n * sizeof(struct mlx4_mr_cache), 0);
+ if (mem == NULL) {
+ /* Not an error, B-tree search will be skipped. */
+ WARN("failed to expand MR B-tree (%p) table", (void *)bt);
+ ret = -1;
+ } else {
+ DEBUG("expanded MR B-tree table (size=%u)", n);
+ bt->table = mem;
+ bt->size = n;
+ }
+ return ret;
+}
+
+/**
+ * Look up LKey from given B-tree lookup table, store the last index and return
+ * searched LKey.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ * @param[out] idx
+ * Pointer to index. Even on search failure, returns index where it stops
+ * searching so that index can be used when inserting a new entry.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static uint32_t
+mr_btree_lookup(struct mlx4_mr_btree *bt, uint16_t *idx, uintptr_t addr)
+{
+ struct mlx4_mr_cache *lkp_tbl;
+ uint16_t n;
+ uint16_t base = 0;
+
+ assert(bt != NULL);
+ lkp_tbl = *bt->table;
+ n = bt->len;
+ /* First entry must be NULL for comparison. */
+ assert(bt->len > 0 || (lkp_tbl[0].start == 0 &&
+ lkp_tbl[0].lkey == UINT32_MAX));
+ /* Binary search. */
+ do {
+ register uint16_t delta = n >> 1;
+
+ if (addr < lkp_tbl[base + delta].start) {
+ n = delta;
+ } else {
+ base += delta;
+ n -= delta;
+ }
+ } while (n > 1);
+ assert(addr >= lkp_tbl[base].start);
+ *idx = base;
+ if (addr < lkp_tbl[base].end)
+ return lkp_tbl[base].lkey;
+ /* Not found. */
+ return UINT32_MAX;
+}
+
+/**
+ * Insert an entry to B-tree lookup table.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ * @param entry
+ * Pointer to new entry to insert.
+ *
+ * @return
+ * 0 on success, -1 on failure.
+ */
+static int
+mr_btree_insert(struct mlx4_mr_btree *bt, struct mlx4_mr_cache *entry)
{
- struct mlx4_check_mempool_data *data = opaque;
+ struct mlx4_mr_cache *lkp_tbl;
+ uint16_t idx = 0;
+ size_t shift;
- (void)mp;
- (void)mem_idx;
- /* It already failed, skip the next chunks. */
- if (data->ret != 0)
+ assert(bt != NULL);
+ assert(bt->len <= bt->size);
+ assert(bt->len > 0);
+ lkp_tbl = *bt->table;
+ /* Find out the slot for insertion. */
+ if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
+ DEBUG("abort insertion to B-tree(%p): already exist at"
+ " idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
+ (void *)bt, idx, entry->start, entry->end, entry->lkey);
+ /* Already exist, return. */
+ return 0;
+ }
+ /* If table is full, return error. */
+ if (unlikely(bt->len == bt->size)) {
+ bt->overflow = 1;
+ return -1;
+ }
+ /* Insert entry. */
+ ++idx;
+ shift = (bt->len - idx) * sizeof(struct mlx4_mr_cache);
+ if (shift)
+ memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift);
+ lkp_tbl[idx] = *entry;
+ bt->len++;
+ DEBUG("inserted B-tree(%p)[%u],"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
+ (void *)bt, idx, entry->start, entry->end, entry->lkey);
+ return 0;
+}
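mr_btree_lookup() and mr_btree_insert() above keep the cache as an array sorted by start address with a sentinel first entry; lookups binary-search it and insertions shift the tail with memmove(). The following is a standalone sketch of that pattern with simplified types and a fixed-size table (no overflow flag, no reallocation):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct entry {
	uintptr_t start; /* Range base (inclusive). */
	uintptr_t end;   /* Range end (exclusive). */
	uint32_t lkey;   /* Value cached for the range. */
};

#define TBL_SIZE 8

/* Slot 0 is a sentinel: start = 0, lkey = UINT32_MAX ("not found"). */
static struct entry tbl[TBL_SIZE] = { { 0, 0, UINT32_MAX } };
static unsigned int tbl_len = 1;

/*
 * Binary search: return the cached lkey if addr falls in a known range,
 * UINT32_MAX otherwise; *idx is left at the closest lower entry so it can
 * be reused as the insertion point.
 */
static uint32_t
lookup(uintptr_t addr, unsigned int *idx)
{
	unsigned int base = 0;
	unsigned int n = tbl_len;

	do {
		unsigned int delta = n >> 1;

		if (addr < tbl[base + delta].start) {
			n = delta;
		} else {
			base += delta;
			n -= delta;
		}
	} while (n > 1);
	*idx = base;
	return addr < tbl[base].end ? tbl[base].lkey : UINT32_MAX;
}

/* Insert a new range, keeping the table sorted by start address. */
static int
insert(const struct entry *e)
{
	unsigned int idx;

	if (lookup(e->start, &idx) != UINT32_MAX)
		return 0; /* Already present. */
	if (tbl_len == TBL_SIZE)
		return -1; /* Full; the driver would set bt->overflow here. */
	++idx;
	memmove(&tbl[idx + 1], &tbl[idx], (tbl_len - idx) * sizeof(tbl[0]));
	tbl[idx] = *e;
	++tbl_len;
	return 0;
}

int
main(void)
{
	struct entry e = { 0x1000, 0x2000, 42 };
	unsigned int idx;

	insert(&e);
	printf("lkey=%u\n", (unsigned int)lookup(0x1800, &idx)); /* 42 */
	printf("lkey=%u\n", (unsigned int)lookup(0x3000, &idx)); /* not found */
	return 0;
}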
+
+/**
+ * Initialize B-tree and allocate memory for lookup table.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ * @param n
+ * Number of entries to allocate.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_mr_btree_init(struct mlx4_mr_btree *bt, int n, int socket)
+{
+ if (bt == NULL) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ memset(bt, 0, sizeof(*bt));
+ bt->table = rte_calloc_socket("B-tree table",
+ n, sizeof(struct mlx4_mr_cache),
+ 0, socket);
+ if (bt->table == NULL) {
+ rte_errno = ENOMEM;
+ ERROR("failed to allocate memory for btree cache on socket %d",
+ socket);
+ return -rte_errno;
+ }
+ bt->size = n;
+ /* First entry must be NULL for binary search. */
+ (*bt->table)[bt->len++] = (struct mlx4_mr_cache) {
+ .lkey = UINT32_MAX,
+ };
+ DEBUG("initialized B-tree %p with table %p",
+ (void *)bt, (void *)bt->table);
+ return 0;
+}
+
+/**
+ * Free B-tree resources.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ */
+void
+mlx4_mr_btree_free(struct mlx4_mr_btree *bt)
+{
+ if (bt == NULL)
return;
- /* It is the first chunk. */
- if (data->start == NULL && data->end == NULL) {
- data->start = memhdr->addr;
- data->end = data->start + memhdr->len;
+ DEBUG("freeing B-tree %p with table %p", (void *)bt, (void *)bt->table);
+ rte_free(bt->table);
+ memset(bt, 0, sizeof(*bt));
+}
+
+#ifndef NDEBUG
+/**
+ * Dump all the entries in a B-tree
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ */
+void
+mlx4_mr_btree_dump(struct mlx4_mr_btree *bt)
+{
+ int idx;
+ struct mlx4_mr_cache *lkp_tbl;
+
+ if (bt == NULL)
return;
+ lkp_tbl = *bt->table;
+ for (idx = 0; idx < bt->len; ++idx) {
+ struct mlx4_mr_cache *entry = &lkp_tbl[idx];
+
+ DEBUG("B-tree(%p)[%u],"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
+ (void *)bt, idx, entry->start, entry->end, entry->lkey);
}
- if (data->end == memhdr->addr) {
- data->end += memhdr->len;
- return;
+}
+#endif
+
+/**
+ * Find virtually contiguous memory chunk in a given MR.
+ *
+ * @param mr
+ * Pointer to MR structure.
+ * @param[out] entry
+ * Pointer to returning MR cache entry. If not found, this will not be
+ * updated.
+ * @param base_idx
+ * Start index of the memseg bitmap.
+ *
+ * @return
+ * Next index to go on lookup.
+ */
+static int
+mr_find_next_chunk(struct mlx4_mr *mr, struct mlx4_mr_cache *entry,
+ int base_idx)
+{
+ uintptr_t start = 0;
+ uintptr_t end = 0;
+ uint32_t idx = 0;
+
+ for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
+ if (rte_bitmap_get(mr->ms_bmp, idx)) {
+ const struct rte_memseg_list *msl;
+ const struct rte_memseg *ms;
+
+ msl = mr->msl;
+ ms = rte_fbarray_get(&msl->memseg_arr,
+ mr->ms_base_idx + idx);
+ assert(msl->page_sz == ms->hugepage_sz);
+ if (!start)
+ start = ms->addr_64;
+ end = ms->addr_64 + ms->hugepage_sz;
+ } else if (start) {
+ /* Passed the end of a fragment. */
+ break;
+ }
}
- if (data->start == (char *)memhdr->addr + memhdr->len) {
- data->start -= memhdr->len;
- return;
+ if (start) {
+ /* Found one chunk. */
+ entry->start = start;
+ entry->end = end;
+ entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey);
}
- /* Error, mempool is not virtually contiguous. */
- data->ret = -1;
+ return idx;
}
/**
- * Check if a mempool can be used: it must be virtually contiguous.
+ * Insert an MR into the global B-tree cache. It may fail when memory is low.
+ * In that case, the entry will have to be searched by mr_lookup_dev_list() in
+ * mlx4_mr_create() on miss.
*
- * @param[in] mp
- * Pointer to memory pool.
- * @param[out] start
- * Pointer to the start address of the mempool virtual memory area.
- * @param[out] end
- * Pointer to the end address of the mempool virtual memory area.
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr
+ * Pointer to MR to insert.
*
* @return
- * 0 on success (mempool is virtually contiguous), -1 on error.
+ * 0 on success, -1 on failure.
*/
static int
-mlx4_check_mempool(struct rte_mempool *mp, uintptr_t *start, uintptr_t *end)
+mr_insert_dev_cache(struct rte_eth_dev *dev, struct mlx4_mr *mr)
{
- struct mlx4_check_mempool_data data;
+ struct priv *priv = dev->data->dev_private;
+ unsigned int n;
- memset(&data, 0, sizeof(data));
- rte_mempool_mem_iter(mp, mlx4_check_mempool_cb, &data);
- *start = (uintptr_t)data.start;
- *end = (uintptr_t)data.end;
- return data.ret;
+ DEBUG("port %u inserting MR(%p) to global cache",
+ dev->data->port_id, (void *)mr);
+ for (n = 0; n < mr->ms_bmp_n; ) {
+ struct mlx4_mr_cache entry = { 0, };
+
+ /* Find a contiguous chunk and advance the index. */
+ n = mr_find_next_chunk(mr, &entry, n);
+ if (!entry.end)
+ break;
+ if (mr_btree_insert(&priv->mr.cache, &entry) < 0) {
+ /*
+ * Overflowed, but the global table cannot be expanded
+ * because of deadlock.
+ */
+ return -1;
+ }
+ }
+ return 0;
}
/**
- * Obtain a memory region from a memory pool.
+ * Look up address in the original global MR list.
*
- * If a matching memory region already exists, it is returned with its
- * reference count incremented, otherwise a new one is registered.
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] entry
+ * Pointer to returning MR cache entry. If no match, this will not be updated.
+ * @param addr
+ * Search key.
*
- * @param priv
- * Pointer to private structure.
- * @param mp
- * Pointer to memory pool.
+ * @return
+ * Found MR on match, NULL otherwise.
+ */
+static struct mlx4_mr *
+mr_lookup_dev_list(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
+ uintptr_t addr)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx4_mr *mr;
+
+ /* Iterate all the existing MRs. */
+ LIST_FOREACH(mr, &priv->mr.mr_list, mr) {
+ unsigned int n;
+
+ if (mr->ms_n == 0)
+ continue;
+ for (n = 0; n < mr->ms_bmp_n; ) {
+ struct mlx4_mr_cache ret = { 0, };
+
+ n = mr_find_next_chunk(mr, &ret, n);
+ if (addr >= ret.start && addr < ret.end) {
+ /* Found. */
+ *entry = ret;
+ return mr;
+ }
+ }
+ }
+ return NULL;
+}
+
+/**
+ * Look up address on device.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] entry
+ * Pointer to returning MR cache entry. If no match, this will not be updated.
+ * @param addr
+ * Search key.
*
* @return
- * Memory region pointer, NULL in case of error and rte_errno is set.
+ * Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
*/
-struct mlx4_mr *
-mlx4_mr_get(struct priv *priv, struct rte_mempool *mp)
+static uint32_t
+mr_lookup_dev(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
+ uintptr_t addr)
{
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
- uintptr_t start;
- uintptr_t end;
- unsigned int i;
+ struct priv *priv = dev->data->dev_private;
+ uint16_t idx;
+ uint32_t lkey = UINT32_MAX;
struct mlx4_mr *mr;
- if (mlx4_check_mempool(mp, &start, &end) != 0) {
- rte_errno = EINVAL;
- ERROR("mempool %p: not virtually contiguous",
- (void *)mp);
- return NULL;
+ /*
+ * If the global cache has overflowed because it failed to expand the
+ * B-tree table, it can't hold all the existing MRs. In that case, the
+ * address has to be searched by traversing the original MR list instead,
+ * which is a very slow path. Otherwise, the global cache is all inclusive.
+ */
+ if (!unlikely(priv->mr.cache.overflow)) {
+ lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr);
+ if (lkey != UINT32_MAX)
+ *entry = (*priv->mr.cache.table)[idx];
+ } else {
+ /* Falling back to the slowest path. */
+ mr = mr_lookup_dev_list(dev, entry, addr);
+ if (mr != NULL)
+ lkey = entry->lkey;
}
- DEBUG("mempool %p area start=%p end=%p size=%zu",
- (void *)mp, (void *)start, (void *)end,
- (size_t)(end - start));
- /* Round start and end to page boundary if found in memory segments. */
- for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
- uintptr_t addr = (uintptr_t)ms[i].addr;
- size_t len = ms[i].len;
- unsigned int align = ms[i].hugepage_sz;
-
- if ((start > addr) && (start < addr + len))
- start = RTE_ALIGN_FLOOR(start, align);
- if ((end > addr) && (end < addr + len))
- end = RTE_ALIGN_CEIL(end, align);
+ assert(lkey == UINT32_MAX || (addr >= entry->start &&
+ addr < entry->end));
+ return lkey;
+}
+
+/**
+ * Free MR resources. The MR lock must not be held to avoid a deadlock:
+ * rte_free() can raise a memory free event and the callback function will
+ * spin on the lock.
+ *
+ * @param mr
+ * Pointer to MR to free.
+ */
+static void
+mr_free(struct mlx4_mr *mr)
+{
+ if (mr == NULL)
+ return;
+ DEBUG("freeing MR(%p):", (void *)mr);
+ if (mr->ibv_mr != NULL)
+ claim_zero(mlx4_glue->dereg_mr(mr->ibv_mr));
+ if (mr->ms_bmp != NULL)
+ rte_bitmap_free(mr->ms_bmp);
+ rte_free(mr);
+}
+
+/**
+ * Release resources of detached MRs having no online entry.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+static void
+mlx4_mr_garbage_collect(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx4_mr *mr_next;
+ struct mlx4_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);
+
+ /*
+ * MRs can't be freed while holding the lock because rte_free() could
+ * invoke the memory free callback function, which would deadlock.
+ */
+ rte_rwlock_write_lock(&priv->mr.rwlock);
+ /* Detach the whole free list and release it after unlocking. */
+ free_list = priv->mr.mr_free_list;
+ LIST_INIT(&priv->mr.mr_free_list);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+ /* Release resources. */
+ mr_next = LIST_FIRST(&free_list);
+ while (mr_next != NULL) {
+ struct mlx4_mr *mr = mr_next;
+
+ mr_next = LIST_NEXT(mr, mr);
+ mr_free(mr);
}
- DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
- (void *)mp, (void *)start, (void *)end,
- (size_t)(end - start));
- rte_spinlock_lock(&priv->mr_lock);
- LIST_FOREACH(mr, &priv->mr, next)
- if (mp == mr->mp && start >= mr->start && end <= mr->end)
- break;
- if (mr) {
- ++mr->refcnt;
- goto release;
+}
+
+/* Called during rte_memseg_contig_walk() by mlx4_mr_create(). */
+static int
+mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
+ const struct rte_memseg *ms, size_t len, void *arg)
+{
+ struct mr_find_contig_memsegs_data *data = arg;
+
+ if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len)
+ return 0;
+ /* Found, save it and stop walking. */
+ data->start = ms->addr_64;
+ data->end = ms->addr_64 + len;
+ data->msl = msl;
+ return 1;
+}
+
+/**
+ * Create a new global Memory Region (MR) for a missing virtual address.
+ * Register entire virtually contiguous memory chunk around the address.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] entry
+ * Pointer to returning MR cache entry, found in the global cache or newly
+ * created. If creation fails, this will not be updated.
+ * @param addr
+ * Target virtual address to register.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
+ */
+static uint32_t
+mlx4_mr_create(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
+ uintptr_t addr)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ const struct rte_memseg_list *msl;
+ const struct rte_memseg *ms;
+ struct mlx4_mr *mr = NULL;
+ size_t len;
+ uint32_t ms_n;
+ uint32_t bmp_size;
+ void *bmp_mem;
+ int ms_idx_shift = -1;
+ unsigned int n;
+ struct mr_find_contig_memsegs_data data = {
+ .addr = addr,
+ };
+ struct mr_find_contig_memsegs_data data_re;
+
+ DEBUG("port %u creating a MR using address (%p)",
+ dev->data->port_id, (void *)addr);
+ /*
+ * Release detached MRs, if any. This can't be called while holding
+ * either memory_hotplug_lock or priv->mr.rwlock. MRs on the free list
+ * have been detached by the memory free event but couldn't be released
+ * inside the callback because of the deadlock risk. As a result,
+ * releasing resources is opportunistic.
+ */
+ mlx4_mr_garbage_collect(dev);
+ /*
+ * Find out a contiguous virtual address chunk in use, to which the
+ * given address belongs, in order to register the maximum range. In
+ * the best case where mempools are not dynamically recreated and
+ * '--socket-mem' is specified as an EAL option, it is very likely to
+ * have only one MR (LKey) per socket and per hugepage size even
+ * though the system memory is highly fragmented.
+ */
+ if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) {
+ WARN("port %u unable to find virtually contiguous"
+ " chunk for address (%p)."
+ " rte_memseg_contig_walk() failed.",
+ dev->data->port_id, (void *)addr);
+ rte_errno = ENXIO;
+ goto err_nolock;
}
- mr = rte_malloc(__func__, sizeof(*mr), 0);
- if (!mr) {
+alloc_resources:
+ /* Addresses must be page-aligned. */
+ assert(rte_is_aligned((void *)data.start, data.msl->page_sz));
+ assert(rte_is_aligned((void *)data.end, data.msl->page_sz));
+ msl = data.msl;
+ ms = rte_mem_virt2memseg((void *)data.start, msl);
+ len = data.end - data.start;
+ assert(msl->page_sz == ms->hugepage_sz);
+ /* Number of memsegs in the range. */
+ ms_n = len / msl->page_sz;
+ DEBUG("port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
+ " page_sz=0x%" PRIx64 ", ms_n=%u",
+ dev->data->port_id, (void *)addr,
+ data.start, data.end, msl->page_sz, ms_n);
+ /* Size of memory for bitmap. */
+ bmp_size = rte_bitmap_get_memory_footprint(ms_n);
+ mr = rte_zmalloc_socket(NULL,
+ RTE_ALIGN_CEIL(sizeof(*mr),
+ RTE_CACHE_LINE_SIZE) +
+ bmp_size,
+ RTE_CACHE_LINE_SIZE, msl->socket_id);
+ if (mr == NULL) {
+ WARN("port %u unable to allocate memory for a new MR of"
+ " address (%p).",
+ dev->data->port_id, (void *)addr);
rte_errno = ENOMEM;
- goto release;
+ goto err_nolock;
}
- *mr = (struct mlx4_mr){
- .start = start,
- .end = end,
- .refcnt = 1,
- .priv = priv,
- .mr = mlx4_glue->reg_mr(priv->pd, (void *)start, end - start,
- IBV_ACCESS_LOCAL_WRITE),
- .mp = mp,
- };
- if (mr->mr) {
- mr->lkey = mr->mr->lkey;
- LIST_INSERT_HEAD(&priv->mr, mr, next);
- } else {
- rte_free(mr);
- mr = NULL;
- rte_errno = errno ? errno : EINVAL;
+ mr->msl = msl;
+ /*
+ * Save the index of the first memseg and initialize memseg bitmap. To
+ * see if a memseg of ms_idx in the memseg-list is still valid, check:
+ * rte_bitmap_get(mr->bmp, ms_idx - mr->ms_base_idx)
+ */
+ mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
+ bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
+ mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
+ if (mr->ms_bmp == NULL) {
+ WARN("port %u unable to initialize bitamp for a new MR of"
+ " address (%p).",
+ dev->data->port_id, (void *)addr);
+ rte_errno = EINVAL;
+ goto err_nolock;
+ }
+ /*
+ * Recheck whether the extended contiguous chunk is still valid.
+ * Because memory_hotplug_lock can't be held if there are any
+ * memory-related calls in a critical path, the resource allocation
+ * above can't be locked. If the memory has changed at this point, try
+ * again with just a single page. If not, go on with the big chunk
+ * atomically from here.
+ */
+ rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+ data_re = data;
+ if (len > msl->page_sz &&
+ !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
+ WARN("port %u unable to find virtually contiguous"
+ " chunk for address (%p)."
+ " rte_memseg_contig_walk() failed.",
+ dev->data->port_id, (void *)addr);
+ rte_errno = ENXIO;
+ goto err_memlock;
+ }
+ if (data.start != data_re.start || data.end != data_re.end) {
+ /*
+ * The extended contiguous chunk has been changed. Try again
+ * with a single memseg instead.
+ */
+ data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
+ data.end = data.start + msl->page_sz;
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ mr_free(mr);
+ goto alloc_resources;
}
-release:
- rte_spinlock_unlock(&priv->mr_lock);
- return mr;
+ assert(data.msl == data_re.msl);
+ rte_rwlock_write_lock(&priv->mr.rwlock);
+ /*
+ * Check that the address is really missing. If another thread already
+ * created one, or it is not found due to overflow, abort and return.
+ */
+ if (mr_lookup_dev(dev, entry, addr) != UINT32_MAX) {
+ /*
+ * Insert into the global cache table. It may fail when memory
+ * is low. In that case, this entry will have to be searched
+ * here again.
+ */
+ mr_btree_insert(&priv->mr.cache, entry);
+ DEBUG("port %u found MR for %p on final lookup, abort",
+ dev->data->port_id, (void *)addr);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ /*
+ * Must be unlocked before calling rte_free() because
+ * mlx4_mr_mem_event_free_cb() can be called inside.
+ */
+ mr_free(mr);
+ return entry->lkey;
+ }
+ /*
+ * Trim start and end addresses for verbs MR. Set bits for registering
+ * memsegs but exclude already registered ones. Bitmap can be
+ * fragmented.
+ */
+ for (n = 0; n < ms_n; ++n) {
+ uintptr_t start;
+ struct mlx4_mr_cache ret = { 0, };
+
+ start = data_re.start + n * msl->page_sz;
+ /* Exclude memsegs already registered by other MRs. */
+ if (mr_lookup_dev(dev, &ret, start) == UINT32_MAX) {
+ /*
+ * Start from the first unregistered memseg in the
+ * extended range.
+ */
+ if (ms_idx_shift == -1) {
+ mr->ms_base_idx += n;
+ data.start = start;
+ ms_idx_shift = n;
+ }
+ data.end = start + msl->page_sz;
+ rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift);
+ ++mr->ms_n;
+ }
+ }
+ len = data.end - data.start;
+ mr->ms_bmp_n = len / msl->page_sz;
+ assert(ms_idx_shift + mr->ms_bmp_n <= ms_n);
+ /*
+ * Finally create a verbs MR for the memory chunk. ibv_reg_mr() can be
+ * called while holding the memory lock because it doesn't use
+ * mlx4_alloc_buf_extern() which eventually calls rte_malloc_socket()
+ * through mlx4_alloc_verbs_buf().
+ */
+ mr->ibv_mr = mlx4_glue->reg_mr(priv->pd, (void *)data.start, len,
+ IBV_ACCESS_LOCAL_WRITE);
+ if (mr->ibv_mr == NULL) {
+ WARN("port %u fail to create a verbs MR for address (%p)",
+ dev->data->port_id, (void *)addr);
+ rte_errno = EINVAL;
+ goto err_mrlock;
+ }
+ assert((uintptr_t)mr->ibv_mr->addr == data.start);
+ assert(mr->ibv_mr->length == len);
+ LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
+ DEBUG("port %u MR CREATED (%p) for %p:\n"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
+ " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
+ dev->data->port_id, (void *)mr, (void *)addr,
+ data.start, data.end, rte_cpu_to_be_32(mr->ibv_mr->lkey),
+ mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
+ /* Insert to the global cache table. */
+ mr_insert_dev_cache(dev, mr);
+ /* Fill in output data. */
+ mr_lookup_dev(dev, entry, addr);
+ /* Lookup can't fail. */
+ assert(entry->lkey != UINT32_MAX);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ return entry->lkey;
+err_mrlock:
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+err_memlock:
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+err_nolock:
+ /*
+ * In case of error, as this can be called from a datapath, only a
+ * warning message is printed per error. Must be unlocked before
+ * calling rte_free() because mlx4_mr_mem_event_free_cb() can be called
+ * inside.
+ */
+ mr_free(mr);
+ return UINT32_MAX;
}
/**
- * Release a memory region.
+ * Rebuild the global B-tree cache of device from the original MR list.
*
- * This function decrements its reference count and destroys it after
- * reaching 0.
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+static void
+mr_rebuild_dev_cache(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx4_mr *mr;
+
+ DEBUG("port %u rebuild dev cache[]", dev->data->port_id);
+ /* Flush cache to rebuild. */
+ priv->mr.cache.len = 1;
+ priv->mr.cache.overflow = 0;
+ /* Iterate all the existing MRs. */
+ LIST_FOREACH(mr, &priv->mr.mr_list, mr)
+ if (mr_insert_dev_cache(dev, mr) < 0)
+ return;
+}
+
+/**
+ * Callback for memory free event. Iterate freed memsegs and check whether each
+ * belongs to an existing MR. If found, clear the corresponding bit in the MR's
+ * bitmap. As a result, the MR may become fragmented. If it becomes empty, the
+ * MR will be freed later by mlx4_mr_garbage_collect().
*
- * Note to avoid race conditions given this function may be used from the
- * data plane, it's extremely important that each user holds its own
- * reference.
+ * The global cache must be rebuilt if there's any change and this event has to
+ * be propagated to dataplane threads to flush the local caches.
*
- * @param mr
- * Memory region to release.
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param addr
+ * Address of freed memory.
+ * @param len
+ * Size of freed memory.
+ */
+static void
+mlx4_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
+{
+ struct priv *priv = dev->data->dev_private;
+ const struct rte_memseg_list *msl;
+ struct mlx4_mr *mr;
+ int ms_n;
+ int i;
+ int rebuild = 0;
+
+ DEBUG("port %u free callback: addr=%p, len=%zu",
+ dev->data->port_id, addr, len);
+ msl = rte_mem_virt2memseg_list(addr);
+ /* addr and len must be page-aligned. */
+ assert((uintptr_t)addr == RTE_ALIGN((uintptr_t)addr, msl->page_sz));
+ assert(len == RTE_ALIGN(len, msl->page_sz));
+ ms_n = len / msl->page_sz;
+ rte_rwlock_write_lock(&priv->mr.rwlock);
+ /* Clear bits of freed memsegs from MR. */
+ for (i = 0; i < ms_n; ++i) {
+ const struct rte_memseg *ms;
+ struct mlx4_mr_cache entry;
+ uintptr_t start;
+ int ms_idx;
+ uint32_t pos;
+
+ /* Find MR having this memseg. */
+ start = (uintptr_t)addr + i * msl->page_sz;
+ mr = mr_lookup_dev_list(dev, &entry, start);
+ if (mr == NULL)
+ continue;
+ ms = rte_mem_virt2memseg((void *)start, msl);
+ assert(ms != NULL);
+ assert(msl->page_sz == ms->hugepage_sz);
+ ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
+ pos = ms_idx - mr->ms_base_idx;
+ assert(rte_bitmap_get(mr->ms_bmp, pos));
+ assert(pos < mr->ms_bmp_n);
+ DEBUG("port %u MR(%p): clear bitmap[%u] for addr %p",
+ dev->data->port_id, (void *)mr, pos, (void *)start);
+ rte_bitmap_clear(mr->ms_bmp, pos);
+ if (--mr->ms_n == 0) {
+ LIST_REMOVE(mr, mr);
+ LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);
+ DEBUG("port %u remove MR(%p) from list",
+ dev->data->port_id, (void *)mr);
+ }
+ /*
+ * The MR is fragmented or will be freed; the global cache must be
+ * rebuilt.
+ */
+ rebuild = 1;
+ }
+ if (rebuild) {
+ mr_rebuild_dev_cache(dev);
+ /*
+ * Flush local caches by propagating invalidation across cores.
+ * rte_smp_wmb() is enough to synchronize this event. If one of
+ * the freed memsegs is seen by another core, that means the
+ * memseg has been reallocated by the allocator, which can only
+ * happen after this free call. Therefore, this store instruction
+ * (incrementing the generation below) is guaranteed to be seen
+ * by that core before it sees the newly allocated memory.
+ */
+ ++priv->mr.dev_gen;
+ DEBUG("broadcasting local cache flush, gen=%d",
+ priv->mr.dev_gen);
+ rte_smp_wmb();
+ }
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+#ifndef NDEBUG
+ if (rebuild)
+ mlx4_mr_dump_dev(dev);
+#endif
+}
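The invalidation scheme above boils down to a shared generation number: the control path bumps it after rebuilding the global cache, and each datapath queue flushes its local cache once it notices its own copy is stale. A minimal sketch of that protocol using C11 atomics in place of rte_smp_wmb(); only the dev_gen/cur_gen names mirror the driver fields, everything else is illustrative:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Shared, device-wide generation number (priv->mr.dev_gen in the driver). */
static atomic_uint dev_gen = 0;

struct local_cache {
	unsigned int cur_gen; /* Generation this cache was filled under. */
	uint32_t entries[4];  /* Cached translations (illustrative). */
};

/*
 * Control path: after changing the global tables, publish a new generation
 * so every consumer flushes its local copy before trusting it again.
 */
static void
invalidate_all(void)
{
	atomic_fetch_add_explicit(&dev_gen, 1, memory_order_release);
}

/* Datapath: flush the local cache if the device generation moved on. */
static void
maybe_flush(struct local_cache *c)
{
	unsigned int gen = atomic_load_explicit(&dev_gen, memory_order_acquire);

	if (c->cur_gen != gen) {
		memset(c->entries, 0, sizeof(c->entries));
		c->cur_gen = gen;
		printf("local cache flushed, gen=%u\n", gen);
	}
}

int
main(void)
{
	struct local_cache c = { 0, { 1, 2, 3, 4 } };

	maybe_flush(&c);  /* Nothing to do, generations match. */
	invalidate_all(); /* E.g. memory was freed and an MR removed. */
	maybe_flush(&c);  /* Now the stale entries are dropped. */
	return 0;
}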
+
+/**
+ * Callback for memory event.
+ *
+ * @param event_type
+ * Memory event type.
+ * @param addr
+ * Address of memory.
+ * @param len
+ * Size of memory.
*/
void
-mlx4_mr_put(struct mlx4_mr *mr)
+mlx4_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
+ size_t len, void *arg __rte_unused)
{
- struct priv *priv = mr->priv;
-
- rte_spinlock_lock(&priv->mr_lock);
- assert(mr->refcnt);
- if (--mr->refcnt)
- goto release;
- LIST_REMOVE(mr, next);
- claim_zero(mlx4_glue->dereg_mr(mr->mr));
- rte_free(mr);
-release:
- rte_spinlock_unlock(&priv->mr_lock);
+ struct priv *priv;
+
+ switch (event_type) {
+ case RTE_MEM_EVENT_FREE:
+ rte_rwlock_read_lock(&mlx4_mem_event_rwlock);
+ /* Iterate all the existing mlx4 devices. */
+ LIST_FOREACH(priv, &mlx4_mem_event_cb_list, mem_event_cb)
+ mlx4_mr_mem_event_free_cb(priv->dev, addr, len);
+ rte_rwlock_read_unlock(&mlx4_mem_event_rwlock);
+ break;
+ case RTE_MEM_EVENT_ALLOC:
+ default:
+ break;
+ }
}
/**
- * Add memory region (MR) <-> memory pool (MP) association to txq->mp2mr[].
- * If mp2mr[] is full, remove an entry first.
+ * Look up address in the global MR cache table. If not found, create a new MR.
+ * Insert the found/created entry into the local bottom-half cache table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param[out] entry
+ * Pointer to returning MR cache entry, found in the global cache or newly
+ * created. If creation fails, this is not written.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static uint32_t
+mlx4_mr_lookup_dev(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
+ struct mlx4_mr_cache *entry, uintptr_t addr)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx4_mr_btree *bt = &mr_ctrl->cache_bh;
+ uint16_t idx;
+ uint32_t lkey;
+
+ /* If local cache table is full, try to double it. */
+ if (unlikely(bt->len == bt->size))
+ mr_btree_expand(bt, bt->size << 1);
+ /* Look up in the global cache. */
+ rte_rwlock_read_lock(&priv->mr.rwlock);
+ lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr);
+ if (lkey != UINT32_MAX) {
+ /* Found. */
+ *entry = (*priv->mr.cache.table)[idx];
+ rte_rwlock_read_unlock(&priv->mr.rwlock);
+ /*
+ * Update local cache. Even if it fails, return the found entry
+ * to update top-half cache. Next time, this entry will be found
+ * in the global cache.
+ */
+ mr_btree_insert(bt, entry);
+ return lkey;
+ }
+ rte_rwlock_read_unlock(&priv->mr.rwlock);
+ /* First time to see the address? Create a new MR. */
+ lkey = mlx4_mr_create(dev, entry, addr);
+ /*
+ * Update the local cache if a new global MR was successfully created.
+ * Even if creation failed, there's no action to take in this datapath
+ * code, as the returned LKey is invalid and will eventually make the
+ * HW fail.
+ */
+ if (lkey != UINT32_MAX)
+ mr_btree_insert(bt, entry);
+ return lkey;
+}
+
+/**
+ * Bottom-half of LKey search on the datapath. First search in cache_bh[] and,
+ * on a miss, search the global MR cache table and add the new entry to the
+ * per-queue local caches.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static uint32_t
+mlx4_mr_addr2mr_bh(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
+ uintptr_t addr)
+{
+ uint32_t lkey;
+ uint16_t bh_idx = 0;
+ /* Victim in top-half cache to replace with new entry. */
+ struct mlx4_mr_cache *repl = &mr_ctrl->cache[mr_ctrl->head];
+
+ /* Binary-search MR translation table. */
+ lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
+ /* Update top-half cache. */
+ if (likely(lkey != UINT32_MAX)) {
+ *repl = (*mr_ctrl->cache_bh.table)[bh_idx];
+ } else {
+ /*
+ * If the local lookup table misses, search the global cache; local
+ * cache_bh[] is updated inside if possible. The top-half cache
+ * entry is updated as well.
+ */
+ lkey = mlx4_mr_lookup_dev(dev, mr_ctrl, repl, addr);
+ if (unlikely(lkey == UINT32_MAX))
+ return UINT32_MAX;
+ }
+ /* Update the most recently used entry. */
+ mr_ctrl->mru = mr_ctrl->head;
+ /* Point to the next victim, the oldest. */
+ mr_ctrl->head = (mr_ctrl->head + 1) % MLX4_MR_CACHE_N;
+ return lkey;
+}
+
+/**
+ * Bottom-half of LKey search on Rx.
+ *
+ * @param rxq
+ * Pointer to Rx queue structure.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+uint32_t
+mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr)
+{
+ struct mlx4_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
+ struct priv *priv = rxq->priv;
+
+ DEBUG("Rx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
+ rxq->stats.idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
+ return mlx4_mr_addr2mr_bh(priv->dev, mr_ctrl, addr);
+}
+
+/**
+ * Bottom-half of LKey search on Tx.
*
* @param txq
* Pointer to Tx queue structure.
- * @param[in] mp
- * Memory pool for which a memory region lkey must be added.
- * @param[in] i
- * Index in memory pool (MP) where to add memory region (MR).
+ * @param addr
+ * Search key.
*
* @return
- * Added mr->lkey on success, (uint32_t)-1 on failure.
+ * Searched LKey on success, UINT32_MAX on no match.
*/
uint32_t
-mlx4_txq_add_mr(struct txq *txq, struct rte_mempool *mp, uint32_t i)
+mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr)
{
+ struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+ struct priv *priv = txq->priv;
+
+ DEBUG("Tx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
+ txq->stats.idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
+ return mlx4_mr_addr2mr_bh(priv->dev, mr_ctrl, addr);
+}
+
+/**
+ * Flush all of the local cache entries.
+ *
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ */
+void
+mlx4_mr_flush_local_cache(struct mlx4_mr_ctrl *mr_ctrl)
+{
+ /* Reset the most-recently-used index. */
+ mr_ctrl->mru = 0;
+ /* Reset the linear search array. */
+ mr_ctrl->head = 0;
+ memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache));
+ /* Reset the B-tree table. */
+ mr_ctrl->cache_bh.len = 1;
+ mr_ctrl->cache_bh.overflow = 0;
+ /* Update the generation number. */
+ mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr;
+ DEBUG("mr_ctrl(%p): flushed, cur_gen=%d",
+ (void *)mr_ctrl, mr_ctrl->cur_gen);
+}
+
+/* Called during rte_mempool_mem_iter() by mlx4_mr_update_mp(). */
+static void
+mlx4_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque,
+ struct rte_mempool_memhdr *memhdr,
+ unsigned mem_idx __rte_unused)
+{
+ struct mr_update_mp_data *data = opaque;
+ uint32_t lkey;
+
+ /* Stop iteration if failed in the previous walk. */
+ if (data->ret < 0)
+ return;
+ /* Register address of the chunk and update local caches. */
+ lkey = mlx4_mr_addr2mr_bh(data->dev, data->mr_ctrl,
+ (uintptr_t)memhdr->addr);
+ if (lkey == UINT32_MAX)
+ data->ret = -1;
+}
+
+/**
+ * Register all memory chunks of a Mempool.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param mp
+ * Pointer to the Mempool to register.
+ *
+ * @return
+ * 0 on success, -1 on failure.
+ */
+int
+mlx4_mr_update_mp(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
+ struct rte_mempool *mp)
+{
+ struct mr_update_mp_data data = {
+ .dev = dev,
+ .mr_ctrl = mr_ctrl,
+ .ret = 0,
+ };
+
+ rte_mempool_mem_iter(mp, mlx4_mr_update_mp_cb, &data);
+ return data.ret;
+}
+
+#ifndef NDEBUG
+/**
+ * Dump all the created MRs and the global cache entries.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx4_mr_dump_dev(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
struct mlx4_mr *mr;
+ int mr_n = 0;
+ int chunk_n = 0;
+
+ rte_rwlock_read_lock(&priv->mr.rwlock);
+ /* Iterate all the existing MRs. */
+ LIST_FOREACH(mr, &priv->mr.mr_list, mr) {
+ unsigned int n;
+
+ DEBUG("port %u MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
+ dev->data->port_id, mr_n++,
+ rte_cpu_to_be_32(mr->ibv_mr->lkey),
+ mr->ms_n, mr->ms_bmp_n);
+ if (mr->ms_n == 0)
+ continue;
+ for (n = 0; n < mr->ms_bmp_n; ) {
+ struct mlx4_mr_cache ret = { 0, };
- /* Add a new entry, register MR first. */
- DEBUG("%p: discovered new memory pool \"%s\" (%p)",
- (void *)txq, mp->name, (void *)mp);
- mr = mlx4_mr_get(txq->priv, mp);
- if (unlikely(mr == NULL)) {
- DEBUG("%p: unable to configure MR, mlx4_mr_get() failed",
- (void *)txq);
- return (uint32_t)-1;
+ n = mr_find_next_chunk(mr, &ret, n);
+ if (!ret.end)
+ break;
+ DEBUG(" chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
+ chunk_n++, ret.start, ret.end);
+ }
}
- if (unlikely(i == RTE_DIM(txq->mp2mr))) {
- /* Table is full, remove oldest entry. */
- DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
- (void *)txq);
- --i;
- mlx4_mr_put(txq->mp2mr[0].mr);
- memmove(&txq->mp2mr[0], &txq->mp2mr[1],
- (sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0])));
+ DEBUG("port %u dumping global cache", dev->data->port_id);
+ mlx4_mr_btree_dump(&priv->mr.cache);
+ rte_rwlock_read_unlock(&priv->mr.rwlock);
+}
+#endif
+
+/**
+ * Release all the created MRs and resources. Remove device from memory callback
+ * list.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx4_mr_release(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx4_mr *mr_next = LIST_FIRST(&priv->mr.mr_list);
+
+ /* Remove from memory callback device list. */
+ rte_rwlock_write_lock(&mlx4_mem_event_rwlock);
+ LIST_REMOVE(priv, mem_event_cb);
+ rte_rwlock_write_unlock(&mlx4_mem_event_rwlock);
+#ifndef NDEBUG
+ mlx4_mr_dump_dev(dev);
+#endif
+ rte_rwlock_write_lock(&priv->mr.rwlock);
+ /* Detach from MR list and move to free list. */
+ while (mr_next != NULL) {
+ struct mlx4_mr *mr = mr_next;
+
+ mr_next = LIST_NEXT(mr, mr);
+ LIST_REMOVE(mr, mr);
+ LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);
}
- /* Store the new entry. */
- txq->mp2mr[i].mp = mp;
- txq->mp2mr[i].mr = mr;
- txq->mp2mr[i].lkey = mr->lkey;
- DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32,
- (void *)txq, mp->name, (void *)mp, txq->mp2mr[i].lkey);
- return txq->mp2mr[i].lkey;
+ LIST_INIT(&priv->mr.mr_list);
+ /* Free global cache. */
+ mlx4_mr_btree_free(&priv->mr.cache);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+ /* Free all remaining MRs. */
+ mlx4_mr_garbage_collect(dev);
}
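The new mlx4_mr_mem_event_cb() above is the hook the PMD plugs into the EAL memory hotplug notification mechanism; the registration call itself is not part of this file's hunks (the mlx5 counterpart appears later in this series). The following is a minimal, self-contained sketch of that mechanism, using the three-argument rte_mem_event_callback_register() exactly as this series calls it; the function and callback names are invented for illustration and are not part of the patch.

/*
 * Minimal sketch, not part of the patch: registering a callback for EAL
 * memory events. Only RTE_MEM_EVENT_FREE matters for MR invalidation;
 * allocations are handled lazily on the first datapath miss.
 */
#include <stdio.h>
#include <rte_common.h>
#include <rte_memory.h>

static void
example_mem_event_cb(enum rte_mem_event event_type, const void *addr,
		     size_t len, void *arg __rte_unused)
{
	if (event_type == RTE_MEM_EVENT_FREE)
		printf("hugepage range freed: %p, %zu bytes\n", addr, len);
}

static int
example_register_cb(void)
{
	/* "EXAMPLE_MEM_EVENT_CB" is an arbitrary name for this sketch. */
	return rte_mem_event_callback_register("EXAMPLE_MEM_EVENT_CB",
					       example_mem_event_cb, NULL);
}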
diff --git a/drivers/net/mlx4/mlx4_mr.h b/drivers/net/mlx4/mlx4_mr.h
new file mode 100644
index 00000000..37a365a8
--- /dev/null
+++ b/drivers/net/mlx4/mlx4_mr.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#ifndef RTE_PMD_MLX4_MR_H_
+#define RTE_PMD_MLX4_MR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/queue.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_eal_memconfig.h>
+#include <rte_ethdev.h>
+#include <rte_rwlock.h>
+#include <rte_bitmap.h>
+
+/* Size of per-queue MR cache array for linear search. */
+#define MLX4_MR_CACHE_N 8
+
+/* Size of MR cache table for binary search. */
+#define MLX4_MR_BTREE_CACHE_N 256
+
+/* Memory Region object. */
+struct mlx4_mr {
+ LIST_ENTRY(mlx4_mr) mr; /**< Pointer to the prev/next entry. */
+ struct ibv_mr *ibv_mr; /* Verbs Memory Region. */
+ const struct rte_memseg_list *msl;
+ int ms_base_idx; /* Start index of msl->memseg_arr[]. */
+ int ms_n; /* Number of memsegs in use. */
+ uint32_t ms_bmp_n; /* Number of bits in memsegs bit-mask. */
+ struct rte_bitmap *ms_bmp; /* Bit-mask of memsegs belonging to MR. */
+};
+
+/* Cache entry for Memory Region. */
+struct mlx4_mr_cache {
+ uintptr_t start; /* Start address of MR. */
+ uintptr_t end; /* End address of MR. */
+ uint32_t lkey; /* rte_cpu_to_be_32(ibv_mr->lkey). */
+} __rte_packed;
+
+/* MR Cache table for Binary search. */
+struct mlx4_mr_btree {
+ uint16_t len; /* Number of entries. */
+ uint16_t size; /* Total number of entries. */
+ int overflow; /* Mark failure of table expansion. */
+ struct mlx4_mr_cache (*table)[];
+} __rte_packed;
+
+/* Per-queue MR control descriptor. */
+struct mlx4_mr_ctrl {
+ uint32_t *dev_gen_ptr; /* Generation number of device to poll. */
+ uint32_t cur_gen; /* Generation number saved to flush caches. */
+ uint16_t mru; /* Index of last hit entry in top-half cache. */
+ uint16_t head; /* Index of the oldest entry in top-half cache. */
+ struct mlx4_mr_cache cache[MLX4_MR_CACHE_N]; /* Cache for top-half. */
+ struct mlx4_mr_btree cache_bh; /* Cache for bottom-half. */
+} __rte_packed;
+
+extern struct mlx4_dev_list mlx4_mem_event_cb_list;
+extern rte_rwlock_t mlx4_mem_event_rwlock;
+
+/* First entry must be NULL for comparison. */
+#define mlx4_mr_btree_len(bt) ((bt)->len - 1)
+
+int mlx4_mr_btree_init(struct mlx4_mr_btree *bt, int n, int socket);
+void mlx4_mr_btree_free(struct mlx4_mr_btree *bt);
+void mlx4_mr_btree_dump(struct mlx4_mr_btree *bt);
+void mlx4_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
+ size_t len, void *arg);
+int mlx4_mr_update_mp(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
+ struct rte_mempool *mp);
+void mlx4_mr_dump_dev(struct rte_eth_dev *dev);
+void mlx4_mr_release(struct rte_eth_dev *dev);
+
+/**
+ * Look up an LKey in the given lookup table by linear search. The last-hit
+ * entry is checked first; on a miss, the entire array is searched. If found,
+ * update the last-hit index and return the LKey.
+ *
+ * @param lkp_tbl
+ * Pointer to lookup table.
+ * @param[in,out] cached_idx
+ * Pointer to last-hit index.
+ * @param n
+ * Size of lookup table.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static __rte_always_inline uint32_t
+mlx4_mr_lookup_cache(struct mlx4_mr_cache *lkp_tbl, uint16_t *cached_idx,
+ uint16_t n, uintptr_t addr)
+{
+ uint16_t idx;
+
+ if (likely(addr >= lkp_tbl[*cached_idx].start &&
+ addr < lkp_tbl[*cached_idx].end))
+ return lkp_tbl[*cached_idx].lkey;
+ for (idx = 0; idx < n && lkp_tbl[idx].start != 0; ++idx) {
+ if (addr >= lkp_tbl[idx].start &&
+ addr < lkp_tbl[idx].end) {
+ /* Found. */
+ *cached_idx = idx;
+ return lkp_tbl[idx].lkey;
+ }
+ }
+ return UINT32_MAX;
+}
+
+#endif /* RTE_PMD_MLX4_MR_H_ */
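For reference, a small usage sketch of the inline lookup helper declared above; the table contents and the function name are invented for illustration and are not part of the patch.

/*
 * Illustrative only. Builds a tiny top-half table and exercises
 * mlx4_mr_lookup_cache(): one hit that updates the MRU index via the linear
 * scan, then a miss that returns UINT32_MAX.
 */
#include <stdint.h>
#include "mlx4_mr.h"

static void
example_top_half_lookup(void)
{
	struct mlx4_mr_cache tbl[MLX4_MR_CACHE_N] = {
		[0] = { .start = 0x100000, .end = 0x200000, .lkey = 0x11 },
		[1] = { .start = 0x200000, .end = 0x300000, .lkey = 0x22 },
	};
	uint16_t mru = 0;
	uint32_t lkey;

	/* Miss on tbl[mru], then found by the linear scan: lkey == 0x22, mru == 1. */
	lkey = mlx4_mr_lookup_cache(tbl, &mru, MLX4_MR_CACHE_N, 0x250000);
	/* Not covered by any entry: lkey == UINT32_MAX, mru unchanged. */
	lkey = mlx4_mr_lookup_cache(tbl, &mru, MLX4_MR_CACHE_N, 0x400000);
	(void)lkey;
}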
diff --git a/drivers/net/mlx4/mlx4_prm.h b/drivers/net/mlx4/mlx4_prm.h
index 153dda52..e15a3c14 100644
--- a/drivers/net/mlx4/mlx4_prm.h
+++ b/drivers/net/mlx4/mlx4_prm.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef MLX4_PRM_H_
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index 7a036ed8..87688c1c 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
/**
@@ -88,7 +88,7 @@ mlx4_rss_hash_key_default[MLX4_RSS_HASH_KEY_SIZE] = {
*/
struct mlx4_rss *
mlx4_rss_get(struct priv *priv, uint64_t fields,
- uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
+ const uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
uint16_t queues, const uint16_t queue_id[])
{
struct mlx4_rss *rss;
@@ -336,6 +336,8 @@ mlx4_rss_init(struct priv *priv)
unsigned int i;
int ret;
+ if (priv->rss_init)
+ return 0;
/* Prepare range for RSS contexts before creating the first WQ. */
ret = mlx4_glue->dv_set_context_attr
(priv->ctx,
@@ -418,6 +420,7 @@ wq_num_check:
}
wq_num_prev = wq_num;
}
+ priv->rss_init = 1;
return 0;
error:
ERROR("cannot initialize common RSS resources (queue %u): %s: %s",
@@ -446,6 +449,8 @@ mlx4_rss_deinit(struct priv *priv)
{
unsigned int i;
+ if (!priv->rss_init)
+ return;
for (i = 0; i != priv->dev->data->nb_rx_queues; ++i) {
struct rxq *rxq = priv->dev->data->rx_queues[i];
@@ -454,6 +459,7 @@ mlx4_rss_deinit(struct priv *priv)
mlx4_rxq_detach(rxq);
}
}
+ priv->rss_init = 0;
}
/**
@@ -482,6 +488,7 @@ mlx4_rxq_attach(struct rxq *rxq)
}
struct priv *priv = rxq->priv;
+ struct rte_eth_dev *dev = priv->dev;
const uint32_t elts_n = 1 << rxq->elts_n;
const uint32_t sges_n = 1 << rxq->sges_n;
struct rte_mbuf *(*elts)[elts_n] = rxq->elts;
@@ -491,6 +498,8 @@ mlx4_rxq_attach(struct rxq *rxq)
const char *msg;
struct ibv_cq *cq = NULL;
struct ibv_wq *wq = NULL;
+ uint32_t create_flags = 0;
+ uint32_t comp_mask = 0;
volatile struct mlx4_wqe_data_seg (*wqes)[];
unsigned int i;
int ret;
@@ -503,6 +512,11 @@ mlx4_rxq_attach(struct rxq *rxq)
msg = "CQ creation failure";
goto error;
}
+ /* By default, FCS (CRC) is stripped by hardware. */
+ if (rxq->crc_present) {
+ create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
+ comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
+ }
wq = mlx4_glue->create_wq
(priv->ctx,
&(struct ibv_wq_init_attr){
@@ -511,6 +525,8 @@ mlx4_rxq_attach(struct rxq *rxq)
.max_sge = sges_n,
.pd = priv->pd,
.cq = cq,
+ .comp_mask = comp_mask,
+ .create_flags = create_flags,
});
if (!wq) {
ret = errno ? errno : EINVAL;
@@ -537,6 +553,11 @@ mlx4_rxq_attach(struct rxq *rxq)
msg = "failed to obtain device information from WQ/CQ objects";
goto error;
}
+ /* Pre-register Rx mempool. */
+ DEBUG("port %u Rx queue %u registering mp %s having %u chunks",
+ priv->dev->data->port_id, rxq->stats.idx,
+ rxq->mp->name, rxq->mp->nb_mem_chunks);
+ mlx4_mr_update_mp(dev, &rxq->mr_ctrl, rxq->mp);
wqes = (volatile struct mlx4_wqe_data_seg (*)[])
((uintptr_t)dv_rwq.buf.buf + dv_rwq.rq.offset);
for (i = 0; i != RTE_DIM(*elts); ++i) {
@@ -568,7 +589,7 @@ mlx4_rxq_attach(struct rxq *rxq)
.addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
uintptr_t)),
.byte_count = rte_cpu_to_be_32(buf->data_len),
- .lkey = rte_cpu_to_be_32(rxq->mr->lkey),
+ .lkey = mlx4_rx_mb2mr(rxq, buf),
};
(*elts)[i] = buf;
}
@@ -597,6 +618,7 @@ error:
claim_zero(mlx4_glue->destroy_wq(wq));
if (cq)
claim_zero(mlx4_glue->destroy_cq(cq));
+ --rxq->usecnt;
rte_errno = ret;
ERROR("error while attaching Rx queue %p: %s: %s",
(void *)rxq, msg, strerror(ret));
@@ -676,26 +698,6 @@ mlx4_get_rx_port_offloads(struct priv *priv)
}
/**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param priv
- * Pointer to private structure.
- * @param requested
- * Per-queue offloads configuration.
- *
- * @return
- * Nonzero when configuration is valid.
- */
-static int
-mlx4_check_rx_queue_offloads(struct priv *priv, uint64_t requested)
-{
- uint64_t mandatory = priv->dev->data->dev_conf.rxmode.offloads;
- uint64_t supported = mlx4_get_rx_port_offloads(priv);
-
- return !((mandatory ^ requested) & supported);
-}
-
-/**
* DPDK callback to configure a Rx queue.
*
* @param dev
@@ -736,20 +738,14 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
},
};
int ret;
+ uint32_t crc_present;
+ uint64_t offloads;
+
+ offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
- (void)conf; /* Thresholds configuration (ignored). */
DEBUG("%p: configuring queue %u for %u descriptors",
(void *)dev, idx, desc);
- if (!mlx4_check_rx_queue_offloads(priv, conf->offloads)) {
- rte_errno = ENOTSUP;
- ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port "
- "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
- (void *)dev, conf->offloads,
- dev->data->dev_conf.rxmode.offloads,
- (mlx4_get_rx_port_offloads(priv) |
- mlx4_get_rx_queue_offloads(priv)));
- return -rte_errno;
- }
+
if (idx >= dev->data->nb_rx_queues) {
rte_errno = EOVERFLOW;
ERROR("%p: queue index out of range (%u >= %u)",
@@ -774,6 +770,23 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
" to the next power of two (%u)",
(void *)dev, idx, desc);
}
+ /* By default, FCS (CRC) is stripped by hardware. */
+ if (offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
+ crc_present = 0;
+ } else if (priv->hw_fcs_strip) {
+ crc_present = 1;
+ } else {
+ WARN("%p: CRC stripping has been disabled but will still"
+ " be performed by hardware, make sure MLNX_OFED and"
+ " firmware are up to date",
+ (void *)dev);
+ crc_present = 0;
+ }
+ DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from"
+ " incoming frames to hide it",
+ (void *)dev,
+ crc_present ? "disabled" : "enabled",
+ crc_present << 2);
/* Allocate and initialize Rx queue. */
mlx4_zmallocv_socket("RXQ", vec, RTE_DIM(vec), socket);
if (!rxq) {
@@ -790,9 +803,10 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
.elts = elts,
/* Toggle Rx checksum offload if hardware supports it. */
.csum = priv->hw_csum &&
- (conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
+ (offloads & DEV_RX_OFFLOAD_CHECKSUM),
.csum_l2tun = priv->hw_csum_l2tun &&
- (conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
+ (offloads & DEV_RX_OFFLOAD_CHECKSUM),
+ .crc_present = crc_present,
.l2tun_offload = priv->hw_csum_l2tun,
.stats = {
.idx = idx,
@@ -804,7 +818,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
(mb_len - RTE_PKTMBUF_HEADROOM)) {
;
- } else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
+ } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
uint32_t size =
RTE_PKTMBUF_HEADROOM +
dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -847,11 +861,9 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1 << rxq->sges_n);
goto error;
}
- /* Use the entire Rx mempool as the memory region. */
- rxq->mr = mlx4_mr_get(priv, mp);
- if (!rxq->mr) {
- ERROR("%p: MR creation failure: %s",
- (void *)dev, strerror(rte_errno));
+ if (mlx4_mr_btree_init(&rxq->mr_ctrl.cache_bh,
+ MLX4_MR_BTREE_CACHE_N, socket)) {
+ /* rte_errno is already set. */
goto error;
}
if (dev->data->dev_conf.intr_conf.rxq) {
@@ -911,7 +923,6 @@ mlx4_rx_queue_release(void *dpdk_rxq)
assert(!rxq->rq_db);
if (rxq->channel)
claim_zero(mlx4_glue->destroy_comp_channel(rxq->channel));
- if (rxq->mr)
- mlx4_mr_put(rxq->mr);
+ mlx4_mr_btree_free(&rxq->mr_ctrl.cache_bh);
rte_free(rxq);
}
diff --git a/drivers/net/mlx4/mlx4_rxtx.c b/drivers/net/mlx4/mlx4_rxtx.c
index 8ca8b77c..a92da66b 100644
--- a/drivers/net/mlx4/mlx4_rxtx.c
+++ b/drivers/net/mlx4/mlx4_rxtx.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
/**
@@ -263,7 +263,7 @@ mlx4_txq_stamp_freed_wqe(struct mlx4_sq *sq, volatile uint32_t *start,
} while (start != (volatile uint32_t *)sq->eob);
start = (volatile uint32_t *)sq->buf;
/* Flip invalid stamping ownership. */
- stamp ^= RTE_BE32(0x1 << MLX4_SQ_OWNER_BIT);
+ stamp ^= RTE_BE32(1u << MLX4_SQ_OWNER_BIT);
sq->stamp = stamp;
if (start == end)
return size;
@@ -344,24 +344,6 @@ mlx4_txq_complete(struct txq *txq, const unsigned int elts_m,
}
/**
- * Get memory pool (MP) from mbuf. If mbuf is indirect, the pool from which
- * the cloned mbuf is allocated is returned instead.
- *
- * @param buf
- * Pointer to mbuf.
- *
- * @return
- * Memory pool where data is located for given mbuf.
- */
-static struct rte_mempool *
-mlx4_txq_mb2mp(struct rte_mbuf *buf)
-{
- if (unlikely(RTE_MBUF_INDIRECT(buf)))
- return rte_mbuf_from_indirect(buf)->pool;
- return buf->pool;
-}
-
-/**
* Write Tx data segment to the SQ.
*
* @param dseg
@@ -378,7 +360,7 @@ mlx4_fill_tx_data_seg(volatile struct mlx4_wqe_data_seg *dseg,
uint32_t lkey, uintptr_t addr, rte_be32_t byte_count)
{
dseg->addr = rte_cpu_to_be_64(addr);
- dseg->lkey = rte_cpu_to_be_32(lkey);
+ dseg->lkey = lkey;
#if RTE_CACHE_LINE_SIZE < 64
/*
* Need a barrier here before writing the byte_count
@@ -437,7 +419,7 @@ mlx4_tx_burst_segs(struct rte_mbuf *buf, struct txq *txq,
goto txbb_tail_segs;
txbb_head_seg:
/* Memory region key (big endian) for this memory pool. */
- lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf));
+ lkey = mlx4_tx_mb2mr(txq, sbuf);
if (unlikely(lkey == (uint32_t)-1)) {
DEBUG("%p: unable to get MP <-> MR association",
(void *)txq);
@@ -449,7 +431,7 @@ txbb_head_seg:
dseg = (volatile struct mlx4_wqe_data_seg *)
sq->buf;
dseg->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(sbuf, uintptr_t));
- dseg->lkey = rte_cpu_to_be_32(lkey);
+ dseg->lkey = lkey;
/*
* This data segment starts at the beginning of a new
* TXBB, so we need to postpone its byte_count writing
@@ -469,7 +451,7 @@ txbb_tail_segs:
/* Jump to default if there are more than two segments remaining. */
switch (nb_segs) {
default:
- lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf));
+ lkey = mlx4_tx_mb2mr(txq, sbuf);
if (unlikely(lkey == (uint32_t)-1)) {
DEBUG("%p: unable to get MP <-> MR association",
(void *)txq);
@@ -485,7 +467,7 @@ txbb_tail_segs:
nb_segs--;
/* fallthrough */
case 2:
- lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf));
+ lkey = mlx4_tx_mb2mr(txq, sbuf);
if (unlikely(lkey == (uint32_t)-1)) {
DEBUG("%p: unable to get MP <-> MR association",
(void *)txq);
@@ -501,7 +483,7 @@ txbb_tail_segs:
nb_segs--;
/* fallthrough */
case 1:
- lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf));
+ lkey = mlx4_tx_mb2mr(txq, sbuf);
if (unlikely(lkey == (uint32_t)-1)) {
DEBUG("%p: unable to get MP <-> MR association",
(void *)txq);
@@ -611,7 +593,7 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
elt->buf = NULL;
break;
}
- lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(buf));
+ lkey = mlx4_tx_mb2mr(txq, buf);
if (unlikely(lkey == (uint32_t)-1)) {
/* MR does not exist. */
DEBUG("%p: unable to get MP <-> MR association",
@@ -639,7 +621,7 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
ctrl_next = (volatile struct mlx4_wqe_ctrl_seg *)
((volatile uint8_t *)ctrl_next - sq->size);
/* Flip HW valid ownership. */
- sq->owner_opcode ^= 0x1 << MLX4_SQ_OWNER_BIT;
+ sq->owner_opcode ^= 1u << MLX4_SQ_OWNER_BIT;
}
/*
* For raw Ethernet, the SOLICIT flag is used to indicate
@@ -934,11 +916,14 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
goto skip;
}
pkt = seg;
+ assert(len >= (rxq->crc_present << 2));
/* Update packet information. */
pkt->packet_type =
rxq_cq_to_pkt_type(cqe, rxq->l2tun_offload);
pkt->ol_flags = PKT_RX_RSS_HASH;
pkt->hash.rss = cqe->immed_rss_invalid;
+ if (rxq->crc_present)
+ len -= ETHER_CRC_LEN;
pkt->pkt_len = len;
if (rxq->csum | rxq->csum_l2tun) {
uint32_t flags =
@@ -963,6 +948,9 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
* changes.
*/
scat->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
+ /* If there's only one MR, no need to replace LKey in WQE. */
+ if (unlikely(mlx4_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
+ scat->lkey = mlx4_rx_mb2mr(rxq, rep);
if (len > seg->data_len) {
len -= seg->data_len;
++pkt->nb_segs;
diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
index c12bd39a..4c025e3a 100644
--- a/drivers/net/mlx4/mlx4_rxtx.h
+++ b/drivers/net/mlx4/mlx4_rxtx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef MLX4_RXTX_H_
@@ -25,6 +25,7 @@
#include "mlx4.h"
#include "mlx4_prm.h"
+#include "mlx4_mr.h"
/** Rx queue counters. */
struct mlx4_rxq_stats {
@@ -39,7 +40,6 @@ struct mlx4_rxq_stats {
struct rxq {
struct priv *priv; /**< Back pointer to private data. */
struct rte_mempool *mp; /**< Memory pool for allocations. */
- struct mlx4_mr *mr; /**< Memory region. */
struct ibv_cq *cq; /**< Completion queue. */
struct ibv_wq *wq; /**< Work queue. */
struct ibv_comp_channel *channel; /**< Rx completion channel. */
@@ -47,11 +47,13 @@ struct rxq {
uint16_t port_id; /**< Port ID for incoming packets. */
uint16_t sges_n; /**< Number of segments per packet (log2 value). */
uint16_t elts_n; /**< Mbuf queue size (log2 value). */
+ struct mlx4_mr_ctrl mr_ctrl; /* MR control descriptor. */
struct rte_mbuf *(*elts)[]; /**< Rx elements. */
volatile struct mlx4_wqe_data_seg (*wqes)[]; /**< HW queue entries. */
volatile uint32_t *rq_db; /**< RQ doorbell record. */
uint32_t csum:1; /**< Enable checksum offloading. */
uint32_t csum_l2tun:1; /**< Same for L2 tunnels. */
+ uint32_t crc_present:1; /**< CRC must be subtracted. */
uint32_t l2tun_offload:1; /**< L2 tunnel offload is enabled. */
struct mlx4_cq mcq; /**< Info for directly manipulating the CQ. */
struct mlx4_rxq_stats stats; /**< Rx queue counters. */
@@ -83,7 +85,7 @@ struct txq_elt {
};
};
-/** Rx queue counters. */
+/** Tx queue counters. */
struct mlx4_txq_stats {
unsigned int idx; /**< Mapping index. */
uint64_t opackets; /**< Total of successfully sent packets. */
@@ -100,6 +102,7 @@ struct txq {
int elts_comp_cd; /**< Countdown for next completion. */
unsigned int elts_comp_cd_init; /**< Initial value for countdown. */
unsigned int elts_n; /**< (*elts)[] length. */
+ struct mlx4_mr_ctrl mr_ctrl; /* MR control descriptor. */
struct txq_elt (*elts)[]; /**< Tx elements. */
struct mlx4_txq_stats stats; /**< Tx queue counters. */
uint32_t max_inline; /**< Max inline send size. */
@@ -108,11 +111,6 @@ struct txq {
uint32_t lb:1; /**< Whether packets should be looped back by eSwitch. */
uint8_t *bounce_buf;
/**< Memory used for storing the first DWORD of data TXBBs. */
- struct {
- const struct rte_mempool *mp; /**< Cached memory pool. */
- struct mlx4_mr *mr; /**< Memory region (for mp). */
- uint32_t lkey; /**< mr->lkey copy. */
- } mp2mr[MLX4_PMD_TX_MP_CACHE]; /**< MP to MR translation table. */
struct priv *priv; /**< Back pointer to private data. */
unsigned int socket; /**< CPU socket ID for allocations. */
struct ibv_cq *cq; /**< Completion queue. */
@@ -126,7 +124,7 @@ uint8_t mlx4_rss_hash_key_default[MLX4_RSS_HASH_KEY_SIZE];
int mlx4_rss_init(struct priv *priv);
void mlx4_rss_deinit(struct priv *priv);
struct mlx4_rss *mlx4_rss_get(struct priv *priv, uint64_t fields,
- uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
+ const uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
uint16_t queues, const uint16_t queue_id[]);
void mlx4_rss_put(struct mlx4_rss *rss);
int mlx4_rss_attach(struct mlx4_rss *rss);
@@ -160,34 +158,70 @@ int mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
const struct rte_eth_txconf *conf);
void mlx4_tx_queue_release(void *dpdk_txq);
+/* mlx4_mr.c */
+
+void mlx4_mr_flush_local_cache(struct mlx4_mr_ctrl *mr_ctrl);
+uint32_t mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr);
+uint32_t mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr);
+
/**
- * Get memory region (MR) <-> memory pool (MP) association from txq->mp2mr[].
- * Call mlx4_txq_add_mr() if MP is not registered yet.
+ * Query LKey from a packet buffer for Rx. No need to flush local caches for Rx
+ * as the mempool is pre-configured and static.
+ *
+ * @param rxq
+ * Pointer to Rx queue structure.
+ * @param addr
+ * Address to search.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static __rte_always_inline uint32_t
+mlx4_rx_addr2mr(struct rxq *rxq, uintptr_t addr)
+{
+ struct mlx4_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
+ uint32_t lkey;
+
+ /* Linear search on MR cache array. */
+ lkey = mlx4_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
+ MLX4_MR_CACHE_N, addr);
+ if (likely(lkey != UINT32_MAX))
+ return lkey;
+ /* Take slower bottom-half (binary search) on miss. */
+ return mlx4_rx_addr2mr_bh(rxq, addr);
+}
+
+#define mlx4_rx_mb2mr(rxq, mb) mlx4_rx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
+
+/**
+ * Query LKey from a packet buffer for Tx. If not found, add the mempool.
*
* @param txq
* Pointer to Tx queue structure.
- * @param[in] mp
- * Memory pool for which a memory region lkey must be returned.
+ * @param addr
+ * Address to search.
*
* @return
- * mr->lkey on success, (uint32_t)-1 on failure.
+ * Searched LKey on success, UINT32_MAX on no match.
*/
-static inline uint32_t
-mlx4_txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
+static __rte_always_inline uint32_t
+mlx4_tx_addr2mr(struct txq *txq, uintptr_t addr)
{
- unsigned int i;
-
- for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
- if (unlikely(txq->mp2mr[i].mp == NULL)) {
- /* Unknown MP, add a new MR for it. */
- break;
- }
- if (txq->mp2mr[i].mp == mp) {
- /* MP found MP. */
- return txq->mp2mr[i].lkey;
- }
- }
- return mlx4_txq_add_mr(txq, mp, i);
+ struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+ uint32_t lkey;
+
+ /* Check generation bit to see if there's any change on existing MRs. */
+ if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
+ mlx4_mr_flush_local_cache(mr_ctrl);
+ /* Linear search on MR cache array. */
+ lkey = mlx4_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
+ MLX4_MR_CACHE_N, addr);
+ if (likely(lkey != UINT32_MAX))
+ return lkey;
+ /* Take slower bottom-half (binary search) on miss. */
+ return mlx4_tx_addr2mr_bh(txq, addr);
}
+#define mlx4_tx_mb2mr(rxq, mb) mlx4_tx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
+
#endif /* MLX4_RXTX_H_ */
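The generation-number check in mlx4_tx_addr2mr() above is the consumer half of a simple invalidation protocol. The sketch below shows the whole pattern in generic terms; the control-path side is an assumption for illustration, not the PMD's actual code, and all names are placeholders.

/*
 * Sketch, assuming the usual producer/consumer split: the control path bumps
 * a device-wide counter whenever the MR list changes, each queue keeps a
 * snapshot, and the datapath flushes its local caches once the snapshot is
 * stale.
 */
#include <stdint.h>

struct example_dev { uint32_t dev_gen; };
struct example_queue { uint32_t cur_gen; uint32_t *dev_gen_ptr; };

static void
example_mr_list_changed(struct example_dev *dev)
{
	dev->dev_gen++; /* A real driver also issues a write barrier here. */
}

static int
example_datapath_check(struct example_queue *q)
{
	if (*q->dev_gen_ptr != q->cur_gen) {
		/* Stale snapshot: drop local entries and resync. */
		q->cur_gen = *q->dev_gen_ptr;
		return 1; /* local caches must be flushed */
	}
	return 0;
}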
diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c
index 071b2d5d..6edaadbb 100644
--- a/drivers/net/mlx4/mlx4_txq.c
+++ b/drivers/net/mlx4/mlx4_txq.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
/**
@@ -63,64 +63,6 @@ mlx4_txq_free_elts(struct txq *txq)
txq->elts_tail = txq->elts_head;
}
-struct txq_mp2mr_mbuf_check_data {
- int ret;
-};
-
-/**
- * Callback function for rte_mempool_obj_iter() to check whether a given
- * mempool object looks like a mbuf.
- *
- * @param[in] mp
- * The mempool pointer
- * @param[in] arg
- * Context data (struct mlx4_txq_mp2mr_mbuf_check_data). Contains the
- * return value.
- * @param[in] obj
- * Object address.
- * @param index
- * Object index, unused.
- */
-static void
-mlx4_txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
- uint32_t index)
-{
- struct txq_mp2mr_mbuf_check_data *data = arg;
- struct rte_mbuf *buf = obj;
-
- (void)index;
- /*
- * Check whether mbuf structure fits element size and whether mempool
- * pointer is valid.
- */
- if (sizeof(*buf) > mp->elt_size || buf->pool != mp)
- data->ret = -1;
-}
-
-/**
- * Iterator function for rte_mempool_walk() to register existing mempools and
- * fill the MP to MR cache of a Tx queue.
- *
- * @param[in] mp
- * Memory Pool to register.
- * @param *arg
- * Pointer to Tx queue structure.
- */
-static void
-mlx4_txq_mp2mr_iter(struct rte_mempool *mp, void *arg)
-{
- struct txq *txq = arg;
- struct txq_mp2mr_mbuf_check_data data = {
- .ret = 0,
- };
-
- /* Register mempool only if the first element looks like a mbuf. */
- if (rte_mempool_obj_iter(mp, mlx4_txq_mp2mr_mbuf_check, &data) == 0 ||
- data.ret == -1)
- return;
- mlx4_txq_mp2mr(txq, mp);
-}
-
/**
* Retrieves information needed in order to directly access the Tx queue.
*
@@ -144,9 +86,9 @@ mlx4_txq_fill_dv_obj_info(struct txq *txq, struct mlx4dv_obj *mlxdv)
uint32_t headroom_size = 2048 + (1 << dqp->sq.wqe_shift);
/* Continuous headroom size bytes must always stay freed. */
sq->remain_size = sq->size - headroom_size;
- sq->owner_opcode = MLX4_OPCODE_SEND | (0 << MLX4_SQ_OWNER_BIT);
+ sq->owner_opcode = MLX4_OPCODE_SEND | (0u << MLX4_SQ_OWNER_BIT);
sq->stamp = rte_cpu_to_be_32(MLX4_SQ_STAMP_VAL |
- (0 << MLX4_SQ_OWNER_BIT));
+ (0u << MLX4_SQ_OWNER_BIT));
sq->db = dqp->sdb;
sq->doorbell_qpn = dqp->doorbell_qpn;
cq->buf = dcq->buf.buf;
@@ -180,26 +122,6 @@ mlx4_get_tx_port_offloads(struct priv *priv)
}
/**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param priv
- * Pointer to private structure.
- * @param requested
- * Per-queue offloads configuration.
- *
- * @return
- * Nonzero when configuration is valid.
- */
-static int
-mlx4_check_tx_queue_offloads(struct priv *priv, uint64_t requested)
-{
- uint64_t mandatory = priv->dev->data->dev_conf.txmode.offloads;
- uint64_t supported = mlx4_get_tx_port_offloads(priv);
-
- return !((mandatory ^ requested) & supported);
-}
-
-/**
* DPDK callback to configure a Tx queue.
*
* @param dev
@@ -246,23 +168,13 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
},
};
int ret;
+ uint64_t offloads;
+
+ offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
DEBUG("%p: configuring queue %u for %u descriptors",
(void *)dev, idx, desc);
- /*
- * Don't verify port offloads for application which
- * use the old API.
- */
- if ((conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
- !mlx4_check_tx_queue_offloads(priv, conf->offloads)) {
- rte_errno = ENOTSUP;
- ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port "
- "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
- (void *)dev, conf->offloads,
- dev->data->dev_conf.txmode.offloads,
- mlx4_get_tx_port_offloads(priv));
- return -rte_errno;
- }
+
if (idx >= dev->data->nb_tx_queues) {
rte_errno = EOVERFLOW;
ERROR("%p: queue index out of range (%u >= %u)",
@@ -313,11 +225,11 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
.elts_comp_cd_init =
RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
.csum = priv->hw_csum &&
- (conf->offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ (offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM)),
.csum_l2tun = priv->hw_csum_l2tun &&
- (conf->offloads &
+ (offloads &
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM),
/* Enable Tx loopback for VF devices. */
.lb = !!priv->vf,
@@ -404,8 +316,13 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
/* Save first wqe pointer in the first element. */
(&(*txq->elts)[0])->wqe =
(volatile struct mlx4_wqe_ctrl_seg *)txq->msq.buf;
- /* Pre-register known mempools. */
- rte_mempool_walk(mlx4_txq_mp2mr_iter, txq);
+ if (mlx4_mr_btree_init(&txq->mr_ctrl.cache_bh,
+ MLX4_MR_BTREE_CACHE_N, socket)) {
+ /* rte_errno is already set. */
+ goto error;
+ }
+ /* Save a pointer to the global generation number to check for memory events. */
+ txq->mr_ctrl.dev_gen_ptr = &priv->mr.dev_gen;
DEBUG("%p: adding Tx queue %p to list", (void *)dev, (void *)txq);
dev->data->tx_queues[idx] = txq;
return 0;
@@ -446,11 +363,6 @@ mlx4_tx_queue_release(void *dpdk_txq)
claim_zero(mlx4_glue->destroy_qp(txq->qp));
if (txq->cq)
claim_zero(mlx4_glue->destroy_cq(txq->cq));
- for (i = 0; i != RTE_DIM(txq->mp2mr); ++i) {
- if (!txq->mp2mr[i].mp)
- break;
- assert(txq->mp2mr[i].mr);
- mlx4_mr_put(txq->mp2mr[i].mr);
- }
+ mlx4_mr_btree_free(&txq->mr_ctrl.cache_bh);
rte_free(txq);
}
diff --git a/drivers/net/mlx4/mlx4_utils.c b/drivers/net/mlx4/mlx4_utils.c
index d10812ec..a727d703 100644
--- a/drivers/net/mlx4/mlx4_utils.c
+++ b/drivers/net/mlx4/mlx4_utils.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
/**
diff --git a/drivers/net/mlx4/mlx4_utils.h b/drivers/net/mlx4/mlx4_utils.h
index 9fdbacad..86abb3b7 100644
--- a/drivers/net/mlx4/mlx4_utils.h
+++ b/drivers/net/mlx4/mlx4_utils.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef MLX4_UTILS_H_
diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index 3bc9736c..8a5229e6 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -1,33 +1,6 @@
-# BSD LICENSE
-#
+# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2015 6WIND S.A.
-# Copyright 2015 Mellanox.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of 6WIND S.A. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# Copyright 2015 Mellanox Technologies, Ltd
include $(RTE_SDK)/mk/rte.vars.mk
@@ -35,7 +8,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_pmd_mlx5.a
LIB_GLUE = $(LIB_GLUE_BASE).$(LIB_GLUE_VERSION)
LIB_GLUE_BASE = librte_pmd_mlx5_glue.so
-LIB_GLUE_VERSION = 18.02.0
+LIB_GLUE_VERSION = 18.05.0
# Sources.
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5.c
@@ -59,6 +32,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rss.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mr.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_socket.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_nl.c
ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS),y)
INSTALL-$(CONFIG_RTE_LIBRTE_MLX5_PMD)-lib += $(LIB_GLUE)
@@ -92,6 +66,9 @@ CFLAGS += -Wno-error=cast-qual
EXPORT_MAP := rte_pmd_mlx5_version.map
LIBABIVER := 1
+# memseg walk is not part of stable API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
# DEBUG which is usually provided on the command-line may enable
# CONFIG_RTE_LIBRTE_MLX5_DEBUG.
ifeq ($(DEBUG),1)
@@ -105,10 +82,6 @@ else
CFLAGS += -DNDEBUG -UPEDANTIC
endif
-ifdef CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE
-CFLAGS += -DMLX5_PMD_TX_MP_CACHE=$(CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE)
-endif
-
include $(RTE_SDK)/mk/rte.lib.mk
# Generate and clean-up mlx5_autoconf.h.
@@ -125,9 +98,19 @@ mlx5_autoconf.h.new: FORCE
mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
$Q $(RM) -f -- '$@'
$Q sh -- '$<' '$@' \
- HAVE_IBV_DEVICE_VXLAN_SUPPORT \
+ HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_CONTEXT_MASK_STRIDING_RQ \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_DEVICE_TUNNEL_SUPPORT \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_DEVICE_MPLS_SUPPORT \
infiniband/verbs.h \
- enum IBV_DEVICE_VXLAN_SUPPORT \
+ enum IBV_FLOW_SPEC_MPLS \
$(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
HAVE_IBV_WQ_FLAG_RX_END_PADDING \
@@ -135,6 +118,11 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
enum IBV_WQ_FLAG_RX_END_PADDING \
$(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
+ HAVE_IBV_MLX5_MOD_SWP \
+ infiniband/mlx5dv.h \
+ type 'struct mlx5dv_sw_parsing_caps' \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
HAVE_IBV_MLX5_MOD_MPW \
infiniband/mlx5dv.h \
enum MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED \
@@ -181,8 +169,13 @@ ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS),y)
$(LIB): $(LIB_GLUE)
+ifeq ($(LINK_USING_CC),1)
+GLUE_LDFLAGS := $(call linkerprefix,$(LDFLAGS))
+else
+GLUE_LDFLAGS := $(LDFLAGS)
+endif
$(LIB_GLUE): mlx5_glue.o
- $Q $(LD) $(LDFLAGS) $(EXTRA_LDFLAGS) \
+ $Q $(LD) $(GLUE_LDFLAGS) $(EXTRA_LDFLAGS) \
-Wl,-h,$(LIB_GLUE) \
-s -shared -o $@ $< -libverbs -lmlx5
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 6c0985bd..c933e274 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#include <stddef.h>
@@ -13,6 +13,7 @@
#include <errno.h>
#include <net/if.h>
#include <sys/mman.h>
+#include <linux/rtnetlink.h>
/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
@@ -33,6 +34,8 @@
#include <rte_config.h>
#include <rte_eal_memconfig.h>
#include <rte_kvargs.h>
+#include <rte_rwlock.h>
+#include <rte_spinlock.h>
#include "mlx5.h"
#include "mlx5_utils.h"
@@ -40,10 +43,23 @@
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"
+#include "mlx5_mr.h"
/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"
+/* Device parameter to enable Multi-Packet Rx queue. */
+#define MLX5_RX_MPRQ_EN "mprq_en"
+
+/* Device parameter to configure log 2 of the number of strides for MPRQ. */
+#define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num"
+
+/* Device parameter to limit the size of memcpy'd packet for MPRQ. */
+#define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len"
+
+/* Device parameter to set the minimum number of Rx queues to enable MPRQ. */
+#define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq"
+
/* Device parameter to configure inline send. */
#define MLX5_TXQ_INLINE "txq_inline"
@@ -68,6 +84,12 @@
/* Device parameter to enable hardware Rx vector. */
#define MLX5_RX_VEC_EN "rx_vec_en"
+/* Allow L3 VXLAN flow creation. */
+#define MLX5_L3_VXLAN_EN "l3_vxlan_en"
+
+/* Activate Netlink support in VF mode. */
+#define MLX5_VF_NL_EN "vf_nl_en"
+
#ifndef HAVE_IBV_MLX5_MOD_MPW
#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
@@ -77,6 +99,50 @@
#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
#endif
+static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
+
+/* Shared memory between primary and secondary processes. */
+struct mlx5_shared_data *mlx5_shared_data;
+
+/* Spinlock for mlx5_shared_data allocation. */
+static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
+
+/** Driver-specific log messages type. */
+int mlx5_logtype;
+
+/**
+ * Prepare shared data between primary and secondary process.
+ */
+static void
+mlx5_prepare_shared_data(void)
+{
+ const struct rte_memzone *mz;
+
+ rte_spinlock_lock(&mlx5_shared_data_lock);
+ if (mlx5_shared_data == NULL) {
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ /* Allocate shared memory. */
+ mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
+ sizeof(*mlx5_shared_data),
+ SOCKET_ID_ANY, 0);
+ } else {
+ /* Lookup allocated shared memory. */
+ mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
+ }
+ if (mz == NULL)
+ rte_panic("Cannot allocate mlx5 shared data\n");
+ mlx5_shared_data = mz->addr;
+ /* Initialize shared data. */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ LIST_INIT(&mlx5_shared_data->mem_event_cb_list);
+ rte_rwlock_init(&mlx5_shared_data->mem_event_rwlock);
+ }
+ rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
+ mlx5_mr_mem_event_cb, NULL);
+ }
+ rte_spinlock_unlock(&mlx5_shared_data_lock);
+}
+
/**
* Retrieve integer value from environment variable.
*
@@ -108,7 +174,7 @@ mlx5_getenv_int(const char *name)
* A pointer to the callback data.
*
* @return
- * a pointer to the allocate space.
+ * Allocated buffer, NULL otherwise and rte_errno is set.
*/
static void *
mlx5_alloc_verbs_buf(size_t size, void *data)
@@ -130,7 +196,8 @@ mlx5_alloc_verbs_buf(size_t size, void *data)
}
assert(data != NULL);
ret = rte_malloc_socket(__func__, size, alignment, socket);
- DEBUG("Extern alloc size: %lu, align: %lu: %p", size, alignment, ret);
+ if (!ret && size)
+ rte_errno = ENOMEM;
return ret;
}
@@ -146,7 +213,6 @@ static void
mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
{
assert(data != NULL);
- DEBUG("Extern free request: %p", ptr);
rte_free(ptr);
}
@@ -165,13 +231,12 @@ mlx5_dev_close(struct rte_eth_dev *dev)
unsigned int i;
int ret;
- priv_lock(priv);
- DEBUG("%p: closing device \"%s\"",
- (void *)dev,
- ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
+ DRV_LOG(DEBUG, "port %u closing device \"%s\"",
+ dev->data->port_id,
+ ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
/* In case mlx5_dev_stop() has not been called. */
- priv_dev_interrupt_handler_uninstall(priv, dev);
- priv_dev_traffic_disable(priv, dev);
+ mlx5_dev_interrupt_handler_uninstall(dev);
+ mlx5_traffic_disable(dev);
/* Prevent crashes when queues are still in use. */
dev->rx_pkt_burst = removed_rx_burst;
dev->tx_pkt_burst = removed_tx_burst;
@@ -179,7 +244,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
/* XXX race condition if mlx5_rx_burst() is still running. */
usleep(1000);
for (i = 0; (i != priv->rxqs_n); ++i)
- mlx5_priv_rxq_release(priv, i);
+ mlx5_rxq_release(dev, i);
priv->rxqs_n = 0;
priv->rxqs = NULL;
}
@@ -187,10 +252,13 @@ mlx5_dev_close(struct rte_eth_dev *dev)
/* XXX race condition if mlx5_tx_burst() is still running. */
usleep(1000);
for (i = 0; (i != priv->txqs_n); ++i)
- mlx5_priv_txq_release(priv, i);
+ mlx5_txq_release(dev, i);
priv->txqs_n = 0;
priv->txqs = NULL;
}
+ mlx5_flow_delete_drop_queue(dev);
+ mlx5_mprq_free_mp(dev);
+ mlx5_mr_release(dev);
if (priv->pd != NULL) {
assert(priv->ctx != NULL);
claim_zero(mlx5_glue->dealloc_pd(priv->pd));
@@ -202,32 +270,39 @@ mlx5_dev_close(struct rte_eth_dev *dev)
if (priv->reta_idx != NULL)
rte_free(priv->reta_idx);
if (priv->primary_socket)
- priv_socket_uninit(priv);
- ret = mlx5_priv_hrxq_ibv_verify(priv);
- if (ret)
- WARN("%p: some Hash Rx queue still remain", (void *)priv);
- ret = mlx5_priv_ind_table_ibv_verify(priv);
+ mlx5_socket_uninit(dev);
+ if (priv->config.vf)
+ mlx5_nl_mac_addr_flush(dev);
+ if (priv->nl_socket >= 0)
+ close(priv->nl_socket);
+ ret = mlx5_hrxq_ibv_verify(dev);
if (ret)
- WARN("%p: some Indirection table still remain", (void *)priv);
- ret = mlx5_priv_rxq_ibv_verify(priv);
+ DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
+ dev->data->port_id);
+ ret = mlx5_ind_table_ibv_verify(dev);
if (ret)
- WARN("%p: some Verbs Rx queue still remain", (void *)priv);
- ret = mlx5_priv_rxq_verify(priv);
+ DRV_LOG(WARNING, "port %u some indirection table still remain",
+ dev->data->port_id);
+ ret = mlx5_rxq_ibv_verify(dev);
if (ret)
- WARN("%p: some Rx Queues still remain", (void *)priv);
- ret = mlx5_priv_txq_ibv_verify(priv);
+ DRV_LOG(WARNING, "port %u some Verbs Rx queue still remain",
+ dev->data->port_id);
+ ret = mlx5_rxq_verify(dev);
if (ret)
- WARN("%p: some Verbs Tx queue still remain", (void *)priv);
- ret = mlx5_priv_txq_verify(priv);
+ DRV_LOG(WARNING, "port %u some Rx queues still remain",
+ dev->data->port_id);
+ ret = mlx5_txq_ibv_verify(dev);
if (ret)
- WARN("%p: some Tx Queues still remain", (void *)priv);
- ret = priv_flow_verify(priv);
+ DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain",
+ dev->data->port_id);
+ ret = mlx5_txq_verify(dev);
if (ret)
- WARN("%p: some flows still remain", (void *)priv);
- ret = priv_mr_verify(priv);
+ DRV_LOG(WARNING, "port %u some Tx queues still remain",
+ dev->data->port_id);
+ ret = mlx5_flow_verify(dev);
if (ret)
- WARN("%p: some Memory Region still remain", (void *)priv);
- priv_unlock(priv);
+ DRV_LOG(WARNING, "port %u some flows still remain",
+ dev->data->port_id);
memset(priv, 0, sizeof(*priv));
}
@@ -260,6 +335,7 @@ const struct eth_dev_ops mlx5_dev_ops = {
.mac_addr_remove = mlx5_mac_addr_remove,
.mac_addr_add = mlx5_mac_addr_add,
.mac_addr_set = mlx5_mac_addr_set,
+ .set_mc_addr_list = mlx5_set_mc_addr_list,
.mtu_set = mlx5_dev_set_mtu,
.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
.vlan_offload_set = mlx5_vlan_offload_set,
@@ -312,6 +388,7 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = {
.mac_addr_remove = mlx5_mac_addr_remove,
.mac_addr_add = mlx5_mac_addr_add,
.mac_addr_set = mlx5_mac_addr_set,
+ .set_mc_addr_list = mlx5_set_mc_addr_list,
.mtu_set = mlx5_dev_set_mtu,
.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
.vlan_offload_set = mlx5_vlan_offload_set,
@@ -367,7 +444,7 @@ mlx5_dev_idx(struct rte_pci_addr *pci_addr)
* User data.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
@@ -378,11 +455,20 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
errno = 0;
tmp = strtoul(val, NULL, 0);
if (errno) {
- WARN("%s: \"%s\" is not a valid integer", key, val);
- return errno;
+ rte_errno = errno;
+ DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
+ return -rte_errno;
}
if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
config->cqe_comp = !!tmp;
+ } else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) {
+ config->mprq.enabled = !!tmp;
+ } else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) {
+ config->mprq.stride_num_n = tmp;
+ } else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) {
+ config->mprq.max_memcpy_len = tmp;
+ } else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) {
+ config->mprq.min_rxqs_num = tmp;
} else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
config->txq_inline = tmp;
} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
@@ -397,9 +483,14 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
config->tx_vec_en = !!tmp;
} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
config->rx_vec_en = !!tmp;
+ } else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
+ config->l3_vxlan_en = !!tmp;
+ } else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
+ config->vf_nl_en = !!tmp;
} else {
- WARN("%s: unknown parameter", key);
- return -EINVAL;
+ DRV_LOG(WARNING, "%s: unknown parameter", key);
+ rte_errno = EINVAL;
+ return -rte_errno;
}
return 0;
}
@@ -413,13 +504,17 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
* Device arguments structure.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
{
const char **params = (const char *[]){
MLX5_RXQ_CQE_COMP_EN,
+ MLX5_RX_MPRQ_EN,
+ MLX5_RX_MPRQ_LOG_STRIDE_NUM,
+ MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
+ MLX5_RXQS_MIN_MPRQ,
MLX5_TXQ_INLINE,
MLX5_TXQS_MIN_INLINE,
MLX5_TXQ_MPW_EN,
@@ -427,6 +522,8 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
MLX5_TXQ_MAX_INLINE_LEN,
MLX5_TX_VEC_EN,
MLX5_RX_VEC_EN,
+ MLX5_L3_VXLAN_EN,
+ MLX5_VF_NL_EN,
NULL,
};
struct rte_kvargs *kvlist;
@@ -444,9 +541,10 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
if (rte_kvargs_count(kvlist, params[i])) {
ret = rte_kvargs_process(kvlist, params[i],
mlx5_args_check, config);
- if (ret != 0) {
+ if (ret) {
+ rte_errno = EINVAL;
rte_kvargs_free(kvlist);
- return ret;
+ return -rte_errno;
}
}
}
@@ -465,50 +563,60 @@ static struct rte_pci_driver mlx5_driver;
*/
static void *uar_base;
+static int
+find_lower_va_bound(const struct rte_memseg_list *msl __rte_unused,
+ const struct rte_memseg *ms, void *arg)
+{
+ void **addr = arg;
+
+ if (*addr == NULL)
+ *addr = ms->addr;
+ else
+ *addr = RTE_MIN(*addr, ms->addr);
+
+ return 0;
+}
+
/**
* Reserve UAR address space for primary process.
*
- * @param[in] priv
- * Pointer to private structure.
+ * @param[in] dev
+ * Pointer to Ethernet device.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_uar_init_primary(struct priv *priv)
+mlx5_uar_init_primary(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
void *addr = (void *)0;
- int i;
- const struct rte_mem_config *mcfg;
- int ret;
if (uar_base) { /* UAR address space mapped. */
priv->uar_base = uar_base;
return 0;
}
/* find out lower bound of hugepage segments */
- mcfg = rte_eal_get_configuration()->mem_config;
- for (i = 0; i < RTE_MAX_MEMSEG && mcfg->memseg[i].addr; i++) {
- if (addr)
- addr = RTE_MIN(addr, mcfg->memseg[i].addr);
- else
- addr = mcfg->memseg[i].addr;
- }
+ rte_memseg_walk(find_lower_va_bound, &addr);
+
/* keep distance to hugepages to minimize potential conflicts. */
addr = RTE_PTR_SUB(addr, MLX5_UAR_OFFSET + MLX5_UAR_SIZE);
/* anonymous mmap, no real memory consumption. */
addr = mmap(addr, MLX5_UAR_SIZE,
PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (addr == MAP_FAILED) {
- ERROR("Failed to reserve UAR address space, please adjust "
- "MLX5_UAR_SIZE or try --base-virtaddr");
- ret = ENOMEM;
- return ret;
+ DRV_LOG(ERR,
+ "port %u failed to reserve UAR address space, please"
+ " adjust MLX5_UAR_SIZE or try --base-virtaddr",
+ dev->data->port_id);
+ rte_errno = ENOMEM;
+ return -rte_errno;
}
/* Accept either same addr or a new addr returned from mmap if target
* range occupied.
*/
- INFO("Reserved UAR address space: %p", addr);
+ DRV_LOG(INFO, "port %u reserved UAR address space: %p",
+ dev->data->port_id, addr);
priv->uar_base = addr; /* for primary and secondary UAR re-mmap. */
uar_base = addr; /* process local, don't reserve again. */
return 0;
@@ -518,17 +626,17 @@ priv_uar_init_primary(struct priv *priv)
* Reserve UAR address space for secondary process, align with
* primary process.
*
- * @param[in] priv
- * Pointer to private structure.
+ * @param[in] dev
+ * Pointer to Ethernet device.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_uar_init_secondary(struct priv *priv)
+mlx5_uar_init_secondary(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
void *addr;
- int ret;
assert(priv->uar_base);
if (uar_base) { /* already reserved. */
@@ -539,20 +647,23 @@ priv_uar_init_secondary(struct priv *priv)
addr = mmap(priv->uar_base, MLX5_UAR_SIZE,
PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (addr == MAP_FAILED) {
- ERROR("UAR mmap failed: %p size: %llu",
- priv->uar_base, MLX5_UAR_SIZE);
- ret = ENXIO;
- return ret;
+ DRV_LOG(ERR, "port %u UAR mmap failed: %p size: %llu",
+ dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
+ rte_errno = ENXIO;
+ return -rte_errno;
}
if (priv->uar_base != addr) {
- ERROR("UAR address %p size %llu occupied, please adjust "
- "MLX5_UAR_OFFSET or try EAL parameter --base-virtaddr",
- priv->uar_base, MLX5_UAR_SIZE);
- ret = ENXIO;
- return ret;
+ DRV_LOG(ERR,
+ "port %u UAR address %p size %llu occupied, please"
+ " adjust MLX5_UAR_OFFSET or try EAL parameter"
+ " --base-virtaddr",
+ dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
+ rte_errno = ENXIO;
+ return -rte_errno;
}
uar_base = addr; /* process local, don't reserve again */
- INFO("Reserved UAR address space: %p", addr);
+ DRV_LOG(INFO, "port %u reserved UAR address space: %p",
+ dev->data->port_id, addr);
return 0;
}
@@ -568,45 +679,57 @@ priv_uar_init_secondary(struct priv *priv)
* PCI device information.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
{
- struct ibv_device **list;
+ struct ibv_device **list = NULL;
struct ibv_device *ibv_dev;
int err = 0;
struct ibv_context *attr_ctx = NULL;
struct ibv_device_attr_ex device_attr;
- unsigned int sriov;
+ unsigned int vf = 0;
unsigned int mps;
unsigned int cqe_comp;
unsigned int tunnel_en = 0;
+ unsigned int mpls_en = 0;
+ unsigned int swp = 0;
+ unsigned int verb_priorities = 0;
+ unsigned int mprq = 0;
+ unsigned int mprq_min_stride_size_n = 0;
+ unsigned int mprq_max_stride_size_n = 0;
+ unsigned int mprq_min_stride_num_n = 0;
+ unsigned int mprq_max_stride_num_n = 0;
int idx;
int i;
- struct mlx5dv_context attrs_out;
+ struct mlx5dv_context attrs_out = {0};
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
struct ibv_counter_set_description cs_desc;
#endif
- (void)pci_drv;
+ /* Prepare shared data between primary and secondary process. */
+ mlx5_prepare_shared_data();
assert(pci_drv == &mlx5_driver);
/* Get mlx5_dev[] index. */
idx = mlx5_dev_idx(&pci_dev->addr);
if (idx == -1) {
- ERROR("this driver cannot support any more adapters");
- return -ENOMEM;
+ DRV_LOG(ERR, "this driver cannot support any more adapters");
+ err = ENOMEM;
+ goto error;
}
- DEBUG("using driver device index %d", idx);
-
+ DRV_LOG(DEBUG, "using driver device index %d", idx);
/* Save PCI address. */
mlx5_dev[idx].pci_addr = pci_dev->addr;
list = mlx5_glue->get_device_list(&i);
if (list == NULL) {
assert(errno);
+ err = errno;
if (errno == ENOSYS)
- ERROR("cannot list devices, is ib_uverbs loaded?");
- return -errno;
+ DRV_LOG(ERR,
+ "cannot list devices, is ib_uverbs loaded?");
+ goto error;
}
assert(i >= 0);
/*
@@ -617,7 +740,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
struct rte_pci_addr pci_addr;
--i;
- DEBUG("checking device \"%s\"", list[i]->name);
+ DRV_LOG(DEBUG, "checking device \"%s\"", list[i]->name);
if (mlx5_ibv_device_to_pci_addr(list[i], &pci_addr))
continue;
if ((pci_dev->addr.domain != pci_addr.domain) ||
@@ -625,7 +748,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
(pci_dev->addr.devid != pci_addr.devid) ||
(pci_dev->addr.function != pci_addr.function))
continue;
- sriov = ((pci_dev->id.device_id ==
+ DRV_LOG(INFO, "PCI information matches, using device \"%s\"",
+ list[i]->name);
+ vf = ((pci_dev->id.device_id ==
PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) ||
(pci_dev->id.device_id ==
PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF) ||
@@ -633,70 +758,121 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
PCI_DEVICE_ID_MELLANOX_CONNECTX5VF) ||
(pci_dev->id.device_id ==
PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF));
- switch (pci_dev->id.device_id) {
- case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
- tunnel_en = 1;
- break;
- case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
- case PCI_DEVICE_ID_MELLANOX_CONNECTX5:
- case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
- case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX:
- case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
- tunnel_en = 1;
- break;
- default:
- break;
- }
- INFO("PCI information matches, using device \"%s\""
- " (SR-IOV: %s)",
- list[i]->name,
- sriov ? "true" : "false");
attr_ctx = mlx5_glue->open_device(list[i]);
- err = errno;
+ rte_errno = errno;
+ err = rte_errno;
break;
}
if (attr_ctx == NULL) {
- mlx5_glue->free_device_list(list);
switch (err) {
case 0:
- ERROR("cannot access device, is mlx5_ib loaded?");
- return -ENODEV;
+ DRV_LOG(ERR,
+ "cannot access device, is mlx5_ib loaded?");
+ err = ENODEV;
+ break;
case EINVAL:
- ERROR("cannot use device, are drivers up to date?");
- return -EINVAL;
+ DRV_LOG(ERR,
+ "cannot use device, are drivers up to date?");
+ break;
}
- assert(err > 0);
- return -err;
+ goto error;
}
ibv_dev = list[i];
-
- DEBUG("device opened");
+ DRV_LOG(DEBUG, "device opened");
+#ifdef HAVE_IBV_MLX5_MOD_SWP
+ attrs_out.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
+#endif
/*
* Multi-packet send is supported by ConnectX-4 Lx PF as well
* as all ConnectX-5 devices.
*/
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ attrs_out.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
+#endif
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+ attrs_out.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
+#endif
mlx5_glue->dv_query_device(attr_ctx, &attrs_out);
if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
- DEBUG("Enhanced MPW is supported");
+ DRV_LOG(DEBUG, "enhanced MPW is supported");
mps = MLX5_MPW_ENHANCED;
} else {
- DEBUG("MPW is supported");
+ DRV_LOG(DEBUG, "MPW is supported");
mps = MLX5_MPW;
}
} else {
- DEBUG("MPW isn't supported");
+ DRV_LOG(DEBUG, "MPW isn't supported");
mps = MLX5_MPW_DISABLED;
}
+#ifdef HAVE_IBV_MLX5_MOD_SWP
+ if (attrs_out.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
+ swp = attrs_out.sw_parsing_caps.sw_parsing_offloads;
+ DRV_LOG(DEBUG, "SWP support: %u", swp);
+#endif
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+ if (attrs_out.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
+ struct mlx5dv_striding_rq_caps mprq_caps =
+ attrs_out.striding_rq_caps;
+
+ DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d",
+ mprq_caps.min_single_stride_log_num_of_bytes);
+ DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d",
+ mprq_caps.max_single_stride_log_num_of_bytes);
+ DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d",
+ mprq_caps.min_single_wqe_log_num_of_strides);
+ DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d",
+ mprq_caps.max_single_wqe_log_num_of_strides);
+ DRV_LOG(DEBUG, "\tsupported_qpts: %d",
+ mprq_caps.supported_qpts);
+ DRV_LOG(DEBUG, "device supports Multi-Packet RQ");
+ mprq = 1;
+ mprq_min_stride_size_n =
+ mprq_caps.min_single_stride_log_num_of_bytes;
+ mprq_max_stride_size_n =
+ mprq_caps.max_single_stride_log_num_of_bytes;
+ mprq_min_stride_num_n =
+ mprq_caps.min_single_wqe_log_num_of_strides;
+ mprq_max_stride_num_n =
+ mprq_caps.max_single_wqe_log_num_of_strides;
+ }
+#endif
if (RTE_CACHE_LINE_SIZE == 128 &&
!(attrs_out.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
cqe_comp = 0;
else
cqe_comp = 1;
- if (mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr))
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ if (attrs_out.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
+ tunnel_en = ((attrs_out.tunnel_offloads_caps &
+ MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
+ (attrs_out.tunnel_offloads_caps &
+ MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE));
+ }
+ DRV_LOG(DEBUG, "tunnel offloading is %ssupported",
+ tunnel_en ? "" : "not ");
+#else
+ DRV_LOG(WARNING,
+ "tunnel offloading disabled due to old OFED/rdma-core version");
+#endif
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ mpls_en = ((attrs_out.tunnel_offloads_caps &
+ MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
+ (attrs_out.tunnel_offloads_caps &
+ MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
+ DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
+ mpls_en ? "" : "not ");
+#else
+ DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to"
+ " old OFED/rdma-core version or firmware configuration");
+#endif
+ err = mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr);
+ if (err) {
+ DEBUG("ibv_query_device_ex() failed");
goto error;
- INFO("%u port(s) detected", device_attr.orig_attr.phys_port_cnt);
-
+ }
+ DRV_LOG(INFO, "%u port(s) detected",
+ device_attr.orig_attr.phys_port_cnt);
for (i = 0; i < device_attr.orig_attr.phys_port_cnt; i++) {
char name[RTE_ETH_NAME_MAX_LEN];
int len;
@@ -706,21 +882,29 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
struct ibv_port_attr port_attr;
struct ibv_pd *pd = NULL;
struct priv *priv = NULL;
- struct rte_eth_dev *eth_dev;
+ struct rte_eth_dev *eth_dev = NULL;
struct ibv_device_attr_ex device_attr_ex;
struct ether_addr mac;
- uint16_t num_vfs = 0;
- struct ibv_device_attr_ex device_attr;
struct mlx5_dev_config config = {
.cqe_comp = cqe_comp,
.mps = mps,
.tunnel_en = tunnel_en,
+ .mpls_en = mpls_en,
.tx_vec_en = 1,
.rx_vec_en = 1,
.mpw_hdr_dseg = 0,
.txq_inline = MLX5_ARG_UNSET,
.txqs_inline = MLX5_ARG_UNSET,
.inline_max_packet_sz = MLX5_ARG_UNSET,
+ .vf_nl_en = 1,
+ .swp = !!swp,
+ .mprq = {
+ .enabled = 0, /* Disabled by default. */
+ .stride_num_n = RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
+ mprq_min_stride_num_n),
+ .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
+ .min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
+ },
};
len = snprintf(name, sizeof(name), PCI_PRI_FMT,
@@ -728,94 +912,87 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
pci_dev->addr.devid, pci_dev->addr.function);
if (device_attr.orig_attr.phys_port_cnt > 1)
snprintf(name + len, sizeof(name), " port %u", i);
-
mlx5_dev[idx].ports |= test;
-
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
eth_dev = rte_eth_dev_attach_secondary(name);
if (eth_dev == NULL) {
- ERROR("can not attach rte ethdev");
- err = ENOMEM;
+ DRV_LOG(ERR, "can not attach rte ethdev");
+ rte_errno = ENOMEM;
+ err = rte_errno;
goto error;
}
eth_dev->device = &pci_dev->device;
eth_dev->dev_ops = &mlx5_dev_sec_ops;
- priv = eth_dev->data->dev_private;
- err = priv_uar_init_secondary(priv);
- if (err < 0) {
- err = -err;
+ err = mlx5_uar_init_secondary(eth_dev);
+ if (err) {
+ err = rte_errno;
goto error;
}
/* Receive command fd from primary process */
- err = priv_socket_connect(priv);
+ err = mlx5_socket_connect(eth_dev);
if (err < 0) {
- err = -err;
+ err = rte_errno;
goto error;
}
/* Remap UAR for Tx queues. */
- err = priv_tx_uar_remap(priv, err);
- if (err)
+ err = mlx5_tx_uar_remap(eth_dev, err);
+ if (err) {
+ err = rte_errno;
goto error;
+ }
/*
* Ethdev pointer is still required as input since
* the primary device is not accessible from the
* secondary process.
*/
eth_dev->rx_pkt_burst =
- priv_select_rx_function(priv, eth_dev);
+ mlx5_select_rx_function(eth_dev);
eth_dev->tx_pkt_burst =
- priv_select_tx_function(priv, eth_dev);
+ mlx5_select_tx_function(eth_dev);
+ rte_eth_dev_probing_finish(eth_dev);
continue;
}
-
- DEBUG("using port %u (%08" PRIx32 ")", port, test);
-
+ DRV_LOG(DEBUG, "using port %u (%08" PRIx32 ")", port, test);
ctx = mlx5_glue->open_device(ibv_dev);
if (ctx == NULL) {
err = ENODEV;
goto port_error;
}
-
- mlx5_glue->query_device_ex(ctx, NULL, &device_attr);
/* Check port status. */
err = mlx5_glue->query_port(ctx, port, &port_attr);
if (err) {
- ERROR("port query failed: %s", strerror(err));
+ DRV_LOG(ERR, "port query failed: %s", strerror(err));
goto port_error;
}
-
if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
- ERROR("port %d is not configured in Ethernet mode",
- port);
+ DRV_LOG(ERR,
+ "port %d is not configured in Ethernet mode",
+ port);
err = EINVAL;
goto port_error;
}
-
if (port_attr.state != IBV_PORT_ACTIVE)
- DEBUG("port %d is not active: \"%s\" (%d)",
- port, mlx5_glue->port_state_str(port_attr.state),
- port_attr.state);
-
+ DRV_LOG(DEBUG, "port %d is not active: \"%s\" (%d)",
+ port,
+ mlx5_glue->port_state_str(port_attr.state),
+ port_attr.state);
/* Allocate protection domain. */
pd = mlx5_glue->alloc_pd(ctx);
if (pd == NULL) {
- ERROR("PD allocation failure");
+ DRV_LOG(ERR, "PD allocation failure");
err = ENOMEM;
goto port_error;
}
-
mlx5_dev[idx].ports |= test;
-
/* from rte_ethdev.c */
priv = rte_zmalloc("ethdev private structure",
sizeof(*priv),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- ERROR("priv allocation failure");
+ DRV_LOG(ERR, "priv allocation failure");
err = ENOMEM;
goto port_error;
}
-
priv->ctx = ctx;
strncpy(priv->ibdev_path, priv->ctx->device->ibdev_path,
sizeof(priv->ibdev_path));
@@ -825,34 +1002,27 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv->mtu = ETHER_MTU;
err = mlx5_args(&config, pci_dev->device.devargs);
if (err) {
- ERROR("failed to process device arguments: %s",
- strerror(err));
+ DRV_LOG(ERR, "failed to process device arguments: %s",
+ strerror(err));
+ err = rte_errno;
goto port_error;
}
- if (mlx5_glue->query_device_ex(ctx, NULL, &device_attr_ex)) {
- ERROR("ibv_query_device_ex() failed");
+ err = mlx5_glue->query_device_ex(ctx, NULL, &device_attr_ex);
+ if (err) {
+ DRV_LOG(ERR, "ibv_query_device_ex() failed");
goto port_error;
}
-
config.hw_csum = !!(device_attr_ex.device_cap_flags_ex &
IBV_DEVICE_RAW_IP_CSUM);
- DEBUG("checksum offloading is %ssupported",
- (config.hw_csum ? "" : "not "));
-
-#ifdef HAVE_IBV_DEVICE_VXLAN_SUPPORT
- config.hw_csum_l2tun =
- !!(exp_device_attr.exp_device_cap_flags &
- IBV_DEVICE_VXLAN_SUPPORT);
-#endif
- DEBUG("Rx L2 tunnel checksum offloads are %ssupported",
- (config.hw_csum_l2tun ? "" : "not "));
-
+ DRV_LOG(DEBUG, "checksum offloading is %ssupported",
+ (config.hw_csum ? "" : "not "));
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
config.flow_counter_en = !!(device_attr.max_counter_sets);
mlx5_glue->describe_counter_set(ctx, 0, &cs_desc);
- DEBUG("counter type = %d, num of cs = %ld, attributes = %d",
- cs_desc.counter_type, cs_desc.num_of_cs,
- cs_desc.attributes);
+ DRV_LOG(DEBUG,
+ "counter type = %d, num of cs = %ld, attributes = %d",
+ cs_desc.counter_type, cs_desc.num_of_cs,
+ cs_desc.attributes);
#endif
config.ind_table_max_size =
device_attr_ex.rss_caps.max_rwq_indirection_table_size;
@@ -861,26 +1031,25 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
if (config.ind_table_max_size >
(unsigned int)ETH_RSS_RETA_SIZE_512)
config.ind_table_max_size = ETH_RSS_RETA_SIZE_512;
- DEBUG("maximum RX indirection table size is %u",
- config.ind_table_max_size);
+ DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
+ config.ind_table_max_size);
config.hw_vlan_strip = !!(device_attr_ex.raw_packet_caps &
IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
- DEBUG("VLAN stripping is %ssupported",
- (config.hw_vlan_strip ? "" : "not "));
+ DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
+ (config.hw_vlan_strip ? "" : "not "));
config.hw_fcs_strip = !!(device_attr_ex.raw_packet_caps &
IBV_RAW_PACKET_CAP_SCATTER_FCS);
- DEBUG("FCS stripping configuration is %ssupported",
- (config.hw_fcs_strip ? "" : "not "));
+ DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
+ (config.hw_fcs_strip ? "" : "not "));
#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
config.hw_padding = !!device_attr_ex.rx_pad_end_addr_align;
#endif
- DEBUG("hardware RX end alignment padding is %ssupported",
- (config.hw_padding ? "" : "not "));
-
- priv_get_num_vfs(priv, &num_vfs);
- config.sriov = (num_vfs || sriov);
+ DRV_LOG(DEBUG,
+ "hardware Rx end alignment padding is %ssupported",
+ (config.hw_padding ? "" : "not "));
+ config.vf = vf;
config.tso = ((device_attr_ex.tso_caps.max_tso > 0) &&
(device_attr_ex.tso_caps.supported_qpts &
(1 << IBV_QPT_RAW_PACKET)));
@@ -888,71 +1057,106 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
config.tso_max_payload_sz =
device_attr_ex.tso_caps.max_tso;
if (config.mps && !mps) {
- ERROR("multi-packet send not supported on this device"
- " (" MLX5_TXQ_MPW_EN ")");
+ DRV_LOG(ERR,
+ "multi-packet send not supported on this device"
+ " (" MLX5_TXQ_MPW_EN ")");
err = ENOTSUP;
goto port_error;
}
- INFO("%sMPS is %s",
- config.mps == MLX5_MPW_ENHANCED ? "Enhanced " : "",
- config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
+ DRV_LOG(INFO, "%s MPS is %s",
+ config.mps == MLX5_MPW_ENHANCED ? "enhanced " : "",
+ config.mps != MLX5_MPW_DISABLED ? "enabled" :
+ "disabled");
if (config.cqe_comp && !cqe_comp) {
- WARN("Rx CQE compression isn't supported");
+ DRV_LOG(WARNING, "Rx CQE compression isn't supported");
config.cqe_comp = 0;
}
- err = priv_uar_init_primary(priv);
- if (err)
+ config.mprq.enabled = config.mprq.enabled && mprq;
+ if (config.mprq.enabled) {
+ if (config.mprq.stride_num_n > mprq_max_stride_num_n ||
+ config.mprq.stride_num_n < mprq_min_stride_num_n) {
+ config.mprq.stride_num_n =
+ RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
+ mprq_min_stride_num_n);
+ DRV_LOG(WARNING,
+ "the number of strides"
+ " for Multi-Packet RQ is out of range,"
+ " setting default value (%u)",
+ 1 << config.mprq.stride_num_n);
+ }
+ config.mprq.min_stride_size_n = mprq_min_stride_size_n;
+ config.mprq.max_stride_size_n = mprq_max_stride_size_n;
+ }
+ eth_dev = rte_eth_dev_allocate(name);
+ if (eth_dev == NULL) {
+ DRV_LOG(ERR, "can not allocate rte ethdev");
+ err = ENOMEM;
goto port_error;
+ }
+ eth_dev->data->dev_private = priv;
+ priv->dev_data = eth_dev->data;
+ eth_dev->data->mac_addrs = priv->mac;
+ eth_dev->device = &pci_dev->device;
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->device->driver = &mlx5_driver.driver;
+ err = mlx5_uar_init_primary(eth_dev);
+ if (err) {
+ err = rte_errno;
+ goto port_error;
+ }
/* Configure the first MAC address by default. */
- if (priv_get_mac(priv, &mac.addr_bytes)) {
- ERROR("cannot get MAC address, is mlx5_en loaded?"
- " (errno: %s)", strerror(errno));
+ if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
+ DRV_LOG(ERR,
+ "port %u cannot get MAC address, is mlx5_en"
+ " loaded? (errno: %s)",
+ eth_dev->data->port_id, strerror(errno));
err = ENODEV;
goto port_error;
}
- INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
- priv->port,
- mac.addr_bytes[0], mac.addr_bytes[1],
- mac.addr_bytes[2], mac.addr_bytes[3],
- mac.addr_bytes[4], mac.addr_bytes[5]);
+ DRV_LOG(INFO,
+ "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
+ eth_dev->data->port_id,
+ mac.addr_bytes[0], mac.addr_bytes[1],
+ mac.addr_bytes[2], mac.addr_bytes[3],
+ mac.addr_bytes[4], mac.addr_bytes[5]);
#ifndef NDEBUG
{
char ifname[IF_NAMESIZE];
- if (priv_get_ifname(priv, &ifname) == 0)
- DEBUG("port %u ifname is \"%s\"",
- priv->port, ifname);
+ if (mlx5_get_ifname(eth_dev, &ifname) == 0)
+ DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
+ eth_dev->data->port_id, ifname);
else
- DEBUG("port %u ifname is unknown", priv->port);
+ DRV_LOG(DEBUG, "port %u ifname is unknown",
+ eth_dev->data->port_id);
}
#endif
/* Get actual MTU if possible. */
- priv_get_mtu(priv, &priv->mtu);
- DEBUG("port %u MTU is %u", priv->port, priv->mtu);
-
- eth_dev = rte_eth_dev_allocate(name);
- if (eth_dev == NULL) {
- ERROR("can not allocate rte ethdev");
- err = ENOMEM;
+ err = mlx5_get_mtu(eth_dev, &priv->mtu);
+ if (err) {
+ err = rte_errno;
goto port_error;
}
- eth_dev->data->dev_private = priv;
- eth_dev->data->mac_addrs = priv->mac;
- eth_dev->device = &pci_dev->device;
- rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->device->driver = &mlx5_driver.driver;
+ DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id,
+ priv->mtu);
/*
* Initialize burst functions to prevent crashes before link-up.
*/
eth_dev->rx_pkt_burst = removed_rx_burst;
eth_dev->tx_pkt_burst = removed_tx_burst;
- priv->dev = eth_dev;
eth_dev->dev_ops = &mlx5_dev_ops;
/* Register MAC address. */
claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
+ priv->nl_socket = -1;
+ priv->nl_sn = 0;
+ if (vf && config.vf_nl_en) {
+ priv->nl_socket = mlx5_nl_init(RTMGRP_LINK);
+ if (priv->nl_socket < 0)
+ priv->nl_socket = -1;
+ mlx5_nl_mac_addr_sync(eth_dev);
+ }
TAILQ_INIT(&priv->flows);
TAILQ_INIT(&priv->ctrl_flows);
-
/* Hint libmlx5 to use PMD allocator for data plane resources */
struct mlx5dv_ctx_allocators alctr = {
.alloc = &mlx5_alloc_verbs_buf,
@@ -962,14 +1166,55 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
mlx5_glue->dv_set_context_attr(ctx,
MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
(void *)((uintptr_t)&alctr));
-
/* Bring Ethernet device up. */
- DEBUG("forcing Ethernet interface up");
- priv_set_flags(priv, ~IFF_UP, IFF_UP);
+ DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
+ eth_dev->data->port_id);
+ mlx5_set_link_up(eth_dev);
+ /*
+ * Even though the interrupt handler is not installed yet,
+ * interrupts will still trigger on the asyn_fd from
+ * Verbs context returned by ibv_open_device().
+ */
+ mlx5_link_update(eth_dev, 0);
/* Store device configuration on private structure. */
priv->config = config;
+ /* Create drop queue. */
+ err = mlx5_flow_create_drop_queue(eth_dev);
+ if (err) {
+ DRV_LOG(ERR, "port %u drop queue allocation failed: %s",
+ eth_dev->data->port_id, strerror(rte_errno));
+ err = rte_errno;
+ goto port_error;
+ }
+ /* Supported Verbs flow priority number detection. */
+ if (verb_priorities == 0)
+ verb_priorities = mlx5_get_max_verbs_prio(eth_dev);
+ if (verb_priorities < MLX5_VERBS_FLOW_PRIO_8) {
+ DRV_LOG(ERR, "port %u wrong Verbs flow priorities: %u",
+ eth_dev->data->port_id, verb_priorities);
+ goto port_error;
+ }
+ priv->config.max_verbs_prio = verb_priorities;
+ /*
+ * Once the device is added to the list of memory event
+ * callback, its global MR cache table cannot be expanded
+ * on the fly because of deadlock. If it overflows, lookup
+ * should be done by searching MR list linearly, which is slow.
+ */
+ err = mlx5_mr_btree_init(&priv->mr.cache,
+ MLX5_MR_BTREE_CACHE_N * 2,
+ eth_dev->device->numa_node);
+ if (err) {
+ err = rte_errno;
+ goto port_error;
+ }
+ /* Add device to memory callback list. */
+ rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
+ LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
+ priv, mem_event_cb);
+ rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
+ rte_eth_dev_probing_finish(eth_dev);
continue;
-
port_error:
if (priv)
rte_free(priv);
@@ -977,29 +1222,31 @@ port_error:
claim_zero(mlx5_glue->dealloc_pd(pd));
if (ctx)
claim_zero(mlx5_glue->close_device(ctx));
+ if (eth_dev && rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_eth_dev_release_port(eth_dev);
break;
}
-
/*
* XXX if something went wrong in the loop above, there is a resource
* leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as
* long as the dpdk does not provide a way to deallocate a ethdev and a
* way to enumerate the registered ethdevs to free the previous ones.
*/
-
/* no port found, complain */
if (!mlx5_dev[idx].ports) {
- err = ENODEV;
- goto error;
+ rte_errno = ENODEV;
+ err = rte_errno;
}
-
error:
if (attr_ctx)
claim_zero(mlx5_glue->close_device(attr_ctx));
if (list)
mlx5_glue->free_device_list(list);
- assert(err >= 0);
- return -err;
+ if (err) {
+ rte_errno = err;
+ return -rte_errno;
+ }
+ return 0;
}
static const struct rte_pci_id mlx5_pci_id_map[] = {
@@ -1036,6 +1283,10 @@ static const struct rte_pci_id mlx5_pci_id_map[] = {
PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
},
{
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
+ },
+ {
.vendor_id = 0
}
};
@@ -1052,11 +1303,54 @@ static struct rte_pci_driver mlx5_driver = {
#ifdef RTE_LIBRTE_MLX5_DLOPEN_DEPS
/**
+ * Suffix RTE_EAL_PMD_PATH with "-glue".
+ *
+ * This function performs a sanity check on RTE_EAL_PMD_PATH before
+ * suffixing its last component.
+ *
+ * @param[out] buf
+ *   Output buffer; it should be large enough, otherwise NULL is returned.
+ * @param size
+ *   Size of @p buf.
+ *
+ * @return
+ * Pointer to @p buf or @p NULL in case suffix cannot be appended.
+ */
+static char *
+mlx5_glue_path(char *buf, size_t size)
+{
+ static const char *const bad[] = { "/", ".", "..", NULL };
+ const char *path = RTE_EAL_PMD_PATH;
+ size_t len = strlen(path);
+ size_t off;
+ int i;
+
+ while (len && path[len - 1] == '/')
+ --len;
+ for (off = len; off && path[off - 1] != '/'; --off)
+ ;
+ for (i = 0; bad[i]; ++i)
+ if (!strncmp(path + off, bad[i], (int)(len - off)))
+ goto error;
+ i = snprintf(buf, size, "%.*s-glue", (int)len, path);
+ if (i == -1 || (size_t)i >= size)
+ goto error;
+ return buf;
+error:
+ DRV_LOG(ERR,
+ "unable to append \"-glue\" to last component of"
+ " RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"),"
+ " please re-configure DPDK");
+ return NULL;
+}
+
+/**
* Initialization routine for run-time dependency on rdma-core.
*/
static int
mlx5_glue_init(void)
{
+ char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")];
const char *path[] = {
/*
* A basic security check is necessary before trusting
@@ -1064,7 +1358,13 @@ mlx5_glue_init(void)
*/
(geteuid() == getuid() && getegid() == getgid() ?
getenv("MLX5_GLUE_PATH") : NULL),
- RTE_EAL_PMD_PATH,
+ /*
+ * When RTE_EAL_PMD_PATH is set, use its glue-suffixed
+ * variant, otherwise let dlopen() look up libraries on its
+ * own.
+ */
+ (*RTE_EAL_PMD_PATH ?
+ mlx5_glue_path(glue_path, sizeof(glue_path)) : ""),
};
unsigned int i = 0;
void *handle = NULL;
@@ -1095,7 +1395,8 @@ mlx5_glue_init(void)
break;
if (sizeof(name) != (size_t)ret + 1)
continue;
- DEBUG("looking for rdma-core glue as \"%s\"", name);
+ DRV_LOG(DEBUG, "looking for rdma-core glue as \"%s\"",
+ name);
handle = dlopen(name, RTLD_LAZY);
break;
} while (1);
@@ -1107,7 +1408,7 @@ mlx5_glue_init(void)
rte_errno = EINVAL;
dlmsg = dlerror();
if (dlmsg)
- WARN("cannot load glue library: %s", dlmsg);
+ DRV_LOG(WARNING, "cannot load glue library: %s", dlmsg);
goto glue_error;
}
sym = dlsym(handle, "mlx5_glue");
@@ -1115,7 +1416,7 @@ mlx5_glue_init(void)
rte_errno = EINVAL;
dlmsg = dlerror();
if (dlmsg)
- ERROR("cannot resolve glue symbol: %s", dlmsg);
+ DRV_LOG(ERR, "cannot resolve glue symbol: %s", dlmsg);
goto glue_error;
}
mlx5_glue = *sym;
@@ -1123,9 +1424,9 @@ mlx5_glue_init(void)
glue_error:
if (handle)
dlclose(handle);
- WARN("cannot initialize PMD due to missing run-time"
- " dependency on rdma-core libraries (libibverbs,"
- " libmlx5)");
+ DRV_LOG(WARNING,
+ "cannot initialize PMD due to missing run-time dependency on"
+ " rdma-core libraries (libibverbs, libmlx5)");
return -rte_errno;
}
@@ -1138,8 +1439,10 @@ RTE_INIT(rte_mlx5_pmd_init);
static void
rte_mlx5_pmd_init(void)
{
- /* Build the static table for ptype conversion. */
+ /* Build the static tables for Verbs conversion. */
mlx5_set_ptype_table();
+ mlx5_set_cksum_table();
+ mlx5_set_swp_types_table();
/*
* RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
* huge pages. Calling ibv_fork_init() during init allows
@@ -1165,8 +1468,9 @@ rte_mlx5_pmd_init(void)
}
#endif
if (strcmp(mlx5_glue->version, MLX5_GLUE_VERSION)) {
- ERROR("rdma-core glue \"%s\" mismatch: \"%s\" is required",
- mlx5_glue->version, MLX5_GLUE_VERSION);
+ DRV_LOG(ERR,
+ "rdma-core glue \"%s\" mismatch: \"%s\" is required",
+ mlx5_glue->version, MLX5_GLUE_VERSION);
return;
}
mlx5_glue->fork_init();
@@ -1176,3 +1480,11 @@ rte_mlx5_pmd_init(void)
RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");
+
+/** Initialize driver log type. */
+RTE_INIT(mlx5_pmd_init_log)
+{
+ mlx5_logtype = rte_log_register("pmd.net.mlx5");
+ if (mlx5_logtype >= 0)
+ rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE);
+}
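As a worked example of the glue path handling added above, the snippet below reproduces only the suffixing step of mlx5_glue_path() (the sanity check on the last component is omitted); the install path used here is hypothetical.

    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
            const char *path = "/usr/lib/dpdk/pmds-18.05/"; /* hypothetical */
            char buf[128];
            size_t len = strlen(path);

            /* Strip trailing '/' before appending "-glue". */
            while (len && path[len - 1] == '/')
                    --len;
            snprintf(buf, sizeof(buf), "%.*s-glue", (int)len, path);
            puts(buf); /* prints "/usr/lib/dpdk/pmds-18.05-glue" */
            return 0;
    }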
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 965c19f2..997b04a3 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#ifndef RTE_PMD_MLX5_H_
@@ -26,12 +26,13 @@
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
-#include <rte_spinlock.h>
+#include <rte_rwlock.h>
#include <rte_interrupts.h>
#include <rte_errno.h>
#include <rte_flow.h>
#include "mlx5_utils.h"
+#include "mlx5_mr.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
@@ -49,8 +50,19 @@ enum {
PCI_DEVICE_ID_MELLANOX_CONNECTX5VF = 0x1018,
PCI_DEVICE_ID_MELLANOX_CONNECTX5EX = 0x1019,
PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF = 0x101a,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5BF = 0xa2d2,
};
+LIST_HEAD(mlx5_dev_list, priv);
+
+/* Shared memory between primary and secondary processes. */
+struct mlx5_shared_data {
+ struct mlx5_dev_list mem_event_cb_list;
+ rte_rwlock_t mem_event_rwlock;
+};
+
+extern struct mlx5_shared_data *mlx5_shared_data;
+
struct mlx5_xstats_ctrl {
/* Number of device stats. */
uint16_t stats_n;
@@ -75,19 +87,34 @@ TAILQ_HEAD(mlx5_flows, rte_flow);
*/
struct mlx5_dev_config {
unsigned int hw_csum:1; /* Checksum offload is supported. */
- unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */
unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
unsigned int hw_padding:1; /* End alignment padding is supported. */
- unsigned int sriov:1; /* This is a VF or PF with VF devices. */
+ unsigned int vf:1; /* This is a VF. */
unsigned int mps:2; /* Multi-packet send supported mode. */
- unsigned int tunnel_en:1; /* Whether tunnel is supported. */
+ unsigned int tunnel_en:1;
+ /* Whether tunnel stateless offloads are supported. */
+ unsigned int mpls_en:1; /* MPLS over GRE/UDP is enabled. */
unsigned int flow_counter_en:1; /* Whether flow counter is supported. */
unsigned int cqe_comp:1; /* CQE compression is enabled. */
unsigned int tso:1; /* Whether TSO is supported. */
unsigned int tx_vec_en:1; /* Tx vector is enabled. */
unsigned int rx_vec_en:1; /* Rx vector is enabled. */
unsigned int mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
+ unsigned int l3_vxlan_en:1; /* Enable L3 VXLAN flow creation. */
+ unsigned int vf_nl_en:1; /* Enable Netlink requests in VF mode. */
+ unsigned int swp:1; /* Tx generic tunnel checksum and TSO offload. */
+ struct {
+ unsigned int enabled:1; /* Whether MPRQ is enabled. */
+ unsigned int stride_num_n; /* Number of strides. */
+ unsigned int min_stride_size_n; /* Min size of a stride. */
+ unsigned int max_stride_size_n; /* Max size of a stride. */
+ unsigned int max_memcpy_len;
+ /* Maximum packet size to memcpy Rx packets. */
+ unsigned int min_rxqs_num;
+ /* Rx queue count threshold to enable MPRQ. */
+ } mprq; /* Configurations for Multi-Packet RQ. */
+ unsigned int max_verbs_prio; /* Number of Verbs flow priorities. */
unsigned int tso_max_payload_sz; /* Maximum TCP payload for TSO. */
unsigned int ind_table_max_size; /* Maximum indirection table size. */
int txq_inline; /* Maximum packet size for inlining. */
@@ -104,6 +131,9 @@ enum mlx5_verbs_alloc_type {
MLX5_VERBS_ALLOC_TYPE_RX_QUEUE,
};
+/* 8 Verbs priorities. */
+#define MLX5_VERBS_FLOW_PRIO_8 8
+
/**
* Verbs allocator needs a context to know in the callback which kind of
* resources it is allocating.
@@ -113,25 +143,30 @@ struct mlx5_verbs_alloc_ctx {
const void *obj; /* Pointer to the DPDK object. */
};
+LIST_HEAD(mlx5_mr_list, mlx5_mr);
+
struct priv {
- struct rte_eth_dev *dev; /* Ethernet device of master process. */
+ LIST_ENTRY(priv) mem_event_cb; /* Called by memory event callback. */
+ struct rte_eth_dev_data *dev_data; /* Pointer to device data. */
struct ibv_context *ctx; /* Verbs context. */
struct ibv_device_attr_ex device_attr; /* Device properties. */
struct ibv_pd *pd; /* Protection Domain. */
char ibdev_path[IBV_SYSFS_PATH_MAX]; /* IB device path for secondary */
struct ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; /* MAC addresses. */
+ BITFIELD_DECLARE(mac_own, uint64_t, MLX5_MAX_MAC_ADDRESSES);
+ /* Bit-field of MAC addresses owned by the PMD. */
uint16_t vlan_filter[MLX5_MAX_VLAN_IDS]; /* VLAN filters table. */
unsigned int vlan_filter_n; /* Number of configured VLAN filters. */
/* Device properties. */
uint16_t mtu; /* Configured MTU. */
uint8_t port; /* Physical port number. */
- unsigned int pending_alarm:1; /* An alarm is pending. */
unsigned int isolated:1; /* Whether isolated mode is enabled. */
/* RX/TX queues. */
unsigned int rxqs_n; /* RX queues array size. */
unsigned int txqs_n; /* TX queues array size. */
struct mlx5_rxq_data *(*rxqs)[]; /* RX queues. */
struct mlx5_txq_data *(*txqs)[]; /* TX queues. */
+ struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
struct rte_eth_rss_conf rss_conf; /* RSS configuration. */
struct rte_intr_handle intr_handle; /* Interrupt handler. */
unsigned int (*reta_idx)[]; /* RETA index table. */
@@ -139,7 +174,13 @@ struct priv {
struct mlx5_hrxq_drop *flow_drop_queue; /* Flow drop queue. */
struct mlx5_flows flows; /* RTE Flow rules. */
struct mlx5_flows ctrl_flows; /* Control flow rules. */
- LIST_HEAD(mr, mlx5_mr) mr; /* Memory region. */
+ struct {
+ uint32_t dev_gen; /* Generation number to flush local caches. */
+ rte_rwlock_t rwlock; /* MR Lock. */
+ struct mlx5_mr_btree cache; /* Global MR cache table. */
+ struct mlx5_mr_list mr_list; /* Registered MR list. */
+ struct mlx5_mr_list mr_free_list; /* Freed MR list. */
+ } mr;
LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
LIST_HEAD(rxqibv, mlx5_rxq_ibv) rxqsibv; /* Verbs Rx queues. */
LIST_HEAD(hrxq, mlx5_hrxq) hrxqs; /* Verbs Hash Rx queues. */
@@ -149,55 +190,18 @@ struct priv {
LIST_HEAD(ind_tables, mlx5_ind_table_ibv) ind_tbls;
uint32_t link_speed_capa; /* Link speed capabilities. */
struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
- rte_spinlock_t lock; /* Lock for control functions. */
int primary_socket; /* Unix socket for primary process. */
void *uar_base; /* Reserved address space for UAR mapping */
struct rte_intr_handle intr_handle_socket; /* Interrupt handler. */
struct mlx5_dev_config config; /* Device configuration. */
struct mlx5_verbs_alloc_ctx verbs_alloc_ctx;
/* Context for Verbs allocator. */
+ int nl_socket; /* Netlink socket. */
+ uint32_t nl_sn; /* Netlink message sequence number. */
};
-/**
- * Lock private structure to protect it from concurrent access in the
- * control path.
- *
- * @param priv
- * Pointer to private structure.
- */
-static inline void
-priv_lock(struct priv *priv)
-{
- rte_spinlock_lock(&priv->lock);
-}
-
-/**
- * Try to lock private structure to protect it from concurrent access in the
- * control path.
- *
- * @param priv
- * Pointer to private structure.
- *
- * @return
- * 1 if the lock is successfully taken; 0 otherwise.
- */
-static inline int
-priv_trylock(struct priv *priv)
-{
- return rte_spinlock_trylock(&priv->lock);
-}
-
-/**
- * Unlock private structure.
- *
- * @param priv
- * Pointer to private structure.
- */
-static inline void
-priv_unlock(struct priv *priv)
-{
- rte_spinlock_unlock(&priv->lock);
-}
+#define PORT_ID(priv) ((priv)->dev_data->port_id)
+#define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)])
/* mlx5.c */
@@ -205,131 +209,148 @@ int mlx5_getenv_int(const char *);
/* mlx5_ethdev.c */
-struct priv *mlx5_get_priv(struct rte_eth_dev *dev);
-int mlx5_is_secondary(void);
-int priv_get_ifname(const struct priv *, char (*)[IF_NAMESIZE]);
-int priv_ifreq(const struct priv *, int req, struct ifreq *);
-int priv_is_ib_cntr(const char *);
-int priv_get_cntr_sysfs(struct priv *, const char *, uint64_t *);
-int priv_get_num_vfs(struct priv *, uint16_t *);
-int priv_get_mtu(struct priv *, uint16_t *);
-int priv_set_flags(struct priv *, unsigned int, unsigned int);
-int mlx5_dev_configure(struct rte_eth_dev *);
-void mlx5_dev_infos_get(struct rte_eth_dev *, struct rte_eth_dev_info *);
+int mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]);
+int mlx5_ifindex(const struct rte_eth_dev *dev);
+int mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr);
+int mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu);
+int mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep,
+ unsigned int flags);
+int mlx5_dev_configure(struct rte_eth_dev *dev);
+void mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info);
const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev);
-int priv_link_update(struct priv *, int);
-int priv_force_link_status_change(struct priv *, int);
-int mlx5_link_update(struct rte_eth_dev *, int);
-int mlx5_dev_set_mtu(struct rte_eth_dev *, uint16_t);
-int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *, struct rte_eth_fc_conf *);
-int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *, struct rte_eth_fc_conf *);
-int mlx5_ibv_device_to_pci_addr(const struct ibv_device *,
- struct rte_pci_addr *);
-void mlx5_dev_link_status_handler(void *);
-void mlx5_dev_interrupt_handler(void *);
-void priv_dev_interrupt_handler_uninstall(struct priv *, struct rte_eth_dev *);
-void priv_dev_interrupt_handler_install(struct priv *, struct rte_eth_dev *);
+int mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete);
+int mlx5_force_link_status_change(struct rte_eth_dev *dev, int status);
+int mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
+int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+int mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
+ struct rte_pci_addr *pci_addr);
+void mlx5_dev_link_status_handler(void *arg);
+void mlx5_dev_interrupt_handler(void *arg);
+void mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev);
+void mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev);
int mlx5_set_link_down(struct rte_eth_dev *dev);
int mlx5_set_link_up(struct rte_eth_dev *dev);
int mlx5_is_removed(struct rte_eth_dev *dev);
-eth_tx_burst_t priv_select_tx_function(struct priv *, struct rte_eth_dev *);
-eth_rx_burst_t priv_select_rx_function(struct priv *, struct rte_eth_dev *);
+eth_tx_burst_t mlx5_select_tx_function(struct rte_eth_dev *dev);
+eth_rx_burst_t mlx5_select_rx_function(struct rte_eth_dev *dev);
/* mlx5_mac.c */
-int priv_get_mac(struct priv *, uint8_t (*)[ETHER_ADDR_LEN]);
-void mlx5_mac_addr_remove(struct rte_eth_dev *, uint32_t);
-int mlx5_mac_addr_add(struct rte_eth_dev *, struct ether_addr *, uint32_t,
- uint32_t);
-void mlx5_mac_addr_set(struct rte_eth_dev *, struct ether_addr *);
+int mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN]);
+void mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
+int mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
+ uint32_t index, uint32_t vmdq);
+int mlx5_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr);
+int mlx5_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set, uint32_t nb_mc_addr);
/* mlx5_rss.c */
-int mlx5_rss_hash_update(struct rte_eth_dev *, struct rte_eth_rss_conf *);
-int mlx5_rss_hash_conf_get(struct rte_eth_dev *, struct rte_eth_rss_conf *);
-int priv_rss_reta_index_resize(struct priv *, unsigned int);
-int mlx5_dev_rss_reta_query(struct rte_eth_dev *,
- struct rte_eth_rss_reta_entry64 *, uint16_t);
-int mlx5_dev_rss_reta_update(struct rte_eth_dev *,
- struct rte_eth_rss_reta_entry64 *, uint16_t);
+int mlx5_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+int mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+int mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size);
+int mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+int mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
/* mlx5_rxmode.c */
-void mlx5_promiscuous_enable(struct rte_eth_dev *);
-void mlx5_promiscuous_disable(struct rte_eth_dev *);
-void mlx5_allmulticast_enable(struct rte_eth_dev *);
-void mlx5_allmulticast_disable(struct rte_eth_dev *);
+void mlx5_promiscuous_enable(struct rte_eth_dev *dev);
+void mlx5_promiscuous_disable(struct rte_eth_dev *dev);
+void mlx5_allmulticast_enable(struct rte_eth_dev *dev);
+void mlx5_allmulticast_disable(struct rte_eth_dev *dev);
/* mlx5_stats.c */
-void priv_xstats_init(struct priv *);
-int mlx5_stats_get(struct rte_eth_dev *, struct rte_eth_stats *);
-void mlx5_stats_reset(struct rte_eth_dev *);
-int mlx5_xstats_get(struct rte_eth_dev *,
- struct rte_eth_xstat *, unsigned int);
-void mlx5_xstats_reset(struct rte_eth_dev *);
-int mlx5_xstats_get_names(struct rte_eth_dev *,
- struct rte_eth_xstat_name *, unsigned int);
+void mlx5_xstats_init(struct rte_eth_dev *dev);
+int mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
+void mlx5_stats_reset(struct rte_eth_dev *dev);
+int mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
+ unsigned int n);
+void mlx5_xstats_reset(struct rte_eth_dev *dev);
+int mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int n);
/* mlx5_vlan.c */
-int mlx5_vlan_filter_set(struct rte_eth_dev *, uint16_t, int);
-int mlx5_vlan_offload_set(struct rte_eth_dev *, int);
-void mlx5_vlan_strip_queue_set(struct rte_eth_dev *, uint16_t, int);
+int mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
+void mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on);
+int mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask);
/* mlx5_trigger.c */
-int mlx5_dev_start(struct rte_eth_dev *);
-void mlx5_dev_stop(struct rte_eth_dev *);
-int priv_dev_traffic_enable(struct priv *, struct rte_eth_dev *);
-int priv_dev_traffic_disable(struct priv *, struct rte_eth_dev *);
-int priv_dev_traffic_restart(struct priv *, struct rte_eth_dev *);
-int mlx5_traffic_restart(struct rte_eth_dev *);
+int mlx5_dev_start(struct rte_eth_dev *dev);
+void mlx5_dev_stop(struct rte_eth_dev *dev);
+int mlx5_traffic_enable(struct rte_eth_dev *dev);
+void mlx5_traffic_disable(struct rte_eth_dev *dev);
+int mlx5_traffic_restart(struct rte_eth_dev *dev);
/* mlx5_flow.c */
-int mlx5_dev_filter_ctrl(struct rte_eth_dev *, enum rte_filter_type,
- enum rte_filter_op, void *);
-int mlx5_flow_validate(struct rte_eth_dev *, const struct rte_flow_attr *,
- const struct rte_flow_item [],
- const struct rte_flow_action [],
- struct rte_flow_error *);
-struct rte_flow *mlx5_flow_create(struct rte_eth_dev *,
- const struct rte_flow_attr *,
- const struct rte_flow_item [],
- const struct rte_flow_action [],
- struct rte_flow_error *);
-int mlx5_flow_destroy(struct rte_eth_dev *, struct rte_flow *,
- struct rte_flow_error *);
-void priv_flow_flush(struct priv *, struct mlx5_flows *);
-int mlx5_flow_flush(struct rte_eth_dev *, struct rte_flow_error *);
-int mlx5_flow_query(struct rte_eth_dev *, struct rte_flow *,
- enum rte_flow_action_type, void *,
- struct rte_flow_error *);
-int mlx5_flow_isolate(struct rte_eth_dev *, int, struct rte_flow_error *);
-int priv_flow_start(struct priv *, struct mlx5_flows *);
-void priv_flow_stop(struct priv *, struct mlx5_flows *);
-int priv_flow_verify(struct priv *);
-int mlx5_ctrl_flow_vlan(struct rte_eth_dev *, struct rte_flow_item_eth *,
- struct rte_flow_item_eth *, struct rte_flow_item_vlan *,
- struct rte_flow_item_vlan *);
-int mlx5_ctrl_flow(struct rte_eth_dev *, struct rte_flow_item_eth *,
- struct rte_flow_item_eth *);
-int priv_flow_create_drop_queue(struct priv *);
-void priv_flow_delete_drop_queue(struct priv *);
+unsigned int mlx5_get_max_verbs_prio(struct rte_eth_dev *dev);
+int mlx5_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+struct rte_flow *mlx5_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *error);
+void mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list);
+int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
+int mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+ const struct rte_flow_action *action, void *data,
+ struct rte_flow_error *error);
+int mlx5_flow_isolate(struct rte_eth_dev *dev, int enable,
+ struct rte_flow_error *error);
+int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg);
+int mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list);
+void mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list);
+int mlx5_flow_verify(struct rte_eth_dev *dev);
+int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
+ struct rte_flow_item_eth *eth_spec,
+ struct rte_flow_item_eth *eth_mask,
+ struct rte_flow_item_vlan *vlan_spec,
+ struct rte_flow_item_vlan *vlan_mask);
+int mlx5_ctrl_flow(struct rte_eth_dev *dev,
+ struct rte_flow_item_eth *eth_spec,
+ struct rte_flow_item_eth *eth_mask);
+int mlx5_flow_create_drop_queue(struct rte_eth_dev *dev);
+void mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev);
/* mlx5_socket.c */
-int priv_socket_init(struct priv *priv);
-int priv_socket_uninit(struct priv *priv);
-void priv_socket_handle(struct priv *priv);
-int priv_socket_connect(struct priv *priv);
-
-/* mlx5_mr.c */
-
-struct mlx5_mr *priv_mr_new(struct priv *, struct rte_mempool *);
-struct mlx5_mr *priv_mr_get(struct priv *, struct rte_mempool *);
-int priv_mr_release(struct priv *, struct mlx5_mr *);
-int priv_mr_verify(struct priv *);
+int mlx5_socket_init(struct rte_eth_dev *dev);
+void mlx5_socket_uninit(struct rte_eth_dev *dev);
+void mlx5_socket_handle(struct rte_eth_dev *dev);
+int mlx5_socket_connect(struct rte_eth_dev *dev);
+
+/* mlx5_nl.c */
+
+int mlx5_nl_init(uint32_t nlgroups);
+int mlx5_nl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
+ uint32_t index);
+int mlx5_nl_mac_addr_remove(struct rte_eth_dev *dev, struct ether_addr *mac,
+ uint32_t index);
+void mlx5_nl_mac_addr_sync(struct rte_eth_dev *dev);
+void mlx5_nl_mac_addr_flush(struct rte_eth_dev *dev);
+int mlx5_nl_promisc(struct rte_eth_dev *dev, int enable);
+int mlx5_nl_allmulti(struct rte_eth_dev *dev, int enable);
#endif /* RTE_PMD_MLX5_H_ */
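A minimal sketch of how the reworked control-path API above is meant to be used, assuming the driver's own headers (mlx5.h, mlx5_utils.h) are included; the function below is hypothetical and only combines declarations from this header.

    static void
    example_show_mtu(struct rte_eth_dev *dev)
    {
            struct priv *priv = dev->data->dev_private;
            uint16_t mtu;

            /* Helpers now take the rte_eth_dev directly and report
             * failures through rte_errno instead of plain errno. */
            if (mlx5_get_mtu(dev, &mtu) == 0)
                    DRV_LOG(INFO, "port %u MTU is %u", PORT_ID(priv), mtu);
            else
                    DRV_LOG(ERR, "port %u cannot read MTU: %s",
                            PORT_ID(priv), strerror(rte_errno));
    }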
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index c3334ca3..51124cdc 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#ifndef RTE_PMD_MLX5_DEFS_H_
@@ -13,8 +13,13 @@
/* Reported driver name. */
#define MLX5_DRIVER_NAME "net_mlx5"
+/* Maximum number of simultaneous unicast MAC addresses. */
+#define MLX5_MAX_UC_MAC_ADDRESSES 128
+/* Maximum number of simultaneous Multicast MAC addresses. */
+#define MLX5_MAX_MC_MAC_ADDRESSES 128
/* Maximum number of simultaneous MAC addresses. */
-#define MLX5_MAX_MAC_ADDRESSES 128
+#define MLX5_MAX_MAC_ADDRESSES \
+ (MLX5_MAX_UC_MAC_ADDRESSES + MLX5_MAX_MC_MAC_ADDRESSES)
/* Maximum number of simultaneous VLAN filters. */
#define MLX5_MAX_VLAN_IDS 128
@@ -32,16 +37,11 @@
*/
#define MLX5_TX_COMP_THRESH_INLINE_DIV (1 << 3)
-/*
- * Maximum number of cached Memory Pools (MPs) per TX queue. Each RTE MP
- * from which buffers are to be transmitted will have to be mapped by this
- * driver to their own Memory Region (MR). This is a slow operation.
- *
- * This value is always 1 for RX queues.
- */
-#ifndef MLX5_PMD_TX_MP_CACHE
-#define MLX5_PMD_TX_MP_CACHE 8
-#endif
+/* Size of per-queue MR cache array for linear search. */
+#define MLX5_MR_CACHE_N 8
+
+/* Size of MR cache table for binary search. */
+#define MLX5_MR_BTREE_CACHE_N 256
/*
* If defined, only use software counters. The PMD will never ask the hardware
@@ -58,7 +58,7 @@
#define MLX5_MAX_XSTATS 32
/* Maximum Packet headers size (L2+L3+L4) for TSO. */
-#define MLX5_MAX_TSO_HEADER 128
+#define MLX5_MAX_TSO_HEADER 192
/* Default minimum number of Tx queues for vectorized Tx. */
#define MLX5_VPMD_MIN_TXQS 4
@@ -82,8 +82,8 @@
/* Supported RSS */
#define MLX5_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP))
-/* Maximum number of attempts to query link status before giving up. */
-#define MLX5_MAX_LINK_QUERY_ATTEMPTS 5
+/* Timeout in seconds to get a valid link status. */
+#define MLX5_LINK_STATUS_TIMEOUT 10
/* Reserved address space for UAR mapping. */
#define MLX5_UAR_SIZE (1ULL << 32)
@@ -95,4 +95,22 @@
*/
#define MLX5_UAR_OFFSET (1ULL << 32)
+/* Log 2 of the default number of strides per WQE for Multi-Packet RQ. */
+#define MLX5_MPRQ_STRIDE_NUM_N 4U
+
+/* Two-byte shift is disabled for Multi-Packet RQ. */
+#define MLX5_MPRQ_TWO_BYTE_SHIFT 0
+
+/*
+ * Minimum size of packet to be memcpy'd instead of being attached as an
+ * external buffer.
+ */
+#define MLX5_MPRQ_MEMCPY_DEFAULT_LEN 128
+
+/* Minimum number of Rx queues to enable Multi-Packet RQ. */
+#define MLX5_MPRQ_MIN_RXQS 12
+
+/* Cache size of mempool for Multi-Packet RQ. */
+#define MLX5_MPRQ_MP_CACHE_SZ 32
+
#endif /* RTE_PMD_MLX5_DEFS_H_ */
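To make the Multi-Packet RQ defaults above concrete: MLX5_MPRQ_STRIDE_NUM_N is a log2 value, so the default requests 1 << 4 = 16 strides per WQE, and the probe code falls back to that default (floored by the device minimum) when a configured value is out of the device-reported range. A simplified sketch of that selection, with hypothetical capability bounds:

    #include <rte_common.h>
    #include "mlx5_defs.h"

    /* Simplified version of the stride-number check in mlx5_pci_probe();
     * min_n/max_n would come from the mlx5dv_query_device() capabilities. */
    static unsigned int
    example_mprq_stride_num(unsigned int conf_n, unsigned int min_n,
                            unsigned int max_n)
    {
            if (conf_n > max_n || conf_n < min_n)
                    conf_n = RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N, min_n);
            return conf_n; /* log2: 4 means 1 << 4 == 16 strides per WQE */
    }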
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 66650769..90488af3 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -1,12 +1,13 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#define _GNU_SOURCE
#include <stddef.h>
#include <assert.h>
+#include <inttypes.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
@@ -17,14 +18,13 @@
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
-#include <sys/utsname.h>
#include <netinet/in.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
-#include <linux/version.h>
#include <fcntl.h>
#include <stdalign.h>
#include <sys/un.h>
+#include <time.h>
#include <rte_atomic.h>
#include <rte_ethdev_driver.h>
@@ -32,8 +32,9 @@
#include <rte_mbuf.h>
#include <rte_common.h>
#include <rte_interrupts.h>
-#include <rte_alarm.h>
#include <rte_malloc.h>
+#include <rte_string_fns.h>
+#include <rte_rwlock.h>
#include "mlx5.h"
#include "mlx5_glue.h"
@@ -94,17 +95,18 @@ struct ethtool_link_settings {
/**
* Get interface name from private structure.
*
- * @param[in] priv
- * Pointer to private structure.
+ * @param[in] dev
+ * Pointer to Ethernet device.
* @param[out] ifname
* Interface name output buffer.
*
* @return
- * 0 on success, -1 on failure and errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE])
+mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE])
{
+ struct priv *priv = dev->data->dev_private;
DIR *dir;
struct dirent *dent;
unsigned int dev_type = 0;
@@ -115,8 +117,10 @@ priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE])
MKSTR(path, "%s/device/net", priv->ibdev_path);
dir = opendir(path);
- if (dir == NULL)
- return -1;
+ if (dir == NULL) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
}
while ((dent = readdir(dir)) != NULL) {
char *name = dent->d_name;
@@ -163,358 +167,161 @@ try_dev_id:
goto try_dev_id;
dev_port_prev = dev_port;
if (dev_port == (priv->port - 1u))
- snprintf(match, sizeof(match), "%s", name);
+ strlcpy(match, name, sizeof(match));
}
closedir(dir);
- if (match[0] == '\0')
- return -1;
+ if (match[0] == '\0') {
+ rte_errno = ENOENT;
+ return -rte_errno;
+ }
strncpy(*ifname, match, sizeof(*ifname));
return 0;
}
/**
- * Check if the counter is located on ib counters file.
+ * Get the interface index from device name.
*
- * @param[in] cntr
- * Counter name.
+ * @param[in] dev
+ * Pointer to Ethernet device.
*
* @return
- * 1 if counter is located on ib counters file , 0 otherwise.
+ * Interface index on success, a negative errno value otherwise and
+ * rte_errno is set.
*/
int
-priv_is_ib_cntr(const char *cntr)
-{
- if (!strcmp(cntr, "out_of_buffer"))
- return 1;
- return 0;
-}
-
-/**
- * Read from sysfs entry.
- *
- * @param[in] priv
- * Pointer to private structure.
- * @param[in] entry
- * Entry name relative to sysfs path.
- * @param[out] buf
- * Data output buffer.
- * @param size
- * Buffer size.
- *
- * @return
- * 0 on success, -1 on failure and errno is set.
- */
-static int
-priv_sysfs_read(const struct priv *priv, const char *entry,
- char *buf, size_t size)
-{
- char ifname[IF_NAMESIZE];
- FILE *file;
- int ret;
- int err;
-
- if (priv_get_ifname(priv, &ifname))
- return -1;
-
- if (priv_is_ib_cntr(entry)) {
- MKSTR(path, "%s/ports/1/hw_counters/%s",
- priv->ibdev_path, entry);
- file = fopen(path, "rb");
- } else {
- MKSTR(path, "%s/device/net/%s/%s",
- priv->ibdev_path, ifname, entry);
- file = fopen(path, "rb");
- }
- if (file == NULL)
- return -1;
- ret = fread(buf, 1, size, file);
- err = errno;
- if (((size_t)ret < size) && (ferror(file)))
- ret = -1;
- else
- ret = size;
- fclose(file);
- errno = err;
- return ret;
-}
-
-/**
- * Write to sysfs entry.
- *
- * @param[in] priv
- * Pointer to private structure.
- * @param[in] entry
- * Entry name relative to sysfs path.
- * @param[in] buf
- * Data buffer.
- * @param size
- * Buffer size.
- *
- * @return
- * 0 on success, -1 on failure and errno is set.
- */
-static int
-priv_sysfs_write(const struct priv *priv, const char *entry,
- char *buf, size_t size)
+mlx5_ifindex(const struct rte_eth_dev *dev)
{
char ifname[IF_NAMESIZE];
- FILE *file;
int ret;
- int err;
-
- if (priv_get_ifname(priv, &ifname))
- return -1;
- MKSTR(path, "%s/device/net/%s/%s", priv->ibdev_path, ifname, entry);
-
- file = fopen(path, "wb");
- if (file == NULL)
- return -1;
- ret = fwrite(buf, 1, size, file);
- err = errno;
- if (((size_t)ret < size) || (ferror(file)))
- ret = -1;
- else
- ret = size;
- fclose(file);
- errno = err;
- return ret;
-}
-
-/**
- * Get unsigned long sysfs property.
- *
- * @param priv
- * Pointer to private structure.
- * @param[in] name
- * Entry name relative to sysfs path.
- * @param[out] value
- * Value output buffer.
- *
- * @return
- * 0 on success, -1 on failure and errno is set.
- */
-static int
-priv_get_sysfs_ulong(struct priv *priv, const char *name, unsigned long *value)
-{
- int ret;
- unsigned long value_ret;
- char value_str[32];
-
- ret = priv_sysfs_read(priv, name, value_str, (sizeof(value_str) - 1));
- if (ret == -1) {
- DEBUG("cannot read %s value from sysfs: %s",
- name, strerror(errno));
- return -1;
- }
- value_str[ret] = '\0';
- errno = 0;
- value_ret = strtoul(value_str, NULL, 0);
- if (errno) {
- DEBUG("invalid %s value `%s': %s", name, value_str,
- strerror(errno));
- return -1;
- }
- *value = value_ret;
- return 0;
-}
-
-/**
- * Set unsigned long sysfs property.
- *
- * @param priv
- * Pointer to private structure.
- * @param[in] name
- * Entry name relative to sysfs path.
- * @param value
- * Value to set.
- *
- * @return
- * 0 on success, -1 on failure and errno is set.
- */
-static int
-priv_set_sysfs_ulong(struct priv *priv, const char *name, unsigned long value)
-{
- int ret;
- MKSTR(value_str, "%lu", value);
-
- ret = priv_sysfs_write(priv, name, value_str, (sizeof(value_str) - 1));
+ ret = mlx5_get_ifname(dev, &ifname);
+ if (ret)
+ return ret;
+ ret = if_nametoindex(ifname);
if (ret == -1) {
- DEBUG("cannot write %s `%s' (%lu) to sysfs: %s",
- name, value_str, value, strerror(errno));
- return -1;
+ rte_errno = errno;
+ return -rte_errno;
}
- return 0;
+ return ret;
}
/**
* Perform ifreq ioctl() on associated Ethernet device.
*
- * @param[in] priv
- * Pointer to private structure.
+ * @param[in] dev
+ * Pointer to Ethernet device.
* @param req
* Request number to pass to ioctl().
* @param[out] ifr
* Interface request structure output buffer.
*
* @return
- * 0 on success, -1 on failure and errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr)
+mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr)
{
int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
- int ret = -1;
+ int ret = 0;
- if (sock == -1)
- return ret;
- if (priv_get_ifname(priv, &ifr->ifr_name) == 0)
- ret = ioctl(sock, req, ifr);
+ if (sock == -1) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ ret = mlx5_get_ifname(dev, &ifr->ifr_name);
+ if (ret)
+ goto error;
+ ret = ioctl(sock, req, ifr);
+ if (ret == -1) {
+ rte_errno = errno;
+ goto error;
+ }
close(sock);
- return ret;
-}
-
-/**
- * Return the number of active VFs for the current device.
- *
- * @param[in] priv
- * Pointer to private structure.
- * @param[out] num_vfs
- * Number of active VFs.
- *
- * @return
- * 0 on success, -1 on failure and errno is set.
- */
-int
-priv_get_num_vfs(struct priv *priv, uint16_t *num_vfs)
-{
- /* The sysfs entry name depends on the operating system. */
- const char **name = (const char *[]){
- "device/sriov_numvfs",
- "device/mlx5_num_vfs",
- NULL,
- };
- int ret;
-
- do {
- unsigned long ulong_num_vfs;
-
- ret = priv_get_sysfs_ulong(priv, *name, &ulong_num_vfs);
- if (!ret)
- *num_vfs = ulong_num_vfs;
- } while (*(++name) && ret);
- return ret;
+ return 0;
+error:
+ close(sock);
+ return -rte_errno;
}
/**
* Get device MTU.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param[out] mtu
* MTU value output buffer.
*
* @return
- * 0 on success, -1 on failure and errno is set.
- */
-int
-priv_get_mtu(struct priv *priv, uint16_t *mtu)
-{
- unsigned long ulong_mtu;
-
- if (priv_get_sysfs_ulong(priv, "mtu", &ulong_mtu) == -1)
- return -1;
- *mtu = ulong_mtu;
- return 0;
-}
-
-/**
- * Read device counter from sysfs.
- *
- * @param priv
- * Pointer to private structure.
- * @param name
- * Counter name.
- * @param[out] cntr
- * Counter output buffer.
- *
- * @return
- * 0 on success, -1 on failure and errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-priv_get_cntr_sysfs(struct priv *priv, const char *name, uint64_t *cntr)
+mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu)
{
- unsigned long ulong_ctr;
+ struct ifreq request;
+ int ret = mlx5_ifreq(dev, SIOCGIFMTU, &request);
- if (priv_get_sysfs_ulong(priv, name, &ulong_ctr) == -1)
- return -1;
- *cntr = ulong_ctr;
+ if (ret)
+ return ret;
+ *mtu = request.ifr_mtu;
return 0;
}
/**
* Set device MTU.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param mtu
* MTU value to set.
*
* @return
- * 0 on success, -1 on failure and errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_set_mtu(struct priv *priv, uint16_t mtu)
+mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
- uint16_t new_mtu;
+ struct ifreq request = { .ifr_mtu = mtu, };
- if (priv_set_sysfs_ulong(priv, "mtu", mtu) ||
- priv_get_mtu(priv, &new_mtu))
- return -1;
- if (new_mtu == mtu)
- return 0;
- errno = EINVAL;
- return -1;
+ return mlx5_ifreq(dev, SIOCSIFMTU, &request);
}
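The MTU helpers above replace the old sysfs reads with SIOCGIFMTU/SIOCSIFMTU ioctls issued through mlx5_ifreq(). A standalone sketch of that ioctl pattern, using a hypothetical interface name:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <net/if.h>

    int
    main(void)
    {
            struct ifreq ifr;
            int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);

            if (sock == -1)
                    return 1;
            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name)); /* hypothetical */
            if (ioctl(sock, SIOCGIFMTU, &ifr) == 0)
                    printf("MTU is %d\n", ifr.ifr_mtu);
            close(sock);
            return 0;
    }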
/**
* Set device flags.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param keep
* Bitmask for flags that must remain untouched.
* @param flags
* Bitmask for flags to modify.
*
* @return
- * 0 on success, -1 on failure and errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags)
+mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags)
{
- unsigned long tmp;
+ struct ifreq request;
+ int ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &request);
- if (priv_get_sysfs_ulong(priv, "flags", &tmp) == -1)
- return -1;
- tmp &= keep;
- tmp |= (flags & (~keep));
- return priv_set_sysfs_ulong(priv, "flags", tmp);
+ if (ret)
+ return ret;
+ request.ifr_flags &= keep;
+ request.ifr_flags |= flags & ~keep;
+ return mlx5_ifreq(dev, SIOCSIFFLAGS, &request);
}
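The keep/flags arithmetic in mlx5_set_flags() is a read-modify-write on the kernel interface flags: bits covered by keep are preserved, the rest are taken from flags, which is how mlx5_set_link_up()/mlx5_set_link_down() later in this patch toggle only IFF_UP. A small sketch of the masking alone, with illustrative values:

#include <stdio.h>
#include <net/if.h>

int
main(void)
{
	unsigned int current = IFF_BROADCAST | IFF_MULTICAST; /* link down */
	unsigned int keep = ~IFF_UP;  /* preserve every bit except IFF_UP */
	unsigned int next;

	/* Equivalent of mlx5_set_flags(dev, ~IFF_UP, IFF_UP). */
	next = (current & keep) | (IFF_UP & ~keep);
	printf("up after set: %u\n", !!(next & IFF_UP));   /* prints 1 */
	/* Equivalent of mlx5_set_flags(dev, ~IFF_UP, ~IFF_UP). */
	next = (next & keep) | (~IFF_UP & ~keep);
	printf("up after clear: %u\n", !!(next & IFF_UP)); /* prints 0 */
	return 0;
}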
/**
- * Ethernet device configuration.
- *
- * Prepare the driver for a given number of TX and RX queues.
+ * DPDK callback for Ethernet device configuration.
*
* @param dev
* Pointer to Ethernet device structure.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-dev_configure(struct rte_eth_dev *dev)
+int
+mlx5_dev_configure(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
unsigned int rxqs_n = dev->data->nb_rx_queues;
@@ -524,37 +331,24 @@ dev_configure(struct rte_eth_dev *dev)
unsigned int reta_idx_n;
const uint8_t use_app_rss_key =
!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
- uint64_t supp_tx_offloads = mlx5_priv_get_tx_port_offloads(priv);
- uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
- uint64_t supp_rx_offloads =
- (mlx5_priv_get_rx_port_offloads(priv) |
- mlx5_priv_get_rx_queue_offloads(priv));
- uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
-
- if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
- ERROR("Some Tx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64,
- tx_offloads, supp_tx_offloads);
- return ENOTSUP;
- }
- if ((rx_offloads & supp_rx_offloads) != rx_offloads) {
- ERROR("Some Rx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64,
- rx_offloads, supp_rx_offloads);
- return ENOTSUP;
- }
+ int ret = 0;
+
if (use_app_rss_key &&
(dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
rss_hash_default_key_len)) {
- /* MLX5 RSS only support 40bytes key. */
- return EINVAL;
+ DRV_LOG(ERR, "port %u RSS key len must be %zu bytes long",
+ dev->data->port_id, rss_hash_default_key_len);
+ rte_errno = EINVAL;
+ return -rte_errno;
}
priv->rss_conf.rss_key =
rte_realloc(priv->rss_conf.rss_key,
rss_hash_default_key_len, 0);
if (!priv->rss_conf.rss_key) {
- ERROR("cannot allocate RSS hash key memory (%u)", rxqs_n);
- return ENOMEM;
+ DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory (%u)",
+ dev->data->port_id, rxqs_n);
+ rte_errno = ENOMEM;
+ return -rte_errno;
}
memcpy(priv->rss_conf.rss_key,
use_app_rss_key ?
@@ -566,18 +360,20 @@ dev_configure(struct rte_eth_dev *dev)
priv->rxqs = (void *)dev->data->rx_queues;
priv->txqs = (void *)dev->data->tx_queues;
if (txqs_n != priv->txqs_n) {
- INFO("%p: TX queues number update: %u -> %u",
- (void *)dev, priv->txqs_n, txqs_n);
+ DRV_LOG(INFO, "port %u Tx queues number update: %u -> %u",
+ dev->data->port_id, priv->txqs_n, txqs_n);
priv->txqs_n = txqs_n;
}
if (rxqs_n > priv->config.ind_table_max_size) {
- ERROR("cannot handle this many RX queues (%u)", rxqs_n);
- return EINVAL;
+ DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)",
+ dev->data->port_id, rxqs_n);
+ rte_errno = EINVAL;
+ return -rte_errno;
}
if (rxqs_n == priv->rxqs_n)
return 0;
- INFO("%p: RX queues number update: %u -> %u",
- (void *)dev, priv->rxqs_n, rxqs_n);
+ DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u",
+ dev->data->port_id, priv->rxqs_n, rxqs_n);
priv->rxqs_n = rxqs_n;
/* If the requested number of RX queues is not a power of two, use the
* maximum indirection table size for better balancing.
@@ -585,8 +381,9 @@ dev_configure(struct rte_eth_dev *dev)
reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
priv->config.ind_table_max_size :
rxqs_n));
- if (priv_rss_reta_index_resize(priv, reta_idx_n))
- return ENOMEM;
+ ret = mlx5_rss_reta_index_resize(dev, reta_idx_n);
+ if (ret)
+ return ret;
/* When the number of RX queues is not a power of two, the remaining
* table entries are padded with reused WQs and hashes are not spread
* uniformly. */
@@ -599,25 +396,42 @@ dev_configure(struct rte_eth_dev *dev)
}
/**
- * DPDK callback for Ethernet device configuration.
+ * Sets default tuning parameters.
*
* @param dev
- * Pointer to Ethernet device structure.
- *
- * @return
- * 0 on success, negative errno value on failure.
+ * Pointer to Ethernet device.
+ * @param[out] info
+ * Info structure output buffer.
*/
-int
-mlx5_dev_configure(struct rte_eth_dev *dev)
+static void
+mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
struct priv *priv = dev->data->dev_private;
- int ret;
- priv_lock(priv);
- ret = dev_configure(dev);
- assert(ret >= 0);
- priv_unlock(priv);
- return -ret;
+ /* Minimum CPU utilization. */
+ info->default_rxportconf.ring_size = 256;
+ info->default_txportconf.ring_size = 256;
+ info->default_rxportconf.burst_size = 64;
+ info->default_txportconf.burst_size = 64;
+ if (priv->link_speed_capa & ETH_LINK_SPEED_100G) {
+ info->default_rxportconf.nb_queues = 16;
+ info->default_txportconf.nb_queues = 16;
+ if (dev->data->nb_rx_queues > 2 ||
+ dev->data->nb_tx_queues > 2) {
+ /* Max Throughput. */
+ info->default_rxportconf.ring_size = 2048;
+ info->default_txportconf.ring_size = 2048;
+ }
+ } else {
+ info->default_rxportconf.nb_queues = 8;
+ info->default_txportconf.nb_queues = 8;
+ if (dev->data->nb_rx_queues > 2 ||
+ dev->data->nb_tx_queues > 2) {
+ /* Max Throughput. */
+ info->default_rxportconf.ring_size = 4096;
+ info->default_txportconf.ring_size = 4096;
+ }
+ }
}
/**
@@ -636,9 +450,6 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
unsigned int max;
char ifname[IF_NAMESIZE];
- info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-
- priv_lock(priv);
/* FIXME: we should ask the device for these values. */
info->min_rx_bufsize = 32;
info->max_rx_pktlen = 65536;
@@ -653,22 +464,30 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
max = 65535;
info->max_rx_queues = max;
info->max_tx_queues = max;
- info->max_mac_addrs = RTE_DIM(priv->mac);
- info->rx_queue_offload_capa =
- mlx5_priv_get_rx_queue_offloads(priv);
- info->rx_offload_capa = (mlx5_priv_get_rx_port_offloads(priv) |
+ info->max_mac_addrs = MLX5_MAX_UC_MAC_ADDRESSES;
+ info->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev);
+ info->rx_offload_capa = (mlx5_get_rx_port_offloads() |
info->rx_queue_offload_capa);
- info->tx_offload_capa = mlx5_priv_get_tx_port_offloads(priv);
- if (priv_get_ifname(priv, &ifname) == 0)
+ info->tx_offload_capa = mlx5_get_tx_port_offloads(dev);
+ if (mlx5_get_ifname(dev, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
info->reta_size = priv->reta_idx_n ?
priv->reta_idx_n : config->ind_table_max_size;
- info->hash_key_size = priv->rss_conf.rss_key_len;
+ info->hash_key_size = rss_hash_default_key_len;
info->speed_capa = priv->link_speed_capa;
info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK;
- priv_unlock(priv);
+ mlx5_set_default_params(dev, info);
}
+/**
+ * Get supported packet types.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * A pointer to the supported packet types array.
+ */
const uint32_t *
mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
@@ -691,6 +510,7 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
};
if (dev->rx_pkt_burst == mlx5_rx_burst ||
+ dev->rx_pkt_burst == mlx5_rx_burst_mprq ||
dev->rx_pkt_burst == mlx5_rx_burst_vec)
return ptypes;
return NULL;
@@ -701,11 +521,15 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
*
* @param dev
* Pointer to Ethernet device structure.
- * @param wait_to_complete
- * Wait for request completion (ignored).
+ * @param[out] link
+ * Storage for current link status.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
+mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
{
struct priv *priv = dev->data->dev_private;
struct ethtool_cmd edata = {
@@ -714,26 +538,28 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
struct ifreq ifr;
struct rte_eth_link dev_link;
int link_speed = 0;
+ int ret;
- /* priv_lock() is not taken to allow concurrent calls. */
-
- (void)wait_to_complete;
- if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
- WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
- return -1;
+ ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
+ if (ret) {
+ DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
+ dev->data->port_id, strerror(rte_errno));
+ return ret;
}
memset(&dev_link, 0, sizeof(dev_link));
dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
(ifr.ifr_flags & IFF_RUNNING));
ifr.ifr_data = (void *)&edata;
- if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
- WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
- strerror(errno));
- return -1;
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+ if (ret) {
+ DRV_LOG(WARNING,
+ "port %u ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
+ dev->data->port_id, strerror(rte_errno));
+ return ret;
}
link_speed = ethtool_cmd_speed(&edata);
if (link_speed == -1)
- dev_link.link_speed = 0;
+ dev_link.link_speed = ETH_SPEED_NUM_NONE;
else
dev_link.link_speed = link_speed;
priv->link_speed_capa = 0;
@@ -753,13 +579,13 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
ETH_LINK_SPEED_FIXED);
- if (memcmp(&dev_link, &dev->data->dev_link, sizeof(dev_link))) {
- /* Link status changed. */
- dev->data->dev_link = dev_link;
- return 0;
+ if ((dev_link.link_speed && !dev_link.link_status) ||
+ (!dev_link.link_speed && dev_link.link_status)) {
+ rte_errno = EAGAIN;
+ return -rte_errno;
}
- /* Link status is still the same. */
- return -1;
+ *link = dev_link;
+ return 0;
}
/**
@@ -767,31 +593,41 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
*
* @param dev
* Pointer to Ethernet device structure.
- * @param wait_to_complete
- * Wait for request completion (ignored).
+ * @param[out] link
+ * Storage for current link status.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
+mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+
{
struct priv *priv = dev->data->dev_private;
struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
struct ifreq ifr;
struct rte_eth_link dev_link;
uint64_t sc;
+ int ret;
- (void)wait_to_complete;
- if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
- WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
- return -1;
+ ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
+ if (ret) {
+ DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
+ dev->data->port_id, strerror(rte_errno));
+ return ret;
}
memset(&dev_link, 0, sizeof(dev_link));
dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
(ifr.ifr_flags & IFF_RUNNING));
ifr.ifr_data = (void *)&gcmd;
- if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
- DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
- strerror(errno));
- return -1;
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+ if (ret) {
+ DRV_LOG(DEBUG,
+ "port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
+ " failed: %s",
+ dev->data->port_id, strerror(rte_errno));
+ return ret;
}
gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;
@@ -802,10 +638,13 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
*ecmd = gcmd;
ifr.ifr_data = (void *)ecmd;
- if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
- DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
- strerror(errno));
- return -1;
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+ if (ret) {
+ DRV_LOG(DEBUG,
+ "port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
+ " failed: %s",
+ dev->data->port_id, strerror(rte_errno));
+ return ret;
}
dev_link.link_speed = ecmd->speed;
sc = ecmd->link_mode_masks[0] |
@@ -849,121 +688,13 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
ETH_LINK_SPEED_FIXED);
- if (memcmp(&dev_link, &dev->data->dev_link, sizeof(dev_link))) {
- /* Link status changed. */
- dev->data->dev_link = dev_link;
- return 0;
- }
- /* Link status is still the same. */
- return -1;
-}
-
-/**
- * Enable receiving and transmitting traffic.
- *
- * @param priv
- * Pointer to private structure.
- */
-static void
-priv_link_start(struct priv *priv)
-{
- struct rte_eth_dev *dev = priv->dev;
- int err;
-
- dev->tx_pkt_burst = priv_select_tx_function(priv, dev);
- dev->rx_pkt_burst = priv_select_rx_function(priv, dev);
- err = priv_dev_traffic_enable(priv, dev);
- if (err)
- ERROR("%p: error occurred while configuring control flows: %s",
- (void *)priv, strerror(err));
- err = priv_flow_start(priv, &priv->flows);
- if (err)
- ERROR("%p: error occurred while configuring flows: %s",
- (void *)priv, strerror(err));
-}
-
-/**
- * Disable receiving and transmitting traffic.
- *
- * @param priv
- * Pointer to private structure.
- */
-static void
-priv_link_stop(struct priv *priv)
-{
- struct rte_eth_dev *dev = priv->dev;
-
- priv_flow_stop(priv, &priv->flows);
- priv_dev_traffic_disable(priv, dev);
- dev->rx_pkt_burst = removed_rx_burst;
- dev->tx_pkt_burst = removed_tx_burst;
-}
-
-/**
- * Retrieve physical link information and update rx/tx_pkt_burst callbacks
- * accordingly.
- *
- * @param priv
- * Pointer to private structure.
- * @param wait_to_complete
- * Wait for request completion (ignored).
- */
-int
-priv_link_update(struct priv *priv, int wait_to_complete)
-{
- struct rte_eth_dev *dev = priv->dev;
- struct utsname utsname;
- int ver[3];
- int ret;
- struct rte_eth_link dev_link = dev->data->dev_link;
-
- if (uname(&utsname) == -1 ||
- sscanf(utsname.release, "%d.%d.%d",
- &ver[0], &ver[1], &ver[2]) != 3 ||
- KERNEL_VERSION(ver[0], ver[1], ver[2]) < KERNEL_VERSION(4, 9, 0))
- ret = mlx5_link_update_unlocked_gset(dev, wait_to_complete);
- else
- ret = mlx5_link_update_unlocked_gs(dev, wait_to_complete);
- /* If lsc interrupt is disabled, should always be ready for traffic. */
- if (!dev->data->dev_conf.intr_conf.lsc) {
- priv_link_start(priv);
- return ret;
+ if ((dev_link.link_speed && !dev_link.link_status) ||
+ (!dev_link.link_speed && dev_link.link_status)) {
+ rte_errno = EAGAIN;
+ return -rte_errno;
}
- /* Re-select burst callbacks only if link status has been changed. */
- if (!ret && dev_link.link_status != dev->data->dev_link.link_status) {
- if (dev->data->dev_link.link_status == ETH_LINK_UP)
- priv_link_start(priv);
- else
- priv_link_stop(priv);
- }
- return ret;
-}
-
-/**
- * Querying the link status till it changes to the desired state.
- * Number of query attempts is bounded by MLX5_MAX_LINK_QUERY_ATTEMPTS.
- *
- * @param priv
- * Pointer to private structure.
- * @param status
- * Link desired status.
- *
- * @return
- * 0 on success, negative errno value on failure.
- */
-int
-priv_force_link_status_change(struct priv *priv, int status)
-{
- int try = 0;
-
- while (try < MLX5_MAX_LINK_QUERY_ATTEMPTS) {
- priv_link_update(priv, 0);
- if (priv->dev->data->dev_link.link_status == status)
- return 0;
- try++;
- sleep(1);
- }
- return -EAGAIN;
+ *link = dev_link;
+ return 0;
}
/**
@@ -972,17 +703,42 @@ priv_force_link_status_change(struct priv *priv, int status)
* @param dev
* Pointer to Ethernet device structure.
* @param wait_to_complete
- * Wait for request completion (ignored).
+ * Wait for request completion.
+ *
+ * @return
+ * 0 if link status was not updated, positive if it was, a negative errno
+ * value otherwise and rte_errno is set.
*/
int
mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
- struct priv *priv = dev->data->dev_private;
int ret;
+ struct rte_eth_link dev_link;
+ time_t start_time = time(NULL);
- priv_lock(priv);
- ret = priv_link_update(priv, wait_to_complete);
- priv_unlock(priv);
+ do {
+ ret = mlx5_link_update_unlocked_gs(dev, &dev_link);
+ if (ret)
+ ret = mlx5_link_update_unlocked_gset(dev, &dev_link);
+ if (ret == 0)
+ break;
+ /* Handle wait to complete situation. */
+ if (wait_to_complete && ret == -EAGAIN) {
+ if (abs((int)difftime(time(NULL), start_time)) <
+ MLX5_LINK_STATUS_TIMEOUT) {
+ usleep(0);
+ continue;
+ } else {
+ rte_errno = EBUSY;
+ return -rte_errno;
+ }
+ } else if (ret < 0) {
+ return ret;
+ }
+ } while (wait_to_complete);
+ ret = !!memcmp(&dev->data->dev_link, &dev_link,
+ sizeof(struct rte_eth_link));
+ dev->data->dev_link = dev_link;
return ret;
}
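mlx5_link_update() above now polls until the speed/status pair reported by the kernel is consistent, retrying on -EAGAIN for at most MLX5_LINK_STATUS_TIMEOUT seconds when wait_to_complete is set, then signals whether anything changed through the memcmp() of the old and new rte_eth_link. A generic sketch of that poll-until-consistent loop, with a hypothetical query callback:

#include <errno.h>
#include <time.h>
#include <unistd.h>

/* Hypothetical: retry query(ctx) until it stops returning -EAGAIN or
 * timeout_s seconds have elapsed. */
static int
poll_until_consistent(int (*query)(void *), void *ctx, int timeout_s)
{
	time_t start = time(NULL);
	int ret;

	do {
		ret = query(ctx);
		if (ret == 0)
			return 0;
		if (ret != -EAGAIN)
			return ret;   /* hard failure, give up immediately */
		usleep(1000);         /* yield before asking again */
	} while (difftime(time(NULL), start) < timeout_s);
	return -EBUSY;                /* still inconsistent after timeout */
}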
@@ -995,39 +751,33 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
* New MTU.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
struct priv *priv = dev->data->dev_private;
- uint16_t kern_mtu;
- int ret = 0;
+ uint16_t kern_mtu = 0;
+ int ret;
- priv_lock(priv);
- ret = priv_get_mtu(priv, &kern_mtu);
+ ret = mlx5_get_mtu(dev, &kern_mtu);
if (ret)
- goto out;
+ return ret;
/* Set kernel interface MTU first. */
- ret = priv_set_mtu(priv, mtu);
+ ret = mlx5_set_mtu(dev, mtu);
if (ret)
- goto out;
- ret = priv_get_mtu(priv, &kern_mtu);
+ return ret;
+ ret = mlx5_get_mtu(dev, &kern_mtu);
if (ret)
- goto out;
+ return ret;
if (kern_mtu == mtu) {
priv->mtu = mtu;
- DEBUG("adapter port %u MTU set to %u", priv->port, mtu);
+ DRV_LOG(DEBUG, "port %u adapter MTU set to %u",
+ dev->data->port_id, mtu);
+ return 0;
}
- priv_unlock(priv);
- return 0;
-out:
- ret = errno;
- WARN("cannot set port %u MTU to %u: %s", priv->port, mtu,
- strerror(ret));
- priv_unlock(priv);
- assert(ret >= 0);
- return -ret;
+ rte_errno = EAGAIN;
+ return -rte_errno;
}
/**
@@ -1039,12 +789,11 @@ out:
* Flow control output buffer.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
- struct priv *priv = dev->data->dev_private;
struct ifreq ifr;
struct ethtool_pauseparam ethpause = {
.cmd = ETHTOOL_GPAUSEPARAM
@@ -1052,15 +801,14 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
int ret;
ifr.ifr_data = (void *)&ethpause;
- priv_lock(priv);
- if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
- ret = errno;
- WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM)"
- " failed: %s",
- strerror(ret));
- goto out;
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+ if (ret) {
+ DRV_LOG(WARNING,
+ "port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:"
+ " %s",
+ dev->data->port_id, strerror(rte_errno));
+ return ret;
}
-
fc_conf->autoneg = ethpause.autoneg;
if (ethpause.rx_pause && ethpause.tx_pause)
fc_conf->mode = RTE_FC_FULL;
@@ -1070,12 +818,7 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
fc_conf->mode = RTE_FC_TX_PAUSE;
else
fc_conf->mode = RTE_FC_NONE;
- ret = 0;
-
-out:
- priv_unlock(priv);
- assert(ret >= 0);
- return -ret;
+ return 0;
}
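Both flow-control callbacks are thin wrappers around the ETHTOOL_GPAUSEPARAM/ETHTOOL_SPAUSEPARAM commands passed through mlx5_ifreq(). A Linux-only sketch of reading pause settings for a named interface outside the driver (illustrative helper name, error reporting reduced to -1):

#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <netinet/in.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int
example_get_pauseparam(const char *ifname, struct ethtool_pauseparam *pp)
{
	struct ifreq ifr;
	int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
	int ret;

	if (sock == -1)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	memset(pp, 0, sizeof(*pp));
	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name) - 1);
	pp->cmd = ETHTOOL_GPAUSEPARAM;
	ifr.ifr_data = (void *)pp;
	ret = ioctl(sock, SIOCETHTOOL, &ifr);
	close(sock);
	return ret; /* 0 on success, -1 with errno set on failure */
}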
/**
@@ -1087,12 +830,11 @@ out:
* Flow control parameters.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
- struct priv *priv = dev->data->dev_private;
struct ifreq ifr;
struct ethtool_pauseparam ethpause = {
.cmd = ETHTOOL_SPAUSEPARAM
@@ -1112,21 +854,15 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
ethpause.tx_pause = 1;
else
ethpause.tx_pause = 0;
-
- priv_lock(priv);
- if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
- ret = errno;
- WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
- " failed: %s",
- strerror(ret));
- goto out;
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+ if (ret) {
+ DRV_LOG(WARNING,
+ "port %u ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
+ " failed: %s",
+ dev->data->port_id, strerror(rte_errno));
+ return ret;
}
- ret = 0;
-
-out:
- priv_unlock(priv);
- assert(ret >= 0);
- return -ret;
+ return 0;
}
/**
@@ -1138,7 +874,7 @@ out:
* PCI bus address output buffer.
*
* @return
- * 0 on success, -1 on failure and errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
@@ -1149,8 +885,10 @@ mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
MKSTR(path, "%s/device/uevent", device->ibdev_path);
file = fopen(path, "rb");
- if (file == NULL)
- return -1;
+ if (file == NULL) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
while (fgets(line, sizeof(line), file) == line) {
size_t len = strlen(line);
int ret;
@@ -1180,46 +918,10 @@ mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
}
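mlx5_ibv_device_to_pci_addr() scans the device's uevent file line by line and parses the bus address out of it (the PCI_SLOT_NAME= line in practice). A self-contained sketch of that file-scanning step, with a hypothetical helper and error handling reduced to -1:

#include <stdio.h>
#include <string.h>

static int
example_uevent_pci_slot(const char *uevent_path, char *addr, size_t len)
{
	char line[256];
	FILE *file = fopen(uevent_path, "rb");
	int found = -1;

	if (file == NULL)
		return -1;
	while (fgets(line, sizeof(line), file) != NULL) {
		if (strncmp(line, "PCI_SLOT_NAME=", 14) != 0)
			continue;
		line[strcspn(line, "\n")] = '\0';   /* strip newline */
		snprintf(addr, len, "%s", line + 14);
		found = 0;
		break;
	}
	fclose(file);
	return found;
}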
/**
- * Update the link status.
- *
- * @param priv
- * Pointer to private structure.
- *
- * @return
- * Zero if the callback process can be called immediately.
- */
-static int
-priv_link_status_update(struct priv *priv)
-{
- struct rte_eth_link *link = &priv->dev->data->dev_link;
-
- priv_link_update(priv, 0);
- if (((link->link_speed == 0) && link->link_status) ||
- ((link->link_speed != 0) && !link->link_status)) {
- /*
- * Inconsistent status. Event likely occurred before the
- * kernel netdevice exposes the new status.
- */
- if (!priv->pending_alarm) {
- priv->pending_alarm = 1;
- rte_eal_alarm_set(MLX5_ALARM_TIMEOUT_US,
- mlx5_dev_link_status_handler,
- priv->dev);
- }
- return 1;
- } else if (unlikely(priv->pending_alarm)) {
- /* Link interrupt occurred while alarm is already scheduled. */
- priv->pending_alarm = 0;
- rte_eal_alarm_cancel(mlx5_dev_link_status_handler, priv->dev);
- }
- return 0;
-}
-
-/**
* Device status handler.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param events
* Pointer to event flags holder.
*
@@ -1227,60 +929,37 @@ priv_link_status_update(struct priv *priv)
* Events bitmap of callback process which can be called immediately.
*/
static uint32_t
-priv_dev_status_handler(struct priv *priv)
+mlx5_dev_status_handler(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct ibv_async_event event;
uint32_t ret = 0;
+ if (mlx5_link_update(dev, 0) == -EAGAIN) {
+ usleep(0);
+ return 0;
+ }
/* Read all message and acknowledge them. */
for (;;) {
if (mlx5_glue->get_async_event(priv->ctx, &event))
break;
if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
event.event_type == IBV_EVENT_PORT_ERR) &&
- (priv->dev->data->dev_conf.intr_conf.lsc == 1))
+ (dev->data->dev_conf.intr_conf.lsc == 1))
ret |= (1 << RTE_ETH_EVENT_INTR_LSC);
else if (event.event_type == IBV_EVENT_DEVICE_FATAL &&
- priv->dev->data->dev_conf.intr_conf.rmv == 1)
+ dev->data->dev_conf.intr_conf.rmv == 1)
ret |= (1 << RTE_ETH_EVENT_INTR_RMV);
else
- DEBUG("event type %d on port %d not handled",
- event.event_type, event.element.port_num);
+ DRV_LOG(DEBUG,
+ "port %u event type %d on not handled",
+ dev->data->port_id, event.event_type);
mlx5_glue->ack_async_event(&event);
}
- if (ret & (1 << RTE_ETH_EVENT_INTR_LSC))
- if (priv_link_status_update(priv))
- ret &= ~(1 << RTE_ETH_EVENT_INTR_LSC);
return ret;
}
/**
- * Handle delayed link status event.
- *
- * @param arg
- * Registered argument.
- */
-void
-mlx5_dev_link_status_handler(void *arg)
-{
- struct rte_eth_dev *dev = arg;
- struct priv *priv = dev->data->dev_private;
- int ret;
-
- while (!priv_trylock(priv)) {
- /* Alarm is being canceled. */
- if (priv->pending_alarm == 0)
- return;
- rte_pause();
- }
- priv->pending_alarm = 0;
- ret = priv_link_status_update(priv);
- priv_unlock(priv);
- if (!ret)
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
-}
-
-/**
* Handle interrupts from the NIC.
*
* @param[in] intr_handle
@@ -1292,12 +971,9 @@ void
mlx5_dev_interrupt_handler(void *cb_arg)
{
struct rte_eth_dev *dev = cb_arg;
- struct priv *priv = dev->data->dev_private;
uint32_t events;
- priv_lock(priv);
- events = priv_dev_status_handler(priv);
- priv_unlock(priv);
+ events = mlx5_dev_status_handler(dev);
if (events & (1 << RTE_ETH_EVENT_INTR_LSC))
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
if (events & (1 << RTE_ETH_EVENT_INTR_RMV))
@@ -1314,24 +990,21 @@ static void
mlx5_dev_handler_socket(void *cb_arg)
{
struct rte_eth_dev *dev = cb_arg;
- struct priv *priv = dev->data->dev_private;
- priv_lock(priv);
- priv_socket_handle(priv);
- priv_unlock(priv);
+ mlx5_socket_handle(dev);
}
/**
* Uninstall interrupt handler.
*
- * @param priv
- * Pointer to private structure.
* @param dev
- * Pointer to the rte_eth_dev structure.
+ * Pointer to Ethernet device.
*/
void
-priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
+mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
+
if (dev->data->dev_conf.intr_conf.lsc ||
dev->data->dev_conf.intr_conf.rmv)
rte_intr_callback_unregister(&priv->intr_handle,
@@ -1339,10 +1012,6 @@ priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
if (priv->primary_socket)
rte_intr_callback_unregister(&priv->intr_handle_socket,
mlx5_dev_handler_socket, dev);
- if (priv->pending_alarm) {
- priv->pending_alarm = 0;
- rte_eal_alarm_cancel(mlx5_dev_link_status_handler, dev);
- }
priv->intr_handle.fd = 0;
priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
priv->intr_handle_socket.fd = 0;
@@ -1352,21 +1021,24 @@ priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
/**
* Install interrupt handler.
*
- * @param priv
- * Pointer to private structure.
* @param dev
- * Pointer to the rte_eth_dev structure.
+ * Pointer to Ethernet device.
*/
void
-priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
+mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
{
- int rc, flags;
+ struct priv *priv = dev->data->dev_private;
+ int ret;
+ int flags;
assert(priv->ctx->async_fd > 0);
flags = fcntl(priv->ctx->async_fd, F_GETFL);
- rc = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
- if (rc < 0) {
- INFO("failed to change file descriptor async event queue");
+ ret = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
+ if (ret) {
+ DRV_LOG(INFO,
+ "port %u failed to change file descriptor async event"
+ " queue",
+ dev->data->port_id);
dev->data->dev_conf.intr_conf.lsc = 0;
dev->data->dev_conf.intr_conf.rmv = 0;
}
@@ -1377,9 +1049,11 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
rte_intr_callback_register(&priv->intr_handle,
mlx5_dev_interrupt_handler, dev);
}
-
- rc = priv_socket_init(priv);
- if (!rc && priv->primary_socket) {
+ ret = mlx5_socket_init(dev);
+ if (ret)
+ DRV_LOG(ERR, "port %u cannot initialise socket: %s",
+ dev->data->port_id, strerror(rte_errno));
+ else if (priv->primary_socket) {
priv->intr_handle_socket.fd = priv->primary_socket;
priv->intr_handle_socket.type = RTE_INTR_HANDLE_EXT;
rte_intr_callback_register(&priv->intr_handle_socket,
@@ -1388,41 +1062,18 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
}
/**
- * Change the link state (UP / DOWN).
- *
- * @param priv
- * Pointer to private data structure.
- * @param up
- * Nonzero for link up, otherwise link down.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-priv_dev_set_link(struct priv *priv, int up)
-{
- return priv_set_flags(priv, ~IFF_UP, up ? IFF_UP : ~IFF_UP);
-}
-
-/**
* DPDK callback to bring the link DOWN.
*
* @param dev
* Pointer to Ethernet device structure.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_set_link_down(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
- int err;
-
- priv_lock(priv);
- err = priv_dev_set_link(priv, 0);
- priv_unlock(priv);
- return err;
+ return mlx5_set_flags(dev, ~IFF_UP, ~IFF_UP);
}
/**
@@ -1432,63 +1083,68 @@ mlx5_set_link_down(struct rte_eth_dev *dev)
* Pointer to Ethernet device structure.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_set_link_up(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
- int err;
-
- priv_lock(priv);
- err = priv_dev_set_link(priv, 1);
- priv_unlock(priv);
- return err;
+ return mlx5_set_flags(dev, ~IFF_UP, IFF_UP);
}
/**
* Configure the TX function to use.
*
- * @param priv
- * Pointer to private data structure.
* @param dev
- * Pointer to rte_eth_dev structure.
+ * Pointer to Ethernet device structure.
*
* @return
* Pointer to selected Tx burst function.
*/
eth_tx_burst_t
-priv_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
+mlx5_select_tx_function(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst;
struct mlx5_dev_config *config = &priv->config;
uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
int tso = !!(tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO));
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_IP_TNL_TSO |
+ DEV_TX_OFFLOAD_UDP_TNL_TSO));
+ int swp = !!(tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
+ DEV_TX_OFFLOAD_UDP_TNL_TSO |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM));
int vlan_insert = !!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT);
assert(priv != NULL);
/* Select appropriate TX function. */
- if (vlan_insert || tso)
+ if (vlan_insert || tso || swp)
return tx_pkt_burst;
if (config->mps == MLX5_MPW_ENHANCED) {
- if (priv_check_vec_tx_support(priv, dev) > 0) {
- if (priv_check_raw_vec_tx_support(priv, dev) > 0)
+ if (mlx5_check_vec_tx_support(dev) > 0) {
+ if (mlx5_check_raw_vec_tx_support(dev) > 0)
tx_pkt_burst = mlx5_tx_burst_raw_vec;
else
tx_pkt_burst = mlx5_tx_burst_vec;
- DEBUG("selected Enhanced MPW TX vectorized function");
+ DRV_LOG(DEBUG,
+ "port %u selected enhanced MPW Tx vectorized"
+ " function",
+ dev->data->port_id);
} else {
tx_pkt_burst = mlx5_tx_burst_empw;
- DEBUG("selected Enhanced MPW TX function");
+ DRV_LOG(DEBUG,
+ "port %u selected enhanced MPW Tx function",
+ dev->data->port_id);
}
} else if (config->mps && (config->txq_inline > 0)) {
tx_pkt_burst = mlx5_tx_burst_mpw_inline;
- DEBUG("selected MPW inline TX function");
+ DRV_LOG(DEBUG, "port %u selected MPW inline Tx function",
+ dev->data->port_id);
} else if (config->mps) {
tx_pkt_burst = mlx5_tx_burst_mpw;
- DEBUG("selected MPW TX function");
+ DRV_LOG(DEBUG, "port %u selected MPW Tx function",
+ dev->data->port_id);
}
return tx_pkt_burst;
}
@@ -1496,23 +1152,24 @@ priv_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
/**
* Configure the RX function to use.
*
- * @param priv
- * Pointer to private data structure.
* @param dev
- * Pointer to rte_eth_dev structure.
+ * Pointer to Ethernet device structure.
*
* @return
* Pointer to selected Rx burst function.
*/
eth_rx_burst_t
-priv_select_rx_function(struct priv *priv, __rte_unused struct rte_eth_dev *dev)
+mlx5_select_rx_function(struct rte_eth_dev *dev)
{
eth_rx_burst_t rx_pkt_burst = mlx5_rx_burst;
- assert(priv != NULL);
- if (priv_check_vec_rx_support(priv) > 0) {
+ assert(dev != NULL);
+ if (mlx5_check_vec_rx_support(dev) > 0) {
rx_pkt_burst = mlx5_rx_burst_vec;
- DEBUG("selected RX vectorized function");
+ DRV_LOG(DEBUG, "port %u selected Rx vectorized function",
+ dev->data->port_id);
+ } else if (mlx5_mprq_enabled(dev)) {
+ rx_pkt_burst = mlx5_rx_burst_mprq;
}
return rx_pkt_burst;
}
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 26002c4b..994be05b 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1,9 +1,10 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2016 6WIND S.A.
- * Copyright 2016 Mellanox.
+ * Copyright 2016 Mellanox Technologies, Ltd
*/
#include <sys/queue.h>
+#include <stdint.h>
#include <string.h>
/* Verbs header. */
@@ -16,6 +17,9 @@
#pragma GCC diagnostic error "-Wpedantic"
#endif
+#include <rte_common.h>
+#include <rte_ether.h>
+#include <rte_eth_ctrl.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
@@ -27,12 +31,13 @@
#include "mlx5_prm.h"
#include "mlx5_glue.h"
-/* Define minimal priority for control plane flows. */
-#define MLX5_CTRL_FLOW_PRIORITY 4
+/* Flow priority for control plane flows. */
+#define MLX5_CTRL_FLOW_PRIORITY 1
/* Internet Protocol versions. */
#define MLX5_IPV4 4
#define MLX5_IPV6 6
+#define MLX5_GRE 47
#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
struct ibv_flow_spec_counter_action {
@@ -44,40 +49,62 @@ struct ibv_flow_spec_counter_action {
extern const struct eth_dev_ops mlx5_dev_ops;
extern const struct eth_dev_ops mlx5_dev_ops_isolate;
+/** Structure give to the conversion functions. */
+struct mlx5_flow_data {
+ struct rte_eth_dev *dev; /** Ethernet device. */
+ struct mlx5_flow_parse *parser; /** Parser context. */
+ struct rte_flow_error *error; /** Error context. */
+};
+
static int
mlx5_flow_create_eth(const struct rte_flow_item *item,
const void *default_mask,
- void *data);
+ struct mlx5_flow_data *data);
static int
mlx5_flow_create_vlan(const struct rte_flow_item *item,
const void *default_mask,
- void *data);
+ struct mlx5_flow_data *data);
static int
mlx5_flow_create_ipv4(const struct rte_flow_item *item,
const void *default_mask,
- void *data);
+ struct mlx5_flow_data *data);
static int
mlx5_flow_create_ipv6(const struct rte_flow_item *item,
const void *default_mask,
- void *data);
+ struct mlx5_flow_data *data);
static int
mlx5_flow_create_udp(const struct rte_flow_item *item,
const void *default_mask,
- void *data);
+ struct mlx5_flow_data *data);
static int
mlx5_flow_create_tcp(const struct rte_flow_item *item,
const void *default_mask,
- void *data);
+ struct mlx5_flow_data *data);
static int
mlx5_flow_create_vxlan(const struct rte_flow_item *item,
const void *default_mask,
- void *data);
+ struct mlx5_flow_data *data);
+
+static int
+mlx5_flow_create_vxlan_gpe(const struct rte_flow_item *item,
+ const void *default_mask,
+ struct mlx5_flow_data *data);
+
+static int
+mlx5_flow_create_gre(const struct rte_flow_item *item,
+ const void *default_mask,
+ struct mlx5_flow_data *data);
+
+static int
+mlx5_flow_create_mpls(const struct rte_flow_item *item,
+ const void *default_mask,
+ struct mlx5_flow_data *data);
struct mlx5_flow_parse;
@@ -89,7 +116,7 @@ static int
mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id);
static int
-mlx5_flow_create_count(struct priv *priv, struct mlx5_flow_parse *parser);
+mlx5_flow_create_count(struct rte_eth_dev *dev, struct mlx5_flow_parse *parser);
/* Hash RX queue types. */
enum hash_rxq_type {
@@ -100,6 +127,7 @@ enum hash_rxq_type {
HASH_RXQ_UDPV6,
HASH_RXQ_IPV6,
HASH_RXQ_ETH,
+ HASH_RXQ_TUNNEL,
};
/* Initialization data for hash RX queue. */
@@ -206,10 +234,10 @@ struct rte_flow {
TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
uint32_t mark:1; /**< Set if the flow is marked. */
uint32_t drop:1; /**< Drop queue. */
- uint16_t queues_n; /**< Number of entries in queue[]. */
+ struct rte_flow_action_rss rss_conf; /**< RSS configuration */
uint16_t (*queues)[]; /**< Queues indexes to use. */
- struct rte_eth_rss_conf rss_conf; /**< RSS configuration */
uint8_t rss_key[40]; /**< copy of the RSS key. */
+ uint32_t tunnel; /**< Tunnel type of RTE_PTYPE_TUNNEL_XXX. */
struct ibv_counter_set *cs; /**< Holds the counters for the rule. */
struct mlx5_flow_counter_stats counter_stats;/**<The counter stats. */
struct mlx5_flow frxq[RTE_DIM(hash_rxq_init)];
@@ -222,6 +250,33 @@ struct rte_flow {
__VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
}
+#define IS_TUNNEL(type) ( \
+ (type) == RTE_FLOW_ITEM_TYPE_VXLAN || \
+ (type) == RTE_FLOW_ITEM_TYPE_VXLAN_GPE || \
+ (type) == RTE_FLOW_ITEM_TYPE_GRE || \
+ (type) == RTE_FLOW_ITEM_TYPE_MPLS)
+
+const uint32_t flow_ptype[] = {
+ [RTE_FLOW_ITEM_TYPE_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
+ [RTE_FLOW_ITEM_TYPE_VXLAN_GPE] = RTE_PTYPE_TUNNEL_VXLAN_GPE,
+ [RTE_FLOW_ITEM_TYPE_GRE] = RTE_PTYPE_TUNNEL_GRE,
+ [RTE_FLOW_ITEM_TYPE_MPLS] = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
+};
+
+#define PTYPE_IDX(t) ((RTE_PTYPE_TUNNEL_MASK & (t)) >> 12)
+
+const uint32_t ptype_ext[] = {
+ [PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)] = RTE_PTYPE_TUNNEL_VXLAN |
+ RTE_PTYPE_L4_UDP,
+ [PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN_GPE)] = RTE_PTYPE_TUNNEL_VXLAN_GPE |
+ RTE_PTYPE_L4_UDP,
+ [PTYPE_IDX(RTE_PTYPE_TUNNEL_GRE)] = RTE_PTYPE_TUNNEL_GRE,
+ [PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_GRE)] =
+ RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
+ [PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_UDP)] =
+ RTE_PTYPE_TUNNEL_MPLS_IN_GRE | RTE_PTYPE_L4_UDP,
+};
+
/** Structure to generate a simple graph of layers supported by the NIC. */
struct mlx5_flow_items {
/** List of possible actions for these items. */
@@ -247,11 +302,12 @@ struct mlx5_flow_items {
* Internal structure to store the conversion.
*
* @return
- * 0 on success, negative value otherwise.
+ * 0 on success, a negative errno value otherwise and rte_errno is
+ * set.
*/
int (*convert)(const struct rte_flow_item *item,
const void *default_mask,
- void *data);
+ struct mlx5_flow_data *data);
/** Size in bytes of the destination structure. */
const unsigned int dst_sz;
/** List of possible following items. */
@@ -274,7 +330,9 @@ static const enum rte_flow_action_type valid_actions[] = {
static const struct mlx5_flow_items mlx5_flow_items[] = {
[RTE_FLOW_ITEM_TYPE_END] = {
.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_VXLAN),
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
+ RTE_FLOW_ITEM_TYPE_GRE),
},
[RTE_FLOW_ITEM_TYPE_ETH] = {
.items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN,
@@ -297,6 +355,7 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
.actions = valid_actions,
.mask = &(const struct rte_flow_item_vlan){
.tci = -1,
+ .inner_type = -1,
},
.default_mask = &rte_flow_item_vlan_mask,
.mask_sz = sizeof(struct rte_flow_item_vlan),
@@ -305,7 +364,8 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
},
[RTE_FLOW_ITEM_TYPE_IPV4] = {
.items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_TCP),
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_GRE),
.actions = valid_actions,
.mask = &(const struct rte_flow_item_ipv4){
.hdr = {
@@ -322,7 +382,8 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
},
[RTE_FLOW_ITEM_TYPE_IPV6] = {
.items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_TCP),
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_GRE),
.actions = valid_actions,
.mask = &(const struct rte_flow_item_ipv6){
.hdr = {
@@ -349,7 +410,9 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
.dst_sz = sizeof(struct ibv_flow_spec_ipv6),
},
[RTE_FLOW_ITEM_TYPE_UDP] = {
- .items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN),
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
+ RTE_FLOW_ITEM_TYPE_MPLS),
.actions = valid_actions,
.mask = &(const struct rte_flow_item_udp){
.hdr = {
@@ -375,8 +438,43 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
.convert = mlx5_flow_create_tcp,
.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
},
+ [RTE_FLOW_ITEM_TYPE_GRE] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_MPLS),
+ .actions = valid_actions,
+ .mask = &(const struct rte_flow_item_gre){
+ .protocol = -1,
+ },
+ .default_mask = &rte_flow_item_gre_mask,
+ .mask_sz = sizeof(struct rte_flow_item_gre),
+ .convert = mlx5_flow_create_gre,
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ .dst_sz = sizeof(struct ibv_flow_spec_gre),
+#else
+ .dst_sz = sizeof(struct ibv_flow_spec_tunnel),
+#endif
+ },
+ [RTE_FLOW_ITEM_TYPE_MPLS] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6),
+ .actions = valid_actions,
+ .mask = &(const struct rte_flow_item_mpls){
+ .label_tc_s = "\xff\xff\xf0",
+ },
+ .default_mask = &rte_flow_item_mpls_mask,
+ .mask_sz = sizeof(struct rte_flow_item_mpls),
+ .convert = mlx5_flow_create_mpls,
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ .dst_sz = sizeof(struct ibv_flow_spec_mpls),
+#endif
+ },
[RTE_FLOW_ITEM_TYPE_VXLAN] = {
- .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4, /* For L3 VXLAN. */
+ RTE_FLOW_ITEM_TYPE_IPV6), /* For L3 VXLAN. */
.actions = valid_actions,
.mask = &(const struct rte_flow_item_vxlan){
.vni = "\xff\xff\xff",
@@ -386,28 +484,43 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
.convert = mlx5_flow_create_vxlan,
.dst_sz = sizeof(struct ibv_flow_spec_tunnel),
},
+ [RTE_FLOW_ITEM_TYPE_VXLAN_GPE] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6),
+ .actions = valid_actions,
+ .mask = &(const struct rte_flow_item_vxlan_gpe){
+ .vni = "\xff\xff\xff",
+ },
+ .default_mask = &rte_flow_item_vxlan_gpe_mask,
+ .mask_sz = sizeof(struct rte_flow_item_vxlan_gpe),
+ .convert = mlx5_flow_create_vxlan_gpe,
+ .dst_sz = sizeof(struct ibv_flow_spec_tunnel),
+ },
};
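The mlx5_flow_items[] graph above now has edges for GRE, MPLS, VXLAN-GPE and L3 VXLAN, so patterns such as ETH / IPV4 / GRE / IPV4 become reachable. For orientation, an application-side rte_flow pattern that walks those new edges (standard rte_flow API, illustrative only; each layer matches everything):

#include <rte_flow.h>

static const struct rte_flow_attr attr = { .ingress = 1 };
static const struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_GRE },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },  /* inner header */
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
static const struct rte_flow_action_queue queue_conf = { .index = 0 };
static const struct rte_flow_action actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
/* rte_flow_validate(port_id, &attr, pattern, actions, &error) would then
 * exercise the conversion/validation functions below. */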
/** Structure to pass to the conversion function. */
struct mlx5_flow_parse {
- uint32_t inner; /**< Set once VXLAN is encountered. */
+ uint32_t inner; /**< Verbs value, set once tunnel is encountered. */
uint32_t create:1;
/**< Whether resources should remain after a validate. */
uint32_t drop:1; /**< Target is a drop queue. */
uint32_t mark:1; /**< Mark is present in the flow. */
uint32_t count:1; /**< Count is present in the flow. */
uint32_t mark_id; /**< Mark identifier. */
+ struct rte_flow_action_rss rss_conf; /**< RSS configuration */
uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queues indexes to use. */
- uint16_t queues_n; /**< Number of entries in queue[]. */
- struct rte_eth_rss_conf rss_conf; /**< RSS configuration */
uint8_t rss_key[40]; /**< copy of the RSS key. */
enum hash_rxq_type layer; /**< Last pattern layer detected. */
+ enum hash_rxq_type out_layer; /**< Last outer pattern layer detected. */
+ uint32_t tunnel; /**< Tunnel type of RTE_PTYPE_TUNNEL_XXX. */
struct ibv_counter_set *cs; /**< Holds the counter set for the rule */
struct {
struct ibv_flow_attr *ibv_attr;
/**< Pointer to Verbs attributes. */
unsigned int offset;
/**< Current position or total size of the attribute. */
+ uint64_t hash_fields; /**< Verbs hash fields. */
} queue[RTE_DIM(hash_rxq_init)];
};
@@ -436,9 +549,17 @@ struct mlx5_fdir {
struct rte_flow_item_ipv6 ipv6;
} l3;
union {
+ struct rte_flow_item_ipv4 ipv4;
+ struct rte_flow_item_ipv6 ipv6;
+ } l3_mask;
+ union {
struct rte_flow_item_udp udp;
struct rte_flow_item_tcp tcp;
} l4;
+ union {
+ struct rte_flow_item_udp udp;
+ struct rte_flow_item_tcp tcp;
+ } l4_mask;
struct rte_flow_action_queue queue;
};
@@ -449,7 +570,7 @@ struct ibv_spec_header {
};
/**
- * Check support for a given item.
+ * Check whether the item is fully supported by the NIC matching capability.
*
* @param item[in]
* Item specification.
@@ -460,121 +581,56 @@ struct ibv_spec_header {
* Bit-Mask size in bytes.
*
* @return
- * 0 on success.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_item_validate(const struct rte_flow_item *item,
const uint8_t *mask, unsigned int size)
{
- int ret = 0;
-
- if (!item->spec && (item->mask || item->last))
- return -1;
- if (item->spec && !item->mask) {
- unsigned int i;
- const uint8_t *spec = item->spec;
-
- for (i = 0; i < size; ++i)
- if ((spec[i] | mask[i]) != mask[i])
- return -1;
- }
- if (item->last && !item->mask) {
- unsigned int i;
- const uint8_t *spec = item->last;
-
- for (i = 0; i < size; ++i)
- if ((spec[i] | mask[i]) != mask[i])
- return -1;
- }
- if (item->mask) {
- unsigned int i;
- const uint8_t *spec = item->spec;
-
- for (i = 0; i < size; ++i)
- if ((spec[i] | mask[i]) != mask[i])
- return -1;
- }
- if (item->spec && item->last) {
- uint8_t spec[size];
- uint8_t last[size];
- const uint8_t *apply = mask;
- unsigned int i;
-
- if (item->mask)
- apply = item->mask;
- for (i = 0; i < size; ++i) {
- spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
- last[i] = ((const uint8_t *)item->last)[i] & apply[i];
- }
- ret = memcmp(spec, last, size);
- }
- return ret;
-}
+ unsigned int i;
+ const uint8_t *spec = item->spec;
+ const uint8_t *last = item->last;
+ const uint8_t *m = item->mask ? item->mask : mask;
-/**
- * Copy the RSS configuration from the user ones, of the rss_conf is null,
- * uses the driver one.
- *
- * @param priv
- * Pointer to private structure.
- * @param parser
- * Internal parser structure.
- * @param rss_conf
- * User RSS configuration to save.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-priv_flow_convert_rss_conf(struct priv *priv,
- struct mlx5_flow_parse *parser,
- const struct rte_eth_rss_conf *rss_conf)
-{
+ if (!spec && (item->mask || last))
+ goto error;
+ if (!spec)
+ return 0;
/*
- * This function is also called at the beginning of
- * priv_flow_convert_actions() to initialize the parser with the
- * device default RSS configuration.
+ * Single-pass check to make sure that:
+ * - item->mask is supported, no bits are set outside mask.
+ * - Both masked item->spec and item->last are equal (no range
+ * supported).
*/
- (void)priv;
- if (rss_conf) {
- if (rss_conf->rss_hf & MLX5_RSS_HF_MASK)
- return EINVAL;
- if (rss_conf->rss_key_len != 40)
- return EINVAL;
- if (rss_conf->rss_key_len && rss_conf->rss_key) {
- parser->rss_conf.rss_key_len = rss_conf->rss_key_len;
- memcpy(parser->rss_key, rss_conf->rss_key,
- rss_conf->rss_key_len);
- parser->rss_conf.rss_key = parser->rss_key;
- }
- parser->rss_conf.rss_hf = rss_conf->rss_hf;
+ for (i = 0; i < size; i++) {
+ if (!m[i])
+ continue;
+ if ((m[i] | mask[i]) != mask[i])
+ goto error;
+ if (last && ((spec[i] & m[i]) != (last[i] & m[i])))
+ goto error;
}
return 0;
+error:
+ rte_errno = ENOTSUP;
+ return -rte_errno;
}
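The rewritten mlx5_flow_item_validate() replaces the four separate scans with a single byte-wise pass: every bit set in the effective mask must be covered by the supported mask, and spec/last must be identical under that mask since ranges are not supported. The same check as a self-contained sketch with hypothetical names:

#include <stddef.h>
#include <stdint.h>

/* Return 0 when the user mask fits inside the supported mask and the
 * masked spec/last bytes describe a single value rather than a range. */
static int
example_item_check(const uint8_t *spec, const uint8_t *last,
		   const uint8_t *user_mask, const uint8_t *supported,
		   size_t size)
{
	size_t i;

	for (i = 0; i < size; i++) {
		if (!user_mask[i])
			continue;
		if ((user_mask[i] | supported[i]) != supported[i])
			return -1;  /* bit requested outside capability */
		if (last &&
		    (spec[i] & user_mask[i]) != (last[i] & user_mask[i]))
			return -1;  /* ranges are not supported */
	}
	return 0;
}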
/**
* Extract attribute to the parser.
*
- * @param priv
- * Pointer to private structure.
* @param[in] attr
* Flow rule attributes.
* @param[out] error
* Perform verbose error reporting if not NULL.
- * @param[in, out] parser
- * Internal parser structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_flow_convert_attributes(struct priv *priv,
- const struct rte_flow_attr *attr,
- struct rte_flow_error *error,
- struct mlx5_flow_parse *parser)
+mlx5_flow_convert_attributes(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
{
- (void)priv;
- (void)parser;
if (attr->group) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
@@ -596,6 +652,13 @@ priv_flow_convert_attributes(struct priv *priv,
"egress is not supported");
return -rte_errno;
}
+ if (attr->transfer) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ NULL,
+ "transfer is not supported");
+ return -rte_errno;
+ }
if (!attr->ingress) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
@@ -609,8 +672,8 @@ priv_flow_convert_attributes(struct priv *priv,
/**
* Extract actions request to the parser.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param[in] actions
* Associated actions (list terminated by the END action).
* @param[out] error
@@ -622,83 +685,115 @@ priv_flow_convert_attributes(struct priv *priv,
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_flow_convert_actions(struct priv *priv,
+mlx5_flow_convert_actions(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error,
struct mlx5_flow_parse *parser)
{
- /*
- * Add default RSS configuration necessary for Verbs to create QP even
- * if no RSS is necessary.
- */
- priv_flow_convert_rss_conf(priv, parser,
- (const struct rte_eth_rss_conf *)
- &priv->rss_conf);
+ enum { FATE = 1, MARK = 2, COUNT = 4, };
+ uint32_t overlap = 0;
+ struct priv *priv = dev->data->dev_private;
+
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
continue;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
+ if (overlap & FATE)
+ goto exit_action_overlap;
+ overlap |= FATE;
parser->drop = 1;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
const struct rte_flow_action_queue *queue =
(const struct rte_flow_action_queue *)
actions->conf;
- uint16_t n;
- uint16_t found = 0;
+ if (overlap & FATE)
+ goto exit_action_overlap;
+ overlap |= FATE;
if (!queue || (queue->index > (priv->rxqs_n - 1)))
goto exit_action_not_supported;
- for (n = 0; n < parser->queues_n; ++n) {
- if (parser->queues[n] == queue->index) {
- found = 1;
- break;
- }
- }
- if (parser->queues_n > 1 && !found) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "queue action not in RSS queues");
- return -rte_errno;
- }
- if (!found) {
- parser->queues_n = 1;
- parser->queues[0] = queue->index;
- }
+ parser->queues[0] = queue->index;
+ parser->rss_conf = (struct rte_flow_action_rss){
+ .queue_num = 1,
+ .queue = parser->queues,
+ };
} else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
const struct rte_flow_action_rss *rss =
(const struct rte_flow_action_rss *)
actions->conf;
+ const uint8_t *rss_key;
+ uint32_t rss_key_len;
uint16_t n;
- if (!rss || !rss->num) {
+ if (overlap & FATE)
+ goto exit_action_overlap;
+ overlap |= FATE;
+ if (rss->func &&
+ rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "the only supported RSS hash"
+ " function is Toeplitz");
+ return -rte_errno;
+ }
+#ifndef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ if (parser->rss_conf.level > 1) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "a nonzero RSS encapsulation"
+ " level is not supported");
+ return -rte_errno;
+ }
+#endif
+ if (parser->rss_conf.level > 2) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "RSS encapsulation level"
+ " > 1 is not supported");
+ return -rte_errno;
+ }
+ if (rss->types & MLX5_RSS_HF_MASK) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "unsupported RSS type"
+ " requested");
+ return -rte_errno;
+ }
+ if (rss->key_len) {
+ rss_key_len = rss->key_len;
+ rss_key = rss->key;
+ } else {
+ rss_key_len = rss_hash_default_key_len;
+ rss_key = rss_hash_default_key;
+ }
+ if (rss_key_len != RTE_DIM(parser->rss_key)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "RSS hash key must be"
+ " exactly 40 bytes long");
+ return -rte_errno;
+ }
+ if (!rss->queue_num) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
"no valid queues");
return -rte_errno;
}
- if (parser->queues_n == 1) {
- uint16_t found = 0;
-
- assert(parser->queues_n);
- for (n = 0; n < rss->num; ++n) {
- if (parser->queues[0] ==
- rss->queue[n]) {
- found = 1;
- break;
- }
- }
- if (!found) {
- rte_flow_error_set(error, ENOTSUP,
+ if (rss->queue_num > RTE_DIM(parser->queues)) {
+ rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
- "queue action not in RSS"
- " queues");
- return -rte_errno;
- }
+ "too many queues for RSS"
+ " context");
+ return -rte_errno;
}
- for (n = 0; n < rss->num; ++n) {
+ for (n = 0; n < rss->queue_num; ++n) {
if (rss->queue[n] >= priv->rxqs_n) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -708,22 +803,26 @@ priv_flow_convert_actions(struct priv *priv,
return -rte_errno;
}
}
- for (n = 0; n < rss->num; ++n)
- parser->queues[n] = rss->queue[n];
- parser->queues_n = rss->num;
- if (priv_flow_convert_rss_conf(priv, parser,
- rss->rss_conf)) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "wrong RSS configuration");
- return -rte_errno;
- }
+ parser->rss_conf = (struct rte_flow_action_rss){
+ .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
+ .level = rss->level ? rss->level : 1,
+ .types = rss->types,
+ .key_len = rss_key_len,
+ .queue_num = rss->queue_num,
+ .key = memcpy(parser->rss_key, rss_key,
+ sizeof(*rss_key) * rss_key_len),
+ .queue = memcpy(parser->queues, rss->queue,
+ sizeof(*rss->queue) *
+ rss->queue_num),
+ };
} else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {
const struct rte_flow_action_mark *mark =
(const struct rte_flow_action_mark *)
actions->conf;
+ if (overlap & MARK)
+ goto exit_action_overlap;
+ overlap |= MARK;
if (!mark) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -741,17 +840,26 @@ priv_flow_convert_actions(struct priv *priv,
parser->mark = 1;
parser->mark_id = mark->id;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) {
+ if (overlap & MARK)
+ goto exit_action_overlap;
+ overlap |= MARK;
parser->mark = 1;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT &&
priv->config.flow_counter_en) {
+ if (overlap & COUNT)
+ goto exit_action_overlap;
+ overlap |= COUNT;
parser->count = 1;
} else {
goto exit_action_not_supported;
}
}
+ /* When fate is unknown, drop traffic. */
+ if (!(overlap & FATE))
+ parser->drop = 1;
if (parser->drop && parser->mark)
parser->mark = 0;
- if (!parser->queues_n && !parser->drop) {
+ if (!parser->rss_conf.queue_num && !parser->drop) {
rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "no valid action");
return -rte_errno;
@@ -761,13 +869,15 @@ exit_action_not_supported:
rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
actions, "action not supported");
return -rte_errno;
+exit_action_overlap:
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "overlapping actions are not supported");
+ return -rte_errno;
}
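mlx5_flow_convert_actions() now accepts at most one fate action (QUEUE, RSS or DROP), requires the Toeplitz hash function, an exactly 40-byte key, an encapsulation level of at most 1 (2 only with tunnel offload), and rejects any RSS type bit caught by MLX5_RSS_HF_MASK. An application-side RSS action configuration expected to pass those checks (illustrative; the all-zero key and the ETH_RSS_IP types are placeholders):

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

static const uint16_t rss_queues[] = { 0, 1, 2, 3 };
static const uint8_t rss_key[40]; /* 40-byte Toeplitz key, all zeros here */
static const struct rte_flow_action_rss rss_conf = {
	.func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
	.level = 1,                 /* hash on the outermost packet only */
	.types = ETH_RSS_IP,
	.key_len = sizeof(rss_key), /* must be exactly 40 bytes */
	.key = rss_key,
	.queue_num = 4,
	.queue = rss_queues,
};
static const struct rte_flow_action rss_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss_conf },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};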
/**
* Validate items.
*
- * @param priv
- * Pointer to private structure.
* @param[in] items
* Pattern specification (list terminated by the END pattern item).
* @param[out] error
@@ -779,25 +889,28 @@ exit_action_not_supported:
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_flow_convert_items_validate(struct priv *priv,
+mlx5_flow_convert_items_validate(struct rte_eth_dev *dev,
const struct rte_flow_item items[],
struct rte_flow_error *error,
struct mlx5_flow_parse *parser)
{
+ struct priv *priv = dev->data->dev_private;
const struct mlx5_flow_items *cur_item = mlx5_flow_items;
unsigned int i;
+ unsigned int last_voids = 0;
+ int ret = 0;
- (void)priv;
/* Initialise the offsets to start after verbs attribute. */
for (i = 0; i != hash_rxq_init_n; ++i)
parser->queue[i].offset = sizeof(struct ibv_flow_attr);
for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
const struct mlx5_flow_items *token = NULL;
unsigned int n;
- int err;
- if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
+ if (items->type == RTE_FLOW_ITEM_TYPE_VOID) {
+ last_voids++;
continue;
+ }
for (i = 0;
cur_item->items &&
cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
@@ -807,24 +920,48 @@ priv_flow_convert_items_validate(struct priv *priv,
break;
}
}
- if (!token)
+ if (!token) {
+ ret = -ENOTSUP;
goto exit_item_not_supported;
+ }
cur_item = token;
- err = mlx5_flow_item_validate(items,
+ ret = mlx5_flow_item_validate(items,
(const uint8_t *)cur_item->mask,
cur_item->mask_sz);
- if (err)
+ if (ret)
goto exit_item_not_supported;
- if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
- if (parser->inner) {
+ if (IS_TUNNEL(items->type)) {
+ if (parser->tunnel &&
+ !((items - last_voids - 1)->type ==
+ RTE_FLOW_ITEM_TYPE_GRE && items->type ==
+ RTE_FLOW_ITEM_TYPE_MPLS)) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
items,
- "cannot recognize multiple"
- " VXLAN encapsulations");
+ "Cannot recognize multiple"
+ " tunnel encapsulations.");
+ return -rte_errno;
+ }
+ if (items->type == RTE_FLOW_ITEM_TYPE_MPLS &&
+ !priv->config.mpls_en) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ items,
+ "MPLS not supported or"
+ " disabled in firmware"
+ " configuration.");
+ return -rte_errno;
+ }
+ if (!priv->config.tunnel_en &&
+ parser->rss_conf.level > 1) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ items,
+ "RSS on tunnel is not supported");
return -rte_errno;
}
parser->inner = IBV_FLOW_SPEC_INNER;
+ parser->tunnel = flow_ptype[items->type];
}
if (parser->drop) {
parser->queue[HASH_RXQ_ETH].offset += cur_item->dst_sz;
@@ -832,6 +969,7 @@ priv_flow_convert_items_validate(struct priv *priv,
for (n = 0; n != hash_rxq_init_n; ++n)
parser->queue[n].offset += cur_item->dst_sz;
}
+ last_voids = 0;
}
if (parser->drop) {
parser->queue[HASH_RXQ_ETH].offset +=
@@ -850,102 +988,103 @@ priv_flow_convert_items_validate(struct priv *priv,
}
return 0;
exit_item_not_supported:
- rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
- items, "item not supported");
- return -rte_errno;
+ return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_ITEM,
+ items, "item not supported");
}
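
The `last_voids` counter added above lets the validator step back over any VOID items to reach the previous real pattern item, so that only MPLS stacked directly on GRE is accepted as a second tunnel. A minimal sketch of that lookback, with a hypothetical helper name and not part of the patch:

#include <stdbool.h>
#include <rte_flow.h>

static bool
second_tunnel_allowed(const struct rte_flow_item *items, unsigned int last_voids)
{
	/* "items" points at the tunnel item being validated; step back over
	 * the VOID items counted in last_voids to reach the previous item. */
	const struct rte_flow_item *prev = items - last_voids - 1;

	/* Only MPLS directly following GRE is accepted as a second tunnel. */
	return prev->type == RTE_FLOW_ITEM_TYPE_GRE &&
	       items->type == RTE_FLOW_ITEM_TYPE_MPLS;
}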
/**
* Allocate memory space to store verbs flow attributes.
*
- * @param priv
- * Pointer to private structure.
- * @param[in] priority
- * Flow priority.
* @param[in] size
* Amount of byte to allocate.
* @param[out] error
* Perform verbose error reporting if not NULL.
*
* @return
- * A verbs flow attribute on success, NULL otherwise.
+ * A verbs flow attribute on success, NULL otherwise and rte_errno is set.
*/
-static struct ibv_flow_attr*
-priv_flow_convert_allocate(struct priv *priv,
- unsigned int priority,
- unsigned int size,
- struct rte_flow_error *error)
+static struct ibv_flow_attr *
+mlx5_flow_convert_allocate(unsigned int size, struct rte_flow_error *error)
{
struct ibv_flow_attr *ibv_attr;
- (void)priv;
ibv_attr = rte_calloc(__func__, 1, size, 0);
if (!ibv_attr) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
- "cannot allocate verbs spec attributes.");
+ "cannot allocate verbs spec attributes");
return NULL;
}
- ibv_attr->priority = priority;
return ibv_attr;
}
/**
- * Finalise verbs flow attributes.
+ * Make inner packet matching with a higher priority than the non-inner
+ * matching.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param[in, out] parser
* Internal parser structure.
+ * @param attr
+ * User flow attribute.
*/
static void
-priv_flow_convert_finalise(struct priv *priv, struct mlx5_flow_parse *parser)
+mlx5_flow_update_priority(struct rte_eth_dev *dev,
+ struct mlx5_flow_parse *parser,
+ const struct rte_flow_attr *attr)
{
- const unsigned int ipv4 =
- hash_rxq_init[parser->layer].ip_version == MLX5_IPV4;
- const enum hash_rxq_type hmin = ipv4 ? HASH_RXQ_TCPV4 : HASH_RXQ_TCPV6;
- const enum hash_rxq_type hmax = ipv4 ? HASH_RXQ_IPV4 : HASH_RXQ_IPV6;
- const enum hash_rxq_type ohmin = ipv4 ? HASH_RXQ_TCPV6 : HASH_RXQ_TCPV4;
- const enum hash_rxq_type ohmax = ipv4 ? HASH_RXQ_IPV6 : HASH_RXQ_IPV4;
- const enum hash_rxq_type ip = ipv4 ? HASH_RXQ_IPV4 : HASH_RXQ_IPV6;
+ struct priv *priv = dev->data->dev_private;
unsigned int i;
+ uint16_t priority;
- (void)priv;
- if (parser->layer == HASH_RXQ_ETH) {
- goto fill;
- } else {
- /*
- * This layer becomes useless as the pattern define under
- * layers.
- */
- rte_free(parser->queue[HASH_RXQ_ETH].ibv_attr);
- parser->queue[HASH_RXQ_ETH].ibv_attr = NULL;
+ /* 8 priorities >= 16 priorities
+ * Control flow: 4-7 8-15
+ * User normal flow: 1-3 4-7
+ * User tunnel flow: 0-2 0-3
+ */
+ priority = attr->priority * MLX5_VERBS_FLOW_PRIO_8;
+ if (priv->config.max_verbs_prio == MLX5_VERBS_FLOW_PRIO_8)
+ priority /= 2;
+ /*
+ * Lower the non-tunnel flow priority by one Verbs level if only 8 Verbs
+ * priorities are supported, by four levels otherwise.
+ */
+ if (!parser->inner) {
+ if (priv->config.max_verbs_prio == MLX5_VERBS_FLOW_PRIO_8)
+ priority += 1;
+ else
+ priority += MLX5_VERBS_FLOW_PRIO_8 / 2;
+ }
+ if (parser->drop) {
+ parser->queue[HASH_RXQ_ETH].ibv_attr->priority = priority +
+ hash_rxq_init[HASH_RXQ_ETH].flow_priority;
+ return;
}
- /* Remove opposite kind of layer e.g. IPv6 if the pattern is IPv4. */
- for (i = ohmin; i != (ohmax + 1); ++i) {
+ for (i = 0; i != hash_rxq_init_n; ++i) {
if (!parser->queue[i].ibv_attr)
continue;
- rte_free(parser->queue[i].ibv_attr);
- parser->queue[i].ibv_attr = NULL;
+ parser->queue[i].ibv_attr->priority = priority +
+ hash_rxq_init[i].flow_priority;
}
- /* Remove impossible flow according to the RSS configuration. */
- if (hash_rxq_init[parser->layer].dpdk_rss_hf &
- parser->rss_conf.rss_hf) {
- /* Remove any other flow. */
- for (i = hmin; i != (hmax + 1); ++i) {
- if ((i == parser->layer) ||
- (!parser->queue[i].ibv_attr))
- continue;
- rte_free(parser->queue[i].ibv_attr);
- parser->queue[i].ibv_attr = NULL;
- }
- } else if (!parser->queue[ip].ibv_attr) {
- /* no RSS possible with the current configuration. */
- parser->queues_n = 1;
+}
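
A worked example of the remapping above, assuming MLX5_VERBS_FLOW_PRIO_8 is 8 as its name suggests: a non-tunnel flow created with rte_flow priority 0 gets 0 * 8 + 4 = 4 on a 16-priority device and 0 / 2 + 1 = 1 on an 8-priority device, before the per-hash-queue flow_priority offset is added, which matches the "User normal flow" rows of the table above. A small sketch of the same computation, illustrative only:

#include <stdbool.h>
#include <stdint.h>

static uint16_t
verbs_base_priority(uint16_t attr_priority, bool inner, bool only_8_priorities)
{
	uint16_t priority = attr_priority * 8;	/* MLX5_VERBS_FLOW_PRIO_8 */

	if (only_8_priorities)
		priority /= 2;
	/* Non-tunnel (outer) flows are demoted below tunnel flows. */
	if (!inner)
		priority += only_8_priorities ? 1 : 8 / 2;
	return priority;
}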
+
+/**
+ * Finalise verbs flow attributes.
+ *
+ * @param[in, out] parser
+ * Internal parser structure.
+ */
+static void
+mlx5_flow_convert_finalise(struct mlx5_flow_parse *parser)
+{
+ unsigned int i;
+ uint32_t inner = parser->inner;
+
+ /* Don't create extra flows for outer RSS. */
+ if (parser->tunnel && parser->rss_conf.level < 2)
return;
- }
-fill:
/*
* Fill missing layers in verbs specifications, or compute the correct
* offset to allocate the memory space for the attributes and
@@ -956,23 +1095,25 @@ fill:
struct ibv_flow_spec_ipv4_ext ipv4;
struct ibv_flow_spec_ipv6 ipv6;
struct ibv_flow_spec_tcp_udp udp_tcp;
+ struct ibv_flow_spec_eth eth;
} specs;
void *dst;
uint16_t size;
if (i == parser->layer)
continue;
- if (parser->layer == HASH_RXQ_ETH) {
+ if (parser->layer == HASH_RXQ_ETH ||
+ parser->layer == HASH_RXQ_TUNNEL) {
if (hash_rxq_init[i].ip_version == MLX5_IPV4) {
size = sizeof(struct ibv_flow_spec_ipv4_ext);
specs.ipv4 = (struct ibv_flow_spec_ipv4_ext){
- .type = IBV_FLOW_SPEC_IPV4_EXT,
+ .type = inner | IBV_FLOW_SPEC_IPV4_EXT,
.size = size,
};
} else {
size = sizeof(struct ibv_flow_spec_ipv6);
specs.ipv6 = (struct ibv_flow_spec_ipv6){
- .type = IBV_FLOW_SPEC_IPV6,
+ .type = inner | IBV_FLOW_SPEC_IPV6,
.size = size,
};
}
@@ -989,7 +1130,7 @@ fill:
(i == HASH_RXQ_UDPV6) || (i == HASH_RXQ_TCPV6)) {
size = sizeof(struct ibv_flow_spec_tcp_udp);
specs.udp_tcp = (struct ibv_flow_spec_tcp_udp) {
- .type = ((i == HASH_RXQ_UDPV4 ||
+ .type = inner | ((i == HASH_RXQ_UDPV4 ||
i == HASH_RXQ_UDPV6) ?
IBV_FLOW_SPEC_UDP :
IBV_FLOW_SPEC_TCP),
@@ -1008,10 +1149,110 @@ fill:
}
/**
+ * Update flows according to pattern and RSS hash fields.
+ *
+ * @param[in, out] parser
+ * Internal parser structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_convert_rss(struct mlx5_flow_parse *parser)
+{
+ unsigned int i;
+ enum hash_rxq_type start;
+ enum hash_rxq_type layer;
+ int outer = parser->tunnel && parser->rss_conf.level < 2;
+ uint64_t rss = parser->rss_conf.types;
+
+ layer = outer ? parser->out_layer : parser->layer;
+ if (layer == HASH_RXQ_TUNNEL)
+ layer = HASH_RXQ_ETH;
+ if (outer) {
+ /* Only one hash type for outer RSS. */
+ if (rss && layer == HASH_RXQ_ETH) {
+ start = HASH_RXQ_TCPV4;
+ } else if (rss && layer != HASH_RXQ_ETH &&
+ !(rss & hash_rxq_init[layer].dpdk_rss_hf)) {
+ /* If RSS does not match the L4 pattern, try L3 RSS. */
+ if (layer < HASH_RXQ_IPV4)
+ layer = HASH_RXQ_IPV4;
+ else if (layer > HASH_RXQ_IPV4 && layer < HASH_RXQ_IPV6)
+ layer = HASH_RXQ_IPV6;
+ start = layer;
+ } else {
+ start = layer;
+ }
+ /* Scan for the first valid hash type. */
+ for (i = start; rss && i <= layer; ++i) {
+ if (!parser->queue[i].ibv_attr)
+ continue;
+ if (hash_rxq_init[i].dpdk_rss_hf & rss)
+ break;
+ }
+ if (rss && i <= layer)
+ parser->queue[layer].hash_fields =
+ hash_rxq_init[i].hash_fields;
+ /* Trim unused hash types. */
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (parser->queue[i].ibv_attr && i != layer) {
+ rte_free(parser->queue[i].ibv_attr);
+ parser->queue[i].ibv_attr = NULL;
+ }
+ }
+ } else {
+ /* Expand for inner or normal RSS. */
+ if (rss && (layer == HASH_RXQ_ETH || layer == HASH_RXQ_IPV4))
+ start = HASH_RXQ_TCPV4;
+ else if (rss && layer == HASH_RXQ_IPV6)
+ start = HASH_RXQ_TCPV6;
+ else
+ start = layer;
+ /* For L4 pattern, try L3 RSS if no L4 RSS. */
+ /* Trim unused hash types. */
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (!parser->queue[i].ibv_attr)
+ continue;
+ if (i < start || i > layer) {
+ rte_free(parser->queue[i].ibv_attr);
+ parser->queue[i].ibv_attr = NULL;
+ continue;
+ }
+ if (!rss)
+ continue;
+ if (hash_rxq_init[i].dpdk_rss_hf & rss) {
+ parser->queue[i].hash_fields =
+ hash_rxq_init[i].hash_fields;
+ } else if (i != layer) {
+ /* Remove unused RSS expansion. */
+ rte_free(parser->queue[i].ibv_attr);
+ parser->queue[i].ibv_attr = NULL;
+ } else if (layer < HASH_RXQ_IPV4 &&
+ (hash_rxq_init[HASH_RXQ_IPV4].dpdk_rss_hf &
+ rss)) {
+ /* Allow IPv4 RSS on L4 pattern. */
+ parser->queue[i].hash_fields =
+ hash_rxq_init[HASH_RXQ_IPV4]
+ .hash_fields;
+ } else if (i > HASH_RXQ_IPV4 && i < HASH_RXQ_IPV6 &&
+ (hash_rxq_init[HASH_RXQ_IPV6].dpdk_rss_hf &
+ rss)) {
+ /* Allow IPv6 RSS on L4 pattern. */
+ parser->queue[i].hash_fields =
+ hash_rxq_init[HASH_RXQ_IPV6]
+ .hash_fields;
+ }
+ }
+ }
+ return 0;
+}
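
A note on the `level` field used throughout this function: in the rte_flow RSS action, levels 0 and 1 both mean hashing on the outermost headers while 2 and above request hashing on the inner (encapsulated) headers, which is why `rss_conf.level < 2` selects the outer-RSS path. A minimal sketch of that predicate, illustrative only and with a hypothetical helper name:

#include <stdbool.h>
#include <stdint.h>

static inline bool
rss_is_outer(uint32_t level, uint32_t tunnel_type)
{
	/* level 0 (unspecified) and 1 hash on the outermost headers;
	 * level >= 2 requests RSS on the inner headers. */
	return tunnel_type && level < 2;
}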
+
+/**
* Validate and convert a flow supported by the NIC.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param[in] attr
* Flow rule attributes.
* @param[in] pattern
@@ -1027,7 +1268,7 @@ fill:
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_flow_convert(struct priv *priv,
+mlx5_flow_convert(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
@@ -1044,48 +1285,36 @@ priv_flow_convert(struct priv *priv,
.layer = HASH_RXQ_ETH,
.mark_id = MLX5_FLOW_MARK_DEFAULT,
};
- ret = priv_flow_convert_attributes(priv, attr, error, parser);
+ ret = mlx5_flow_convert_attributes(attr, error);
if (ret)
return ret;
- ret = priv_flow_convert_actions(priv, actions, error, parser);
+ ret = mlx5_flow_convert_actions(dev, actions, error, parser);
if (ret)
return ret;
- ret = priv_flow_convert_items_validate(priv, items, error, parser);
+ ret = mlx5_flow_convert_items_validate(dev, items, error, parser);
if (ret)
return ret;
- priv_flow_convert_finalise(priv, parser);
+ mlx5_flow_convert_finalise(parser);
/*
* Second step.
* Allocate the memory space to store verbs specifications.
*/
if (parser->drop) {
- unsigned int priority =
- attr->priority +
- hash_rxq_init[HASH_RXQ_ETH].flow_priority;
unsigned int offset = parser->queue[HASH_RXQ_ETH].offset;
parser->queue[HASH_RXQ_ETH].ibv_attr =
- priv_flow_convert_allocate(priv, priority,
- offset, error);
+ mlx5_flow_convert_allocate(offset, error);
if (!parser->queue[HASH_RXQ_ETH].ibv_attr)
- return ENOMEM;
+ goto exit_enomem;
parser->queue[HASH_RXQ_ETH].offset =
sizeof(struct ibv_flow_attr);
} else {
for (i = 0; i != hash_rxq_init_n; ++i) {
- unsigned int priority =
- attr->priority +
- hash_rxq_init[i].flow_priority;
unsigned int offset;
- if (!(parser->rss_conf.rss_hf &
- hash_rxq_init[i].dpdk_rss_hf) &&
- (i != HASH_RXQ_ETH))
- continue;
offset = parser->queue[i].offset;
parser->queue[i].ibv_attr =
- priv_flow_convert_allocate(priv, priority,
- offset, error);
+ mlx5_flow_convert_allocate(offset, error);
if (!parser->queue[i].ibv_attr)
goto exit_enomem;
parser->queue[i].offset = sizeof(struct ibv_flow_attr);
@@ -1093,7 +1322,15 @@ priv_flow_convert(struct priv *priv,
}
/* Third step. Conversion parse, fill the specifications. */
parser->inner = 0;
+ parser->tunnel = 0;
+ parser->layer = HASH_RXQ_ETH;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
+ struct mlx5_flow_data data = {
+ .dev = dev,
+ .parser = parser,
+ .error = error,
+ };
+
if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
continue;
cur_item = &mlx5_flow_items[items->type];
@@ -1101,32 +1338,26 @@ priv_flow_convert(struct priv *priv,
(cur_item->default_mask ?
cur_item->default_mask :
cur_item->mask),
- parser);
- if (ret) {
- rte_flow_error_set(error, ret,
- RTE_FLOW_ERROR_TYPE_ITEM,
- items, "item not supported");
+ &data);
+ if (ret)
goto exit_free;
- }
}
+ if (!parser->drop) {
+ /* RSS check, remove unused hash types. */
+ ret = mlx5_flow_convert_rss(parser);
+ if (ret)
+ goto exit_free;
+ /* Complete missing specification. */
+ mlx5_flow_convert_finalise(parser);
+ }
+ mlx5_flow_update_priority(dev, parser, attr);
if (parser->mark)
mlx5_flow_create_flag_mark(parser, parser->mark_id);
if (parser->count && parser->create) {
- mlx5_flow_create_count(priv, parser);
+ mlx5_flow_create_count(dev, parser);
if (!parser->cs)
goto exit_count_error;
}
- /*
- * Last step. Complete missing specification to reach the RSS
- * configuration.
- */
- if (!parser->drop) {
- priv_flow_convert_finalise(priv, parser);
- } else {
- parser->queue[HASH_RXQ_ETH].ibv_attr->priority =
- attr->priority +
- hash_rxq_init[parser->layer].flow_priority;
- }
exit_free:
/* Only verification is expected, all resources should be released. */
if (!parser->create) {
@@ -1145,13 +1376,13 @@ exit_enomem:
parser->queue[i].ibv_attr = NULL;
}
}
- rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "cannot allocate verbs spec attributes.");
- return ret;
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot allocate verbs spec attributes");
+ return -rte_errno;
exit_count_error:
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "cannot create counter.");
- return rte_errno;
+ NULL, "cannot create counter");
+ return -rte_errno;
}
/**
@@ -1174,17 +1405,11 @@ mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src,
for (i = 0; i != hash_rxq_init_n; ++i) {
if (!parser->queue[i].ibv_attr)
continue;
- /* Specification must be the same l3 type or none. */
- if (parser->layer == HASH_RXQ_ETH ||
- (hash_rxq_init[parser->layer].ip_version ==
- hash_rxq_init[i].ip_version) ||
- (hash_rxq_init[i].ip_version == 0)) {
- dst = (void *)((uintptr_t)parser->queue[i].ibv_attr +
- parser->queue[i].offset);
- memcpy(dst, src, size);
- ++parser->queue[i].ibv_attr->num_of_specs;
- parser->queue[i].offset += size;
- }
+ dst = (void *)((uintptr_t)parser->queue[i].ibv_attr +
+ parser->queue[i].offset);
+ memcpy(dst, src, size);
+ ++parser->queue[i].ibv_attr->num_of_specs;
+ parser->queue[i].offset += size;
}
}
@@ -1197,24 +1422,25 @@ mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src,
* Default bit-masks to use when item->mask is not provided.
* @param data[in, out]
* User structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_eth(const struct rte_flow_item *item,
const void *default_mask,
- void *data)
+ struct mlx5_flow_data *data)
{
const struct rte_flow_item_eth *spec = item->spec;
const struct rte_flow_item_eth *mask = item->mask;
- struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
+ struct mlx5_flow_parse *parser = data->parser;
const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
struct ibv_flow_spec_eth eth = {
.type = parser->inner | IBV_FLOW_SPEC_ETH,
.size = eth_size,
};
- /* Don't update layer for the inner pattern. */
- if (!parser->inner)
- parser->layer = HASH_RXQ_ETH;
+ parser->layer = HASH_RXQ_ETH;
if (spec) {
unsigned int i;
@@ -1246,17 +1472,21 @@ mlx5_flow_create_eth(const struct rte_flow_item *item,
* Default bit-masks to use when item->mask is not provided.
* @param data[in, out]
* User structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_vlan(const struct rte_flow_item *item,
const void *default_mask,
- void *data)
+ struct mlx5_flow_data *data)
{
const struct rte_flow_item_vlan *spec = item->spec;
const struct rte_flow_item_vlan *mask = item->mask;
- struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
+ struct mlx5_flow_parse *parser = data->parser;
struct ibv_flow_spec_eth *eth;
const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
+ const char *msg = "VLAN cannot be empty";
if (spec) {
unsigned int i;
@@ -1272,9 +1502,26 @@ mlx5_flow_create_vlan(const struct rte_flow_item *item,
eth->val.vlan_tag = spec->tci;
eth->mask.vlan_tag = mask->tci;
eth->val.vlan_tag &= eth->mask.vlan_tag;
+ /*
+ * From the Verbs perspective an empty VLAN is equivalent
+ * to a packet without a VLAN layer.
+ */
+ if (!eth->mask.vlan_tag)
+ goto error;
+ /* Outer TPID cannot be matched. */
+ if (eth->mask.ether_type) {
+ msg = "VLAN TPID matching is not supported";
+ goto error;
+ }
+ eth->val.ether_type = spec->inner_type;
+ eth->mask.ether_type = mask->inner_type;
+ eth->val.ether_type &= eth->mask.ether_type;
}
+ return 0;
}
- return 0;
+error:
+ return rte_flow_error_set(data->error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, msg);
}
/**
@@ -1286,24 +1533,35 @@ mlx5_flow_create_vlan(const struct rte_flow_item *item,
* Default bit-masks to use when item->mask is not provided.
* @param data[in, out]
* User structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_ipv4(const struct rte_flow_item *item,
const void *default_mask,
- void *data)
+ struct mlx5_flow_data *data)
{
+ struct priv *priv = data->dev->data->dev_private;
const struct rte_flow_item_ipv4 *spec = item->spec;
const struct rte_flow_item_ipv4 *mask = item->mask;
- struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
+ struct mlx5_flow_parse *parser = data->parser;
unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4_ext);
struct ibv_flow_spec_ipv4_ext ipv4 = {
.type = parser->inner | IBV_FLOW_SPEC_IPV4_EXT,
.size = ipv4_size,
};
- /* Don't update layer for the inner pattern. */
- if (!parser->inner)
- parser->layer = HASH_RXQ_IPV4;
+ if (parser->layer == HASH_RXQ_TUNNEL &&
+ parser->tunnel == ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)] &&
+ !priv->config.l3_vxlan_en)
+ return rte_flow_error_set(data->error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L3 VXLAN not enabled by device"
+ " parameter and/or not configured"
+ " in firmware");
+ parser->layer = HASH_RXQ_IPV4;
if (spec) {
if (!mask)
mask = default_mask;
@@ -1338,24 +1596,35 @@ mlx5_flow_create_ipv4(const struct rte_flow_item *item,
* Default bit-masks to use when item->mask is not provided.
* @param data[in, out]
* User structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_ipv6(const struct rte_flow_item *item,
const void *default_mask,
- void *data)
+ struct mlx5_flow_data *data)
{
+ struct priv *priv = data->dev->data->dev_private;
const struct rte_flow_item_ipv6 *spec = item->spec;
const struct rte_flow_item_ipv6 *mask = item->mask;
- struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
+ struct mlx5_flow_parse *parser = data->parser;
unsigned int ipv6_size = sizeof(struct ibv_flow_spec_ipv6);
struct ibv_flow_spec_ipv6 ipv6 = {
.type = parser->inner | IBV_FLOW_SPEC_IPV6,
.size = ipv6_size,
};
- /* Don't update layer for the inner pattern. */
- if (!parser->inner)
- parser->layer = HASH_RXQ_IPV6;
+ if (parser->layer == HASH_RXQ_TUNNEL &&
+ parser->tunnel == ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)] &&
+ !priv->config.l3_vxlan_en)
+ return rte_flow_error_set(data->error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L3 VXLAN not enabled by device"
+ " parameter and/or not configured"
+ " in firmware");
+ parser->layer = HASH_RXQ_IPV6;
if (spec) {
unsigned int i;
uint32_t vtc_flow_val;
@@ -1410,28 +1679,28 @@ mlx5_flow_create_ipv6(const struct rte_flow_item *item,
* Default bit-masks to use when item->mask is not provided.
* @param data[in, out]
* User structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_udp(const struct rte_flow_item *item,
const void *default_mask,
- void *data)
+ struct mlx5_flow_data *data)
{
const struct rte_flow_item_udp *spec = item->spec;
const struct rte_flow_item_udp *mask = item->mask;
- struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
+ struct mlx5_flow_parse *parser = data->parser;
unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp);
struct ibv_flow_spec_tcp_udp udp = {
.type = parser->inner | IBV_FLOW_SPEC_UDP,
.size = udp_size,
};
- /* Don't update layer for the inner pattern. */
- if (!parser->inner) {
- if (parser->layer == HASH_RXQ_IPV4)
- parser->layer = HASH_RXQ_UDPV4;
- else
- parser->layer = HASH_RXQ_UDPV6;
- }
+ if (parser->layer == HASH_RXQ_IPV4)
+ parser->layer = HASH_RXQ_UDPV4;
+ else
+ parser->layer = HASH_RXQ_UDPV6;
if (spec) {
if (!mask)
mask = default_mask;
@@ -1456,28 +1725,28 @@ mlx5_flow_create_udp(const struct rte_flow_item *item,
* Default bit-masks to use when item->mask is not provided.
* @param data[in, out]
* User structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_tcp(const struct rte_flow_item *item,
const void *default_mask,
- void *data)
+ struct mlx5_flow_data *data)
{
const struct rte_flow_item_tcp *spec = item->spec;
const struct rte_flow_item_tcp *mask = item->mask;
- struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
+ struct mlx5_flow_parse *parser = data->parser;
unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp);
struct ibv_flow_spec_tcp_udp tcp = {
.type = parser->inner | IBV_FLOW_SPEC_TCP,
.size = tcp_size,
};
- /* Don't update layer for the inner pattern. */
- if (!parser->inner) {
- if (parser->layer == HASH_RXQ_IPV4)
- parser->layer = HASH_RXQ_TCPV4;
- else
- parser->layer = HASH_RXQ_TCPV6;
- }
+ if (parser->layer == HASH_RXQ_IPV4)
+ parser->layer = HASH_RXQ_TCPV4;
+ else
+ parser->layer = HASH_RXQ_TCPV6;
if (spec) {
if (!mask)
mask = default_mask;
@@ -1502,15 +1771,18 @@ mlx5_flow_create_tcp(const struct rte_flow_item *item,
* Default bit-masks to use when item->mask is not provided.
* @param data[in, out]
* User structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_vxlan(const struct rte_flow_item *item,
const void *default_mask,
- void *data)
+ struct mlx5_flow_data *data)
{
const struct rte_flow_item_vxlan *spec = item->spec;
const struct rte_flow_item_vxlan *mask = item->mask;
- struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
+ struct mlx5_flow_parse *parser = data->parser;
unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
struct ibv_flow_spec_tunnel vxlan = {
.type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL,
@@ -1523,6 +1795,9 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
id.vni[0] = 0;
parser->inner = IBV_FLOW_SPEC_INNER;
+ parser->tunnel = ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)];
+ parser->out_layer = parser->layer;
+ parser->layer = HASH_RXQ_TUNNEL;
if (spec) {
if (!mask)
mask = default_mask;
@@ -1541,19 +1816,260 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
* before will also match this rule.
* To avoid such situation, VNI 0 is currently refused.
*/
- if (!vxlan.val.tunnel_id)
- return EINVAL;
+ /* Only allow tunnel w/o tunnel id pattern after proper outer spec. */
+ if (parser->out_layer == HASH_RXQ_ETH && !vxlan.val.tunnel_id)
+ return rte_flow_error_set(data->error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VxLAN vni cannot be 0");
mlx5_flow_create_copy(parser, &vxlan, size);
return 0;
}
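
The `union vni` trick used above copies the 3-byte VNI from the item into bytes 1..3 of a zeroed 4-byte array and reads the result back as the 32-bit Verbs tunnel_id. A small sketch of the same packing, illustrative only with a hypothetical helper name:

#include <stdint.h>
#include <string.h>

static uint32_t
vxlan_vni_to_tunnel_id(const uint8_t vni[3])
{
	union {
		uint32_t tunnel_id;
		uint8_t bytes[4];
	} id = { .tunnel_id = 0 };

	/* Byte 0 stays zero; the 3-byte VNI fills bytes 1..3. */
	memcpy(&id.bytes[1], vni, 3);
	return id.tunnel_id;
}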
/**
+ * Convert VXLAN-GPE item to Verbs specification.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param default_mask[in]
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ * User structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_create_vxlan_gpe(const struct rte_flow_item *item,
+ const void *default_mask,
+ struct mlx5_flow_data *data)
+{
+ struct priv *priv = data->dev->data->dev_private;
+ const struct rte_flow_item_vxlan_gpe *spec = item->spec;
+ const struct rte_flow_item_vxlan_gpe *mask = item->mask;
+ struct mlx5_flow_parse *parser = data->parser;
+ unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+ struct ibv_flow_spec_tunnel vxlan = {
+ .type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL,
+ .size = size,
+ };
+ union vni {
+ uint32_t vlan_id;
+ uint8_t vni[4];
+ } id;
+
+ if (!priv->config.l3_vxlan_en)
+ return rte_flow_error_set(data->error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L3 VXLAN not enabled by device"
+ " parameter and/or not configured"
+ " in firmware");
+ id.vni[0] = 0;
+ parser->inner = IBV_FLOW_SPEC_INNER;
+ parser->tunnel = ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN_GPE)];
+ parser->out_layer = parser->layer;
+ parser->layer = HASH_RXQ_TUNNEL;
+ if (spec) {
+ if (!mask)
+ mask = default_mask;
+ memcpy(&id.vni[1], spec->vni, 3);
+ vxlan.val.tunnel_id = id.vlan_id;
+ memcpy(&id.vni[1], mask->vni, 3);
+ vxlan.mask.tunnel_id = id.vlan_id;
+ if (spec->protocol)
+ return rte_flow_error_set(data->error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VxLAN-GPE protocol not"
+ " supported");
+ /* Remove unwanted bits from values. */
+ vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
+ }
+ /*
+ * Tunnel id 0 is equivalent to not adding a VXLAN layer. If only this
+ * layer is defined in the Verbs specification, it is interpreted as a
+ * wildcard and all packets will match this rule. If it follows a full
+ * stack layer (e.g. eth / ipv4 / udp), all packets matching the layers
+ * before will also match this rule.
+ * To avoid such a situation, VNI 0 is currently refused.
+ */
+ /* Only allow tunnel w/o tunnel id pattern after proper outer spec. */
+ if (parser->out_layer == HASH_RXQ_ETH && !vxlan.val.tunnel_id)
+ return rte_flow_error_set(data->error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VxLAN-GPE vni cannot be 0");
+ mlx5_flow_create_copy(parser, &vxlan, size);
+ return 0;
+}
+
+/**
+ * Convert GRE item to Verbs specification.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param default_mask[in]
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ * User structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_create_gre(const struct rte_flow_item *item,
+ const void *default_mask,
+ struct mlx5_flow_data *data)
+{
+ struct mlx5_flow_parse *parser = data->parser;
+#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ (void)default_mask;
+ unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+ struct ibv_flow_spec_tunnel tunnel = {
+ .type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL,
+ .size = size,
+ };
+#else
+ const struct rte_flow_item_gre *spec = item->spec;
+ const struct rte_flow_item_gre *mask = item->mask;
+ unsigned int size = sizeof(struct ibv_flow_spec_gre);
+ struct ibv_flow_spec_gre tunnel = {
+ .type = parser->inner | IBV_FLOW_SPEC_GRE,
+ .size = size,
+ };
+#endif
+ struct ibv_flow_spec_ipv4_ext *ipv4;
+ struct ibv_flow_spec_ipv6 *ipv6;
+ unsigned int i;
+
+ parser->inner = IBV_FLOW_SPEC_INNER;
+ parser->tunnel = ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_GRE)];
+ parser->out_layer = parser->layer;
+ parser->layer = HASH_RXQ_TUNNEL;
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ if (spec) {
+ if (!mask)
+ mask = default_mask;
+ tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
+ tunnel.val.protocol = spec->protocol;
+ tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
+ tunnel.mask.protocol = mask->protocol;
+ /* Remove unwanted bits from values. */
+ tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
+ tunnel.val.protocol &= tunnel.mask.protocol;
+ tunnel.val.key &= tunnel.mask.key;
+ }
+#endif
+ /* Update encapsulation IP layer protocol. */
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (!parser->queue[i].ibv_attr)
+ continue;
+ if (parser->out_layer == HASH_RXQ_IPV4) {
+ ipv4 = (void *)((uintptr_t)parser->queue[i].ibv_attr +
+ parser->queue[i].offset -
+ sizeof(struct ibv_flow_spec_ipv4_ext));
+ if (ipv4->mask.proto && ipv4->val.proto != MLX5_GRE)
+ break;
+ ipv4->val.proto = MLX5_GRE;
+ ipv4->mask.proto = 0xff;
+ } else if (parser->out_layer == HASH_RXQ_IPV6) {
+ ipv6 = (void *)((uintptr_t)parser->queue[i].ibv_attr +
+ parser->queue[i].offset -
+ sizeof(struct ibv_flow_spec_ipv6));
+ if (ipv6->mask.next_hdr &&
+ ipv6->val.next_hdr != MLX5_GRE)
+ break;
+ ipv6->val.next_hdr = MLX5_GRE;
+ ipv6->mask.next_hdr = 0xff;
+ }
+ }
+ if (i != hash_rxq_init_n)
+ return rte_flow_error_set(data->error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "IP protocol of GRE must be 47");
+ mlx5_flow_create_copy(parser, &tunnel, size);
+ return 0;
+}
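
The loop above walks every pending hash Rx queue specification and forces the outer IP protocol to GRE (IP protocol number 47, presumably what MLX5_GRE expands to), rejecting the flow if a different protocol was already requested. A compact sketch of that check for the IPv4 case, illustrative only with a hypothetical helper name:

#include <stdbool.h>
#include <stdint.h>
#include <infiniband/verbs.h>

#define GRE_PROTO 47	/* IANA protocol number for GRE. */

/* Returns false if an explicit, conflicting protocol match was requested. */
static bool
force_outer_proto_gre(struct ibv_flow_spec_ipv4_ext *ipv4)
{
	if (ipv4->mask.proto && ipv4->val.proto != GRE_PROTO)
		return false;
	ipv4->val.proto = GRE_PROTO;
	ipv4->mask.proto = 0xff;
	return true;
}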
+
+/**
+ * Convert MPLS item to Verbs specification.
+ * MPLS tunnel types currently supported are MPLS-in-GRE and MPLS-in-UDP.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param default_mask[in]
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ * User structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_create_mpls(const struct rte_flow_item *item,
+ const void *default_mask,
+ struct mlx5_flow_data *data)
+{
+#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ (void)default_mask;
+ return rte_flow_error_set(data->error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "MPLS is not supported by driver");
+#else
+ const struct rte_flow_item_mpls *spec = item->spec;
+ const struct rte_flow_item_mpls *mask = item->mask;
+ struct mlx5_flow_parse *parser = data->parser;
+ unsigned int size = sizeof(struct ibv_flow_spec_mpls);
+ struct ibv_flow_spec_mpls mpls = {
+ .type = IBV_FLOW_SPEC_MPLS,
+ .size = size,
+ };
+
+ parser->inner = IBV_FLOW_SPEC_INNER;
+ if (parser->layer == HASH_RXQ_UDPV4 ||
+ parser->layer == HASH_RXQ_UDPV6) {
+ parser->tunnel =
+ ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_UDP)];
+ parser->out_layer = parser->layer;
+ } else {
+ parser->tunnel =
+ ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_GRE)];
+ /* parser->out_layer keeps the outer layer recorded by the GRE item. */
+ }
+ parser->layer = HASH_RXQ_TUNNEL;
+ if (spec) {
+ if (!mask)
+ mask = default_mask;
+ /*
+ * The verbs label field includes the entire MPLS header:
+ * bits 0:19 - label value field.
+ * bits 20:22 - traffic class field.
+ * bit 23 - bottom of stack bit.
+ * bits 24:31 - ttl field.
+ */
+ mpls.val.label = *(const uint32_t *)spec;
+ mpls.mask.label = *(const uint32_t *)mask;
+ /* Remove unwanted bits from values. */
+ mpls.val.label &= mpls.mask.label;
+ }
+ mlx5_flow_create_copy(parser, &mpls, size);
+ return 0;
+#endif
+}
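
Since the Verbs MPLS label field carries the whole 32-bit MPLS header (label in bits 0:19 counted from the MSB, traffic class in 20:22, bottom-of-stack in 23, TTL in 24:31), the code above simply reuses the item's raw 32-bit value. A small sketch of pulling the individual fields back out, assuming the value has first been converted to host byte order; illustrative only:

#include <stdint.h>

struct mpls_fields {
	uint32_t label;	/* 20-bit label value */
	uint8_t tc;	/* 3-bit traffic class */
	uint8_t bos;	/* bottom-of-stack flag */
	uint8_t ttl;
};

static struct mpls_fields
mpls_unpack(uint32_t hdr)	/* hdr already in host byte order */
{
	return (struct mpls_fields){
		.label = hdr >> 12,
		.tc = (hdr >> 9) & 0x7,
		.bos = (hdr >> 8) & 0x1,
		.ttl = hdr & 0xff,
	};
}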
+
+/**
* Convert mark/flag action to Verbs specification.
*
* @param parser
* Internal parser structure.
* @param mark_id
* Mark identifier.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id)
@@ -1573,19 +2089,20 @@ mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id)
/**
* Convert count action to Verbs specification.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param parser
* Pointer to MLX5 flow parser structure.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_flow_create_count(struct priv *priv __rte_unused,
+mlx5_flow_create_count(struct rte_eth_dev *dev __rte_unused,
struct mlx5_flow_parse *parser __rte_unused)
{
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ struct priv *priv = dev->data->dev_private;
unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
struct ibv_counter_set_init_attr init_attr = {0};
struct ibv_flow_spec_counter_action counter = {
@@ -1596,8 +2113,10 @@ mlx5_flow_create_count(struct priv *priv __rte_unused,
init_attr.counter_set_id = 0;
parser->cs = mlx5_glue->create_counter_set(priv->ctx, &init_attr);
- if (!parser->cs)
- return EINVAL;
+ if (!parser->cs) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
counter.counter_set_handle = parser->cs->handle;
mlx5_flow_create_copy(parser, &counter, size);
#endif
@@ -1607,8 +2126,8 @@ mlx5_flow_create_count(struct priv *priv __rte_unused,
/**
* Complete flow rule creation with a drop queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param parser
* Internal parser structure.
* @param flow
@@ -1617,17 +2136,17 @@ mlx5_flow_create_count(struct priv *priv __rte_unused,
* Perform verbose error reporting if not NULL.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_flow_create_action_queue_drop(struct priv *priv,
+mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev,
struct mlx5_flow_parse *parser,
struct rte_flow *flow,
struct rte_flow_error *error)
{
+ struct priv *priv = dev->data->dev_private;
struct ibv_flow_spec_action_drop *drop;
unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
- int err = 0;
assert(priv->pd);
assert(priv->ctx);
@@ -1644,7 +2163,7 @@ priv_flow_create_action_queue_drop(struct priv *priv,
parser->queue[HASH_RXQ_ETH].ibv_attr;
if (parser->count)
flow->cs = parser->cs;
- if (!priv->dev->data->dev_started)
+ if (!dev->data->dev_started)
return 0;
parser->queue[HASH_RXQ_ETH].ibv_attr = NULL;
flow->frxq[HASH_RXQ_ETH].ibv_flow =
@@ -1653,7 +2172,6 @@ priv_flow_create_action_queue_drop(struct priv *priv,
if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "flow rule creation failure");
- err = ENOMEM;
goto error;
}
return 0;
@@ -1673,14 +2191,14 @@ error:
flow->cs = NULL;
parser->cs = NULL;
}
- return err;
+ return -rte_errno;
}
/**
* Create hash Rx queues when RSS is enabled.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param parser
* Internal parser structure.
* @param flow
@@ -1689,10 +2207,10 @@ error:
* Perform verbose error reporting if not NULL.
*
* @return
- * 0 on success, a errno value otherwise and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_flow_create_action_queue_rss(struct priv *priv,
+mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,
struct mlx5_flow_parse *parser,
struct rte_flow *flow,
struct rte_flow_error *error)
@@ -1700,46 +2218,144 @@ priv_flow_create_action_queue_rss(struct priv *priv,
unsigned int i;
for (i = 0; i != hash_rxq_init_n; ++i) {
- uint64_t hash_fields;
-
if (!parser->queue[i].ibv_attr)
continue;
flow->frxq[i].ibv_attr = parser->queue[i].ibv_attr;
parser->queue[i].ibv_attr = NULL;
- hash_fields = hash_rxq_init[i].hash_fields;
- if (!priv->dev->data->dev_started)
+ flow->frxq[i].hash_fields = parser->queue[i].hash_fields;
+ if (!dev->data->dev_started)
continue;
flow->frxq[i].hrxq =
- mlx5_priv_hrxq_get(priv,
- parser->rss_conf.rss_key,
- parser->rss_conf.rss_key_len,
- hash_fields,
- parser->queues,
- parser->queues_n);
+ mlx5_hrxq_get(dev,
+ parser->rss_conf.key,
+ parser->rss_conf.key_len,
+ flow->frxq[i].hash_fields,
+ parser->rss_conf.queue,
+ parser->rss_conf.queue_num,
+ parser->tunnel,
+ parser->rss_conf.level);
if (flow->frxq[i].hrxq)
continue;
flow->frxq[i].hrxq =
- mlx5_priv_hrxq_new(priv,
- parser->rss_conf.rss_key,
- parser->rss_conf.rss_key_len,
- hash_fields,
- parser->queues,
- parser->queues_n);
+ mlx5_hrxq_new(dev,
+ parser->rss_conf.key,
+ parser->rss_conf.key_len,
+ flow->frxq[i].hash_fields,
+ parser->rss_conf.queue,
+ parser->rss_conf.queue_num,
+ parser->tunnel,
+ parser->rss_conf.level);
if (!flow->frxq[i].hrxq) {
- rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "cannot create hash rxq");
- return ENOMEM;
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "cannot create hash rxq");
}
}
return 0;
}
/**
+ * RXQ update after flow rule creation.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param flow
+ * Pointer to the flow rule.
+ */
+static void
+mlx5_flow_create_update_rxqs(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int i;
+ unsigned int j;
+
+ if (!dev->data->dev_started)
+ return;
+ for (i = 0; i != flow->rss_conf.queue_num; ++i) {
+ struct mlx5_rxq_data *rxq_data = (*priv->rxqs)
+ [(*flow->queues)[i]];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ uint8_t tunnel = PTYPE_IDX(flow->tunnel);
+
+ rxq_data->mark |= flow->mark;
+ if (!tunnel)
+ continue;
+ rxq_ctrl->tunnel_types[tunnel] += 1;
+ /* Clear tunnel type if more than one tunnel type is set. */
+ for (j = 0; j != RTE_DIM(rxq_ctrl->tunnel_types); ++j) {
+ if (j == tunnel)
+ continue;
+ if (rxq_ctrl->tunnel_types[j] > 0) {
+ rxq_data->tunnel = 0;
+ break;
+ }
+ }
+ if (j == RTE_DIM(rxq_ctrl->tunnel_types))
+ rxq_data->tunnel = flow->tunnel;
+ }
+}
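
The per-queue bookkeeping above is a small reference count: `tunnel_types[]` tracks how many active flows of each tunnel kind target the queue, and `rxq_data->tunnel` is only left set when exactly one kind remains, so the data path can report an unambiguous packet type. A reduced sketch of the same rule, illustrative only with hypothetical names:

#include <stdint.h>

/* Decide the queue-level tunnel hint from per-type flow counts. */
static uint32_t
queue_tunnel_hint(const uint16_t counts[], unsigned int n,
		  const uint32_t ptypes[])
{
	uint32_t hint = 0;
	unsigned int i, kinds = 0;

	for (i = 0; i != n; ++i) {
		if (!counts[i])
			continue;
		kinds++;
		hint = ptypes[i];
	}
	/* Ambiguous (or no tunnel flow left): clear the hint. */
	return kinds == 1 ? hint : 0;
}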
+
+/**
+ * Dump flow hash RX queue detail.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param flow
+ * Pointer to the rte_flow.
+ * @param hrxq_idx
+ * Hash RX queue index.
+ */
+static void
+mlx5_flow_dump(struct rte_eth_dev *dev __rte_unused,
+ struct rte_flow *flow __rte_unused,
+ unsigned int hrxq_idx __rte_unused)
+{
+#ifndef NDEBUG
+ uintptr_t spec_ptr;
+ uint16_t j;
+ char buf[256];
+ uint8_t off;
+ uint64_t extra_hash_fields = 0;
+
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ if (flow->tunnel && flow->rss_conf.level > 1)
+ extra_hash_fields = (uint32_t)IBV_RX_HASH_INNER;
+#endif
+ spec_ptr = (uintptr_t)(flow->frxq[hrxq_idx].ibv_attr + 1);
+ for (j = 0, off = 0; j < flow->frxq[hrxq_idx].ibv_attr->num_of_specs;
+ j++) {
+ struct ibv_flow_spec *spec = (void *)spec_ptr;
+ off += sprintf(buf + off, " %x(%hu)", spec->hdr.type,
+ spec->hdr.size);
+ spec_ptr += spec->hdr.size;
+ }
+ DRV_LOG(DEBUG,
+ "port %u Verbs flow %p type %u: hrxq:%p qp:%p ind:%p,"
+ " hash:%" PRIx64 "/%u specs:%hhu(%hu), priority:%hu, type:%d,"
+ " flags:%x, comp_mask:%x specs:%s",
+ dev->data->port_id, (void *)flow, hrxq_idx,
+ (void *)flow->frxq[hrxq_idx].hrxq,
+ (void *)flow->frxq[hrxq_idx].hrxq->qp,
+ (void *)flow->frxq[hrxq_idx].hrxq->ind_table,
+ (flow->frxq[hrxq_idx].hash_fields | extra_hash_fields),
+ flow->rss_conf.queue_num,
+ flow->frxq[hrxq_idx].ibv_attr->num_of_specs,
+ flow->frxq[hrxq_idx].ibv_attr->size,
+ flow->frxq[hrxq_idx].ibv_attr->priority,
+ flow->frxq[hrxq_idx].ibv_attr->type,
+ flow->frxq[hrxq_idx].ibv_attr->flags,
+ flow->frxq[hrxq_idx].ibv_attr->comp_mask,
+ buf);
+#endif
+}
+
+/**
* Complete flow rule creation.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param parser
* Internal parser structure.
* @param flow
@@ -1748,26 +2364,28 @@ priv_flow_create_action_queue_rss(struct priv *priv,
* Perform verbose error reporting if not NULL.
*
* @return
- * 0 on success, a errno value otherwise and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_flow_create_action_queue(struct priv *priv,
+mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
struct mlx5_flow_parse *parser,
struct rte_flow *flow,
struct rte_flow_error *error)
{
- int err = 0;
+ struct priv *priv __rte_unused = dev->data->dev_private;
+ int ret;
unsigned int i;
+ unsigned int flows_n = 0;
assert(priv->pd);
assert(priv->ctx);
assert(!parser->drop);
- err = priv_flow_create_action_queue_rss(priv, parser, flow, error);
- if (err)
+ ret = mlx5_flow_create_action_queue_rss(dev, parser, flow, error);
+ if (ret)
goto error;
if (parser->count)
flow->cs = parser->cs;
- if (!priv->dev->data->dev_started)
+ if (!dev->data->dev_started)
return 0;
for (i = 0; i != hash_rxq_init_n; ++i) {
if (!flow->frxq[i].hrxq)
@@ -1775,26 +2393,24 @@ priv_flow_create_action_queue(struct priv *priv,
flow->frxq[i].ibv_flow =
mlx5_glue->create_flow(flow->frxq[i].hrxq->qp,
flow->frxq[i].ibv_attr);
+ mlx5_flow_dump(dev, flow, i);
if (!flow->frxq[i].ibv_flow) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "flow rule creation failure");
- err = ENOMEM;
goto error;
}
- DEBUG("%p type %d QP %p ibv_flow %p",
- (void *)flow, i,
- (void *)flow->frxq[i].hrxq,
- (void *)flow->frxq[i].ibv_flow);
+ ++flows_n;
}
- for (i = 0; i != parser->queues_n; ++i) {
- struct mlx5_rxq_data *q =
- (*priv->rxqs)[parser->queues[i]];
-
- q->mark |= parser->mark;
+ if (!flows_n) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "internal error in flow creation");
+ goto error;
}
+ mlx5_flow_create_update_rxqs(dev, flow);
return 0;
error:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
assert(flow);
for (i = 0; i != hash_rxq_init_n; ++i) {
if (flow->frxq[i].ibv_flow) {
@@ -1803,7 +2419,7 @@ error:
claim_zero(mlx5_glue->destroy_flow(ibv_flow));
}
if (flow->frxq[i].hrxq)
- mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq);
+ mlx5_hrxq_release(dev, flow->frxq[i].hrxq);
if (flow->frxq[i].ibv_attr)
rte_free(flow->frxq[i].ibv_attr);
}
@@ -1812,14 +2428,15 @@ error:
flow->cs = NULL;
parser->cs = NULL;
}
- return err;
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
}
/**
* Convert a flow.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param list
* Pointer to a TAILQ flow list.
* @param[in] attr
@@ -1832,26 +2449,27 @@ error:
* Perform verbose error reporting if not NULL.
*
* @return
- * A flow on success, NULL otherwise.
+ * A flow on success, NULL otherwise and rte_errno is set.
*/
static struct rte_flow *
-priv_flow_create(struct priv *priv,
- struct mlx5_flows *list,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
+mlx5_flow_list_create(struct rte_eth_dev *dev,
+ struct mlx5_flows *list,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
{
struct mlx5_flow_parse parser = { .create = 1, };
struct rte_flow *flow = NULL;
unsigned int i;
- int err;
+ int ret;
- err = priv_flow_convert(priv, attr, items, actions, error, &parser);
- if (err)
+ ret = mlx5_flow_convert(dev, attr, items, actions, error, &parser);
+ if (ret)
goto exit;
flow = rte_calloc(__func__, 1,
- sizeof(*flow) + parser.queues_n * sizeof(uint16_t),
+ sizeof(*flow) +
+ parser.rss_conf.queue_num * sizeof(uint16_t),
0);
if (!flow) {
rte_flow_error_set(error, ENOMEM,
@@ -1860,28 +2478,38 @@ priv_flow_create(struct priv *priv,
"cannot allocate flow memory");
return NULL;
}
- /* Copy queues configuration. */
+ /* Copy configuration. */
flow->queues = (uint16_t (*)[])(flow + 1);
- memcpy(flow->queues, parser.queues, parser.queues_n * sizeof(uint16_t));
- flow->queues_n = parser.queues_n;
+ flow->tunnel = parser.tunnel;
+ flow->rss_conf = (struct rte_flow_action_rss){
+ .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
+ .level = parser.rss_conf.level,
+ .types = parser.rss_conf.types,
+ .key_len = parser.rss_conf.key_len,
+ .queue_num = parser.rss_conf.queue_num,
+ .key = memcpy(flow->rss_key, parser.rss_conf.key,
+ sizeof(*parser.rss_conf.key) *
+ parser.rss_conf.key_len),
+ .queue = memcpy(flow->queues, parser.rss_conf.queue,
+ sizeof(*parser.rss_conf.queue) *
+ parser.rss_conf.queue_num),
+ };
flow->mark = parser.mark;
- /* Copy RSS configuration. */
- flow->rss_conf = parser.rss_conf;
- flow->rss_conf.rss_key = flow->rss_key;
- memcpy(flow->rss_key, parser.rss_key, parser.rss_conf.rss_key_len);
/* finalise the flow. */
if (parser.drop)
- err = priv_flow_create_action_queue_drop(priv, &parser, flow,
+ ret = mlx5_flow_create_action_queue_drop(dev, &parser, flow,
error);
else
- err = priv_flow_create_action_queue(priv, &parser, flow, error);
- if (err)
+ ret = mlx5_flow_create_action_queue(dev, &parser, flow, error);
+ if (ret)
goto exit;
TAILQ_INSERT_TAIL(list, flow, next);
- DEBUG("Flow created %p", (void *)flow);
+ DRV_LOG(DEBUG, "port %u flow created %p", dev->data->port_id,
+ (void *)flow);
return flow;
exit:
- ERROR("flow creation error: %s", error->message);
+ DRV_LOG(ERR, "port %u flow creation error: %s", dev->data->port_id,
+ error->message);
for (i = 0; i != hash_rxq_init_n; ++i) {
if (parser.queue[i].ibv_attr)
rte_free(parser.queue[i].ibv_attr);
@@ -1903,14 +2531,9 @@ mlx5_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
- int ret;
struct mlx5_flow_parse parser = { .create = 0, };
- priv_lock(priv);
- ret = priv_flow_convert(priv, attr, items, actions, error, &parser);
- priv_unlock(priv);
- return ret;
+ return mlx5_flow_convert(dev, attr, items, actions, error, &parser);
}
/**
@@ -1927,35 +2550,60 @@ mlx5_flow_create(struct rte_eth_dev *dev,
struct rte_flow_error *error)
{
struct priv *priv = dev->data->dev_private;
- struct rte_flow *flow;
- priv_lock(priv);
- flow = priv_flow_create(priv, &priv->flows, attr, items, actions,
- error);
- priv_unlock(priv);
- return flow;
+ return mlx5_flow_list_create(dev, &priv->flows, attr, items, actions,
+ error);
}
/**
- * Destroy a flow.
+ * Destroy a flow in a list.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param list
* Pointer to a TAILQ flow list.
* @param[in] flow
* Flow to destroy.
*/
static void
-priv_flow_destroy(struct priv *priv,
- struct mlx5_flows *list,
- struct rte_flow *flow)
+mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
+ struct rte_flow *flow)
{
+ struct priv *priv = dev->data->dev_private;
unsigned int i;
- if (flow->drop || !flow->mark)
+ if (flow->drop || !dev->data->dev_started)
goto free;
- for (i = 0; i != flow->queues_n; ++i) {
+ for (i = 0; flow->tunnel && i != flow->rss_conf.queue_num; ++i) {
+ /* Update queue tunnel type. */
+ struct mlx5_rxq_data *rxq_data = (*priv->rxqs)
+ [(*flow->queues)[i]];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ uint8_t tunnel = PTYPE_IDX(flow->tunnel);
+
+ assert(rxq_ctrl->tunnel_types[tunnel] > 0);
+ rxq_ctrl->tunnel_types[tunnel] -= 1;
+ if (!rxq_ctrl->tunnel_types[tunnel]) {
+ /* Update tunnel type. */
+ uint8_t j;
+ uint8_t types = 0;
+ uint8_t last;
+
+ for (j = 0; j < RTE_DIM(rxq_ctrl->tunnel_types); j++)
+ if (rxq_ctrl->tunnel_types[j]) {
+ types += 1;
+ last = j;
+ }
+ /* Keep the same if more than one tunnel type is left. */
+ if (types == 1)
+ rxq_data->tunnel = ptype_ext[last];
+ else if (types == 0)
+ /* No tunnel type left. */
+ rxq_data->tunnel = 0;
+ }
+ }
+ for (i = 0; flow->mark && i != flow->rss_conf.queue_num; ++i) {
struct rte_flow *tmp;
int mark = 0;
@@ -1998,7 +2646,7 @@ free:
claim_zero(mlx5_glue->destroy_flow
(frxq->ibv_flow));
if (frxq->hrxq)
- mlx5_priv_hrxq_release(priv, frxq->hrxq);
+ mlx5_hrxq_release(dev, frxq->hrxq);
if (frxq->ibv_attr)
rte_free(frxq->ibv_attr);
}
@@ -2008,53 +2656,60 @@ free:
flow->cs = NULL;
}
TAILQ_REMOVE(list, flow, next);
- DEBUG("Flow destroyed %p", (void *)flow);
+ DRV_LOG(DEBUG, "port %u flow destroyed %p", dev->data->port_id,
+ (void *)flow);
rte_free(flow);
}
/**
* Destroy all flows.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param list
* Pointer to a TAILQ flow list.
*/
void
-priv_flow_flush(struct priv *priv, struct mlx5_flows *list)
+mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
{
while (!TAILQ_EMPTY(list)) {
struct rte_flow *flow;
flow = TAILQ_FIRST(list);
- priv_flow_destroy(priv, list, flow);
+ mlx5_flow_list_destroy(dev, list, flow);
}
}
/**
* Create drop queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return
- * 0 on success.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-priv_flow_create_drop_queue(struct priv *priv)
+mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_hrxq_drop *fdq = NULL;
assert(priv->pd);
assert(priv->ctx);
fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
if (!fdq) {
- WARN("cannot allocate memory for drop queue");
- goto error;
+ DRV_LOG(WARNING,
+ "port %u cannot allocate memory for drop queue",
+ dev->data->port_id);
+ rte_errno = ENOMEM;
+ return -rte_errno;
}
fdq->cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
if (!fdq->cq) {
- WARN("cannot allocate CQ for drop queue");
+ DRV_LOG(WARNING, "port %u cannot allocate CQ for drop queue",
+ dev->data->port_id);
+ rte_errno = errno;
goto error;
}
fdq->wq = mlx5_glue->create_wq
@@ -2067,7 +2722,9 @@ priv_flow_create_drop_queue(struct priv *priv)
.cq = fdq->cq,
});
if (!fdq->wq) {
- WARN("cannot allocate WQ for drop queue");
+ DRV_LOG(WARNING, "port %u cannot allocate WQ for drop queue",
+ dev->data->port_id);
+ rte_errno = errno;
goto error;
}
fdq->ind_table = mlx5_glue->create_rwq_ind_table
@@ -2078,7 +2735,11 @@ priv_flow_create_drop_queue(struct priv *priv)
.comp_mask = 0,
});
if (!fdq->ind_table) {
- WARN("cannot allocate indirection table for drop queue");
+ DRV_LOG(WARNING,
+ "port %u cannot allocate indirection table for drop"
+ " queue",
+ dev->data->port_id);
+ rte_errno = errno;
goto error;
}
fdq->qp = mlx5_glue->create_qp_ex
@@ -2100,7 +2761,9 @@ priv_flow_create_drop_queue(struct priv *priv)
.pd = priv->pd
});
if (!fdq->qp) {
- WARN("cannot allocate QP for drop queue");
+ DRV_LOG(WARNING, "port %u cannot allocate QP for drop queue",
+ dev->data->port_id);
+ rte_errno = errno;
goto error;
}
priv->flow_drop_queue = fdq;
@@ -2117,18 +2780,19 @@ error:
if (fdq)
rte_free(fdq);
priv->flow_drop_queue = NULL;
- return -1;
+ return -rte_errno;
}
/**
* Delete drop queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*/
void
-priv_flow_delete_drop_queue(struct priv *priv)
+mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_hrxq_drop *fdq = priv->flow_drop_queue;
if (!fdq)
@@ -2148,18 +2812,19 @@ priv_flow_delete_drop_queue(struct priv *priv)
/**
* Remove all flows.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param list
* Pointer to a TAILQ flow list.
*/
void
-priv_flow_stop(struct priv *priv, struct mlx5_flows *list)
+mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
{
+ struct priv *priv = dev->data->dev_private;
struct rte_flow *flow;
+ unsigned int i;
TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
- unsigned int i;
struct mlx5_ind_table_ibv *ind_tbl = NULL;
if (flow->drop) {
@@ -2168,7 +2833,8 @@ priv_flow_stop(struct priv *priv, struct mlx5_flows *list)
claim_zero(mlx5_glue->destroy_flow
(flow->frxq[HASH_RXQ_ETH].ibv_flow));
flow->frxq[HASH_RXQ_ETH].ibv_flow = NULL;
- DEBUG("Flow %p removed", (void *)flow);
+ DRV_LOG(DEBUG, "port %u flow %p removed",
+ dev->data->port_id, (void *)flow);
/* Next flow. */
continue;
}
@@ -2198,27 +2864,41 @@ priv_flow_stop(struct priv *priv, struct mlx5_flows *list)
claim_zero(mlx5_glue->destroy_flow
(flow->frxq[i].ibv_flow));
flow->frxq[i].ibv_flow = NULL;
- mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq);
+ mlx5_hrxq_release(dev, flow->frxq[i].hrxq);
flow->frxq[i].hrxq = NULL;
}
- DEBUG("Flow %p removed", (void *)flow);
+ DRV_LOG(DEBUG, "port %u flow %p removed", dev->data->port_id,
+ (void *)flow);
+ }
+ /* Cleanup Rx queue tunnel info. */
+ for (i = 0; i != priv->rxqs_n; ++i) {
+ struct mlx5_rxq_data *q = (*priv->rxqs)[i];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(q, struct mlx5_rxq_ctrl, rxq);
+
+ if (!q)
+ continue;
+ memset((void *)rxq_ctrl->tunnel_types, 0,
+ sizeof(rxq_ctrl->tunnel_types));
+ q->tunnel = 0;
}
}
/**
* Add all flows.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param list
* Pointer to a TAILQ flow list.
*
* @return
- * 0 on success, a errno value otherwise and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-priv_flow_start(struct priv *priv, struct mlx5_flows *list)
+mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
{
+ struct priv *priv = dev->data->dev_private;
struct rte_flow *flow;
TAILQ_FOREACH(flow, list, next) {
@@ -2230,12 +2910,14 @@ priv_flow_start(struct priv *priv, struct mlx5_flows *list)
(priv->flow_drop_queue->qp,
flow->frxq[HASH_RXQ_ETH].ibv_attr);
if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) {
- DEBUG("Flow %p cannot be applied",
- (void *)flow);
+ DRV_LOG(DEBUG,
+ "port %u flow %p cannot be applied",
+ dev->data->port_id, (void *)flow);
rte_errno = EINVAL;
- return rte_errno;
+ return -rte_errno;
}
- DEBUG("Flow %p applied", (void *)flow);
+ DRV_LOG(DEBUG, "port %u flow %p applied",
+ dev->data->port_id, (void *)flow);
/* Next flow. */
continue;
}
@@ -2243,41 +2925,46 @@ priv_flow_start(struct priv *priv, struct mlx5_flows *list)
if (!flow->frxq[i].ibv_attr)
continue;
flow->frxq[i].hrxq =
- mlx5_priv_hrxq_get(priv, flow->rss_conf.rss_key,
- flow->rss_conf.rss_key_len,
- hash_rxq_init[i].hash_fields,
- (*flow->queues),
- flow->queues_n);
+ mlx5_hrxq_get(dev, flow->rss_conf.key,
+ flow->rss_conf.key_len,
+ flow->frxq[i].hash_fields,
+ flow->rss_conf.queue,
+ flow->rss_conf.queue_num,
+ flow->tunnel,
+ flow->rss_conf.level);
if (flow->frxq[i].hrxq)
goto flow_create;
flow->frxq[i].hrxq =
- mlx5_priv_hrxq_new(priv, flow->rss_conf.rss_key,
- flow->rss_conf.rss_key_len,
- hash_rxq_init[i].hash_fields,
- (*flow->queues),
- flow->queues_n);
+ mlx5_hrxq_new(dev, flow->rss_conf.key,
+ flow->rss_conf.key_len,
+ flow->frxq[i].hash_fields,
+ flow->rss_conf.queue,
+ flow->rss_conf.queue_num,
+ flow->tunnel,
+ flow->rss_conf.level);
if (!flow->frxq[i].hrxq) {
- DEBUG("Flow %p cannot be applied",
- (void *)flow);
+ DRV_LOG(DEBUG,
+ "port %u flow %p cannot create hash"
+ " rxq",
+ dev->data->port_id, (void *)flow);
rte_errno = EINVAL;
- return rte_errno;
+ return -rte_errno;
}
flow_create:
+ mlx5_flow_dump(dev, flow, i);
flow->frxq[i].ibv_flow =
mlx5_glue->create_flow(flow->frxq[i].hrxq->qp,
flow->frxq[i].ibv_attr);
if (!flow->frxq[i].ibv_flow) {
- DEBUG("Flow %p cannot be applied",
- (void *)flow);
+ DRV_LOG(DEBUG,
+ "port %u flow %p type %u cannot be"
+ " applied",
+ dev->data->port_id, (void *)flow, i);
rte_errno = EINVAL;
- return rte_errno;
+ return -rte_errno;
}
- DEBUG("Flow %p applied", (void *)flow);
}
- if (!flow->mark)
- continue;
- for (i = 0; i != flow->queues_n; ++i)
- (*priv->rxqs)[(*flow->queues)[i]]->mark = 1;
+ mlx5_flow_create_update_rxqs(dev, flow);
}
return 0;
}
@@ -2285,20 +2972,21 @@ flow_create:
/**
* Verify the flow list is empty
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return the number of flows not released.
*/
int
-priv_flow_verify(struct priv *priv)
+mlx5_flow_verify(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct rte_flow *flow;
int ret = 0;
TAILQ_FOREACH(flow, &priv->flows, next) {
- DEBUG("%p: flow %p still referenced", (void *)priv,
- (void *)flow);
+ DRV_LOG(DEBUG, "port %u flow %p still referenced",
+ dev->data->port_id, (void *)flow);
++ret;
}
return ret;
@@ -2319,7 +3007,7 @@ priv_flow_verify(struct priv *priv)
* A VLAN flow mask to apply.
*
* @return
- * 0 on success.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
@@ -2351,9 +3039,20 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
.type = RTE_FLOW_ITEM_TYPE_END,
},
};
+ uint16_t queue[priv->reta_idx_n];
+ struct rte_flow_action_rss action_rss = {
+ .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
+ .level = 0,
+ .types = priv->rss_conf.rss_hf,
+ .key_len = priv->rss_conf.rss_key_len,
+ .queue_num = priv->reta_idx_n,
+ .key = priv->rss_conf.rss_key,
+ .queue = queue,
+ };
struct rte_flow_action actions[] = {
{
.type = RTE_FLOW_ACTION_TYPE_RSS,
+ .conf = &action_rss,
},
{
.type = RTE_FLOW_ACTION_TYPE_END,
@@ -2362,26 +3061,17 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
struct rte_flow *flow;
struct rte_flow_error error;
unsigned int i;
- union {
- struct rte_flow_action_rss rss;
- struct {
- const struct rte_eth_rss_conf *rss_conf;
- uint16_t num;
- uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
- } local;
- } action_rss;
-
- if (!priv->reta_idx_n)
- return EINVAL;
+
+ if (!priv->reta_idx_n) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
for (i = 0; i != priv->reta_idx_n; ++i)
- action_rss.local.queue[i] = (*priv->reta_idx)[i];
- action_rss.local.rss_conf = &priv->rss_conf;
- action_rss.local.num = priv->reta_idx_n;
- actions[0].conf = (const void *)&action_rss.rss;
- flow = priv_flow_create(priv, &priv->ctrl_flows, &attr, items, actions,
- &error);
+ queue[i] = (*priv->reta_idx)[i];
+ flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items,
+ actions, &error);
if (!flow)
- return rte_errno;
+ return -rte_errno;
return 0;
}
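
With the reworked rte_flow_action_rss, the control flow above fills the action directly from the device RSS configuration: a local array of queue indices copied from the RETA, plus key/key_len/types taken from priv->rss_conf, instead of the old union-based scratch layout. A minimal sketch of building such an action outside the driver, illustrative only; the key and queue arrays must remain valid for as long as the action is used:

#include <stdint.h>
#include <rte_flow.h>

/* Build an RSS action spreading traffic across the given queues with a
 * caller-provided key (typically 40 bytes for mlx5). */
static struct rte_flow_action_rss
make_rss_action(const uint8_t *key, uint32_t key_len,
		const uint16_t *queues, uint32_t queue_num, uint64_t types)
{
	return (struct rte_flow_action_rss){
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.level = 0,		/* hash on the outermost headers */
		.types = types,		/* e.g. ETH_RSS_IP */
		.key_len = key_len,
		.queue_num = queue_num,
		.key = key,
		.queue = queues,
	};
}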
@@ -2396,7 +3086,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
* An Ethernet flow mask to apply.
*
* @return
- * 0 on success.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_ctrl_flow(struct rte_eth_dev *dev,
@@ -2415,14 +3105,11 @@ mlx5_ctrl_flow(struct rte_eth_dev *dev,
int
mlx5_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow *flow,
- struct rte_flow_error *error)
+ struct rte_flow_error *error __rte_unused)
{
struct priv *priv = dev->data->dev_private;
- (void)error;
- priv_lock(priv);
- priv_flow_destroy(priv, &priv->flows, flow);
- priv_unlock(priv);
+ mlx5_flow_list_destroy(dev, &priv->flows, flow);
return 0;
}
@@ -2434,14 +3121,11 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
*/
int
mlx5_flow_flush(struct rte_eth_dev *dev,
- struct rte_flow_error *error)
+ struct rte_flow_error *error __rte_unused)
{
struct priv *priv = dev->data->dev_private;
- (void)error;
- priv_lock(priv);
- priv_flow_flush(priv, &priv->flows);
- priv_unlock(priv);
+ mlx5_flow_list_flush(dev, &priv->flows);
return 0;
}
@@ -2455,10 +3139,10 @@ mlx5_flow_flush(struct rte_eth_dev *dev,
* returned data from the counter.
*
* @return
- * 0 on success, a errno value otherwise and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_flow_query_count(struct ibv_counter_set *cs,
+mlx5_flow_query_count(struct ibv_counter_set *cs,
struct mlx5_flow_counter_stats *counter_stats,
struct rte_flow_query_count *query_count,
struct rte_flow_error *error)
@@ -2472,15 +3156,13 @@ priv_flow_query_count(struct ibv_counter_set *cs,
.out = counters,
.outlen = 2 * sizeof(uint64_t),
};
- int res = mlx5_glue->query_counter_set(&query_cs_attr, &query_out);
+ int err = mlx5_glue->query_counter_set(&query_cs_attr, &query_out);
- if (res) {
- rte_flow_error_set(error, -res,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "cannot read counter");
- return -res;
- }
+ if (err)
+ return rte_flow_error_set(error, err,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot read counter");
query_count->hits_set = 1;
query_count->bytes_set = 1;
query_count->hits = counters[0] - counter_stats->hits;
@@ -2499,29 +3181,28 @@ priv_flow_query_count(struct ibv_counter_set *cs,
* @see rte_flow_ops
*/
int
-mlx5_flow_query(struct rte_eth_dev *dev,
+mlx5_flow_query(struct rte_eth_dev *dev __rte_unused,
struct rte_flow *flow,
- enum rte_flow_action_type action __rte_unused,
+ const struct rte_flow_action *action __rte_unused,
void *data,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
- int res = EINVAL;
-
- priv_lock(priv);
if (flow->cs) {
- res = priv_flow_query_count(flow->cs,
- &flow->counter_stats,
- (struct rte_flow_query_count *)data,
- error);
+ int ret;
+
+ ret = mlx5_flow_query_count(flow->cs,
+ &flow->counter_stats,
+ (struct rte_flow_query_count *)data,
+ error);
+ if (ret)
+ return ret;
} else {
- rte_flow_error_set(error, res,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "no counter found for flow");
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "no counter found for flow");
}
- priv_unlock(priv);
- return -res;
+ return 0;
}
#endif
@@ -2538,48 +3219,50 @@ mlx5_flow_isolate(struct rte_eth_dev *dev,
{
struct priv *priv = dev->data->dev_private;
- priv_lock(priv);
if (dev->data->dev_started) {
rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
"port must be stopped first");
- priv_unlock(priv);
return -rte_errno;
}
priv->isolated = !!enable;
if (enable)
- priv->dev->dev_ops = &mlx5_dev_ops_isolate;
+ dev->dev_ops = &mlx5_dev_ops_isolate;
else
- priv->dev->dev_ops = &mlx5_dev_ops;
- priv_unlock(priv);
+ dev->dev_ops = &mlx5_dev_ops;
return 0;
}
/**
* Convert a flow director filter to a generic flow.
*
- * @param priv
- * Private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param fdir_filter
* Flow director filter to add.
* @param attributes
* Generic flow parameters structure.
*
* @return
- * 0 on success, errno value on error.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_fdir_filter_convert(struct priv *priv,
+mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *fdir_filter,
struct mlx5_fdir *attributes)
{
+ struct priv *priv = dev->data->dev_private;
const struct rte_eth_fdir_input *input = &fdir_filter->input;
+ const struct rte_eth_fdir_masks *mask =
+ &dev->data->dev_conf.fdir_conf.mask;
/* Validate queue number. */
if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
- ERROR("invalid queue number %d", fdir_filter->action.rx_queue);
- return EINVAL;
+ DRV_LOG(ERR, "port %u invalid queue number %d",
+ dev->data->port_id, fdir_filter->action.rx_queue);
+ rte_errno = EINVAL;
+ return -rte_errno;
}
attributes->attr.ingress = 1;
attributes->items[0] = (struct rte_flow_item) {
@@ -2600,144 +3283,140 @@ priv_fdir_filter_convert(struct priv *priv,
};
break;
default:
- ERROR("invalid behavior %d", fdir_filter->action.behavior);
- return ENOTSUP;
+ DRV_LOG(ERR, "port %u invalid behavior %d",
+ dev->data->port_id,
+ fdir_filter->action.behavior);
+ rte_errno = ENOTSUP;
+ return -rte_errno;
}
attributes->queue.index = fdir_filter->action.rx_queue;
+ /* Handle L3. */
switch (fdir_filter->input.flow_type) {
case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
attributes->l3.ipv4.hdr = (struct ipv4_hdr){
- .src_addr = input->flow.udp4_flow.ip.src_ip,
- .dst_addr = input->flow.udp4_flow.ip.dst_ip,
- .time_to_live = input->flow.udp4_flow.ip.ttl,
- .type_of_service = input->flow.udp4_flow.ip.tos,
- .next_proto_id = input->flow.udp4_flow.ip.proto,
+ .src_addr = input->flow.ip4_flow.src_ip,
+ .dst_addr = input->flow.ip4_flow.dst_ip,
+ .time_to_live = input->flow.ip4_flow.ttl,
+ .type_of_service = input->flow.ip4_flow.tos,
+ .next_proto_id = input->flow.ip4_flow.proto,
};
- attributes->l4.udp.hdr = (struct udp_hdr){
- .src_port = input->flow.udp4_flow.src_port,
- .dst_port = input->flow.udp4_flow.dst_port,
+ attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){
+ .src_addr = mask->ipv4_mask.src_ip,
+ .dst_addr = mask->ipv4_mask.dst_ip,
+ .time_to_live = mask->ipv4_mask.ttl,
+ .type_of_service = mask->ipv4_mask.tos,
+ .next_proto_id = mask->ipv4_mask.proto,
};
attributes->items[1] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_IPV4,
.spec = &attributes->l3,
- .mask = &attributes->l3,
+ .mask = &attributes->l3_mask,
+ };
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+ attributes->l3.ipv6.hdr = (struct ipv6_hdr){
+ .hop_limits = input->flow.ipv6_flow.hop_limits,
+ .proto = input->flow.ipv6_flow.proto,
+ };
+
+ memcpy(attributes->l3.ipv6.hdr.src_addr,
+ input->flow.ipv6_flow.src_ip,
+ RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
+ memcpy(attributes->l3.ipv6.hdr.dst_addr,
+ input->flow.ipv6_flow.dst_ip,
+ RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
+ memcpy(attributes->l3_mask.ipv6.hdr.src_addr,
+ mask->ipv6_mask.src_ip,
+ RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
+ memcpy(attributes->l3_mask.ipv6.hdr.dst_addr,
+ mask->ipv6_mask.dst_ip,
+ RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
+ attributes->items[1] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_IPV6,
+ .spec = &attributes->l3,
+ .mask = &attributes->l3_mask,
+ };
+ break;
+ default:
+ DRV_LOG(ERR, "port %u invalid flow type%d",
+ dev->data->port_id, fdir_filter->input.flow_type);
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+ }
+ /* Handle L4. */
+ switch (fdir_filter->input.flow_type) {
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ attributes->l4.udp.hdr = (struct udp_hdr){
+ .src_port = input->flow.udp4_flow.src_port,
+ .dst_port = input->flow.udp4_flow.dst_port,
+ };
+ attributes->l4_mask.udp.hdr = (struct udp_hdr){
+ .src_port = mask->src_port_mask,
+ .dst_port = mask->dst_port_mask,
};
attributes->items[2] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_UDP,
.spec = &attributes->l4,
- .mask = &attributes->l4,
+ .mask = &attributes->l4_mask,
};
break;
case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
- attributes->l3.ipv4.hdr = (struct ipv4_hdr){
- .src_addr = input->flow.tcp4_flow.ip.src_ip,
- .dst_addr = input->flow.tcp4_flow.ip.dst_ip,
- .time_to_live = input->flow.tcp4_flow.ip.ttl,
- .type_of_service = input->flow.tcp4_flow.ip.tos,
- .next_proto_id = input->flow.tcp4_flow.ip.proto,
- };
attributes->l4.tcp.hdr = (struct tcp_hdr){
.src_port = input->flow.tcp4_flow.src_port,
.dst_port = input->flow.tcp4_flow.dst_port,
};
- attributes->items[1] = (struct rte_flow_item){
- .type = RTE_FLOW_ITEM_TYPE_IPV4,
- .spec = &attributes->l3,
- .mask = &attributes->l3,
+ attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
+ .src_port = mask->src_port_mask,
+ .dst_port = mask->dst_port_mask,
};
attributes->items[2] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_TCP,
.spec = &attributes->l4,
- .mask = &attributes->l4,
- };
- break;
- case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
- attributes->l3.ipv4.hdr = (struct ipv4_hdr){
- .src_addr = input->flow.ip4_flow.src_ip,
- .dst_addr = input->flow.ip4_flow.dst_ip,
- .time_to_live = input->flow.ip4_flow.ttl,
- .type_of_service = input->flow.ip4_flow.tos,
- .next_proto_id = input->flow.ip4_flow.proto,
- };
- attributes->items[1] = (struct rte_flow_item){
- .type = RTE_FLOW_ITEM_TYPE_IPV4,
- .spec = &attributes->l3,
- .mask = &attributes->l3,
+ .mask = &attributes->l4_mask,
};
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
- attributes->l3.ipv6.hdr = (struct ipv6_hdr){
- .hop_limits = input->flow.udp6_flow.ip.hop_limits,
- .proto = input->flow.udp6_flow.ip.proto,
- };
- memcpy(attributes->l3.ipv6.hdr.src_addr,
- input->flow.udp6_flow.ip.src_ip,
- RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
- memcpy(attributes->l3.ipv6.hdr.dst_addr,
- input->flow.udp6_flow.ip.dst_ip,
- RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
attributes->l4.udp.hdr = (struct udp_hdr){
.src_port = input->flow.udp6_flow.src_port,
.dst_port = input->flow.udp6_flow.dst_port,
};
- attributes->items[1] = (struct rte_flow_item){
- .type = RTE_FLOW_ITEM_TYPE_IPV6,
- .spec = &attributes->l3,
- .mask = &attributes->l3,
+ attributes->l4_mask.udp.hdr = (struct udp_hdr){
+ .src_port = mask->src_port_mask,
+ .dst_port = mask->dst_port_mask,
};
attributes->items[2] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_UDP,
.spec = &attributes->l4,
- .mask = &attributes->l4,
+ .mask = &attributes->l4_mask,
};
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
- attributes->l3.ipv6.hdr = (struct ipv6_hdr){
- .hop_limits = input->flow.tcp6_flow.ip.hop_limits,
- .proto = input->flow.tcp6_flow.ip.proto,
- };
- memcpy(attributes->l3.ipv6.hdr.src_addr,
- input->flow.tcp6_flow.ip.src_ip,
- RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
- memcpy(attributes->l3.ipv6.hdr.dst_addr,
- input->flow.tcp6_flow.ip.dst_ip,
- RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
attributes->l4.tcp.hdr = (struct tcp_hdr){
.src_port = input->flow.tcp6_flow.src_port,
.dst_port = input->flow.tcp6_flow.dst_port,
};
- attributes->items[1] = (struct rte_flow_item){
- .type = RTE_FLOW_ITEM_TYPE_IPV6,
- .spec = &attributes->l3,
- .mask = &attributes->l3,
+ attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
+ .src_port = mask->src_port_mask,
+ .dst_port = mask->dst_port_mask,
};
attributes->items[2] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_TCP,
.spec = &attributes->l4,
- .mask = &attributes->l4,
+ .mask = &attributes->l4_mask,
};
break;
+ case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
- attributes->l3.ipv6.hdr = (struct ipv6_hdr){
- .hop_limits = input->flow.ipv6_flow.hop_limits,
- .proto = input->flow.ipv6_flow.proto,
- };
- memcpy(attributes->l3.ipv6.hdr.src_addr,
- input->flow.ipv6_flow.src_ip,
- RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
- memcpy(attributes->l3.ipv6.hdr.dst_addr,
- input->flow.ipv6_flow.dst_ip,
- RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
- attributes->items[1] = (struct rte_flow_item){
- .type = RTE_FLOW_ITEM_TYPE_IPV6,
- .spec = &attributes->l3,
- .mask = &attributes->l3,
- };
break;
default:
- ERROR("invalid flow type%d",
- fdir_filter->input.flow_type);
- return ENOTSUP;
+ DRV_LOG(ERR, "port %u invalid flow type%d",
+ dev->data->port_id, fdir_filter->input.flow_type);
+ rte_errno = ENOTSUP;
+ return -rte_errno;
}
return 0;
}
@@ -2745,18 +3424,19 @@ priv_fdir_filter_convert(struct priv *priv,
/**
* Add new flow director filter and store it in list.
*
- * @param priv
- * Private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param fdir_filter
* Flow director filter to add.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_fdir_filter_add(struct priv *priv,
+mlx5_fdir_filter_add(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *fdir_filter)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_fdir attributes = {
.attr.group = 0,
.l2_mask = {
@@ -2772,41 +3452,40 @@ priv_fdir_filter_add(struct priv *priv,
struct rte_flow *flow;
int ret;
- ret = priv_fdir_filter_convert(priv, fdir_filter, &attributes);
+ ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
if (ret)
- return -ret;
- ret = priv_flow_convert(priv, &attributes.attr, attributes.items,
+ return ret;
+ ret = mlx5_flow_convert(dev, &attributes.attr, attributes.items,
attributes.actions, &error, &parser);
if (ret)
- return -ret;
- flow = priv_flow_create(priv,
- &priv->flows,
- &attributes.attr,
- attributes.items,
- attributes.actions,
- &error);
+ return ret;
+ flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr,
+ attributes.items, attributes.actions,
+ &error);
if (flow) {
- DEBUG("FDIR created %p", (void *)flow);
+ DRV_LOG(DEBUG, "port %u FDIR created %p", dev->data->port_id,
+ (void *)flow);
return 0;
}
- return ENOTSUP;
+ return -rte_errno;
}
/**
* Delete specific filter.
*
- * @param priv
- * Private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param fdir_filter
* Filter to be deleted.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_fdir_filter_delete(struct priv *priv,
+mlx5_fdir_filter_delete(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *fdir_filter)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_fdir attributes = {
.attr.group = 0,
};
@@ -2819,10 +3498,10 @@ priv_fdir_filter_delete(struct priv *priv,
unsigned int i;
int ret;
- ret = priv_fdir_filter_convert(priv, fdir_filter, &attributes);
+ ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
if (ret)
- return -ret;
- ret = priv_flow_convert(priv, &attributes.attr, attributes.items,
+ return ret;
+ ret = mlx5_flow_convert(dev, &attributes.attr, attributes.items,
attributes.actions, &error, &parser);
if (ret)
goto exit;
@@ -2850,11 +3529,14 @@ priv_fdir_filter_delete(struct priv *priv,
struct ibv_spec_header *flow_h;
void *flow_spec;
unsigned int specs_n;
+ unsigned int queue_id = parser.drop ? HASH_RXQ_ETH :
+ parser.layer;
- attr = parser.queue[HASH_RXQ_ETH].ibv_attr;
- flow_attr = flow->frxq[HASH_RXQ_ETH].ibv_attr;
+ attr = parser.queue[queue_id].ibv_attr;
+ flow_attr = flow->frxq[queue_id].ibv_attr;
/* Compare first the attributes. */
- if (memcmp(attr, flow_attr, sizeof(struct ibv_flow_attr)))
+ if (!flow_attr ||
+ memcmp(attr, flow_attr, sizeof(struct ibv_flow_attr)))
continue;
if (attr->num_of_specs == 0)
continue;
@@ -2879,67 +3561,70 @@ wrong_flow:
/* The flow does not match. */
continue;
}
+ ret = rte_errno; /* Save rte_errno before cleanup. */
if (flow)
- priv_flow_destroy(priv, &priv->flows, flow);
+ mlx5_flow_list_destroy(dev, &priv->flows, flow);
exit:
for (i = 0; i != hash_rxq_init_n; ++i) {
if (parser.queue[i].ibv_attr)
rte_free(parser.queue[i].ibv_attr);
}
- return -ret;
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
}
/**
* Update queue for specific filter.
*
- * @param priv
- * Private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param fdir_filter
* Filter to be updated.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_fdir_filter_update(struct priv *priv,
+mlx5_fdir_filter_update(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *fdir_filter)
{
int ret;
- ret = priv_fdir_filter_delete(priv, fdir_filter);
+ ret = mlx5_fdir_filter_delete(dev, fdir_filter);
if (ret)
return ret;
- ret = priv_fdir_filter_add(priv, fdir_filter);
- return ret;
+ return mlx5_fdir_filter_add(dev, fdir_filter);
}
/**
* Flush all filters.
*
- * @param priv
- * Private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*/
static void
-priv_fdir_filter_flush(struct priv *priv)
+mlx5_fdir_filter_flush(struct rte_eth_dev *dev)
{
- priv_flow_flush(priv, &priv->flows);
+ struct priv *priv = dev->data->dev_private;
+
+ mlx5_flow_list_flush(dev, &priv->flows);
}
/**
* Get flow director information.
*
- * @param priv
- * Private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param[out] fdir_info
* Resulting flow director information.
*/
static void
-priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info)
+mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
{
struct rte_eth_fdir_masks *mask =
- &priv->dev->data->dev_conf.fdir_conf.mask;
+ &dev->data->dev_conf.fdir_conf.mask;
- fdir_info->mode = priv->dev->data->dev_conf.fdir_conf.mode;
+ fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
fdir_info->guarant_spc = 0;
rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
fdir_info->max_flexpayload = 0;
@@ -2953,54 +3638,52 @@ priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info)
/**
* Deal with flow director operations.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param filter_op
* Operation to perform.
* @param arg
* Pointer to operation-specific structure.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg)
+mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
+ void *arg)
{
enum rte_fdir_mode fdir_mode =
- priv->dev->data->dev_conf.fdir_conf.mode;
- int ret = 0;
+ dev->data->dev_conf.fdir_conf.mode;
if (filter_op == RTE_ETH_FILTER_NOP)
return 0;
if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
- ERROR("%p: flow director mode %d not supported",
- (void *)priv, fdir_mode);
- return EINVAL;
+ DRV_LOG(ERR, "port %u flow director mode %d not supported",
+ dev->data->port_id, fdir_mode);
+ rte_errno = EINVAL;
+ return -rte_errno;
}
switch (filter_op) {
case RTE_ETH_FILTER_ADD:
- ret = priv_fdir_filter_add(priv, arg);
- break;
+ return mlx5_fdir_filter_add(dev, arg);
case RTE_ETH_FILTER_UPDATE:
- ret = priv_fdir_filter_update(priv, arg);
- break;
+ return mlx5_fdir_filter_update(dev, arg);
case RTE_ETH_FILTER_DELETE:
- ret = priv_fdir_filter_delete(priv, arg);
- break;
+ return mlx5_fdir_filter_delete(dev, arg);
case RTE_ETH_FILTER_FLUSH:
- priv_fdir_filter_flush(priv);
+ mlx5_fdir_filter_flush(dev);
break;
case RTE_ETH_FILTER_INFO:
- priv_fdir_info_get(priv, arg);
+ mlx5_fdir_info_get(dev, arg);
break;
default:
- DEBUG("%p: unknown operation %u", (void *)priv,
- filter_op);
- ret = EINVAL;
- break;
+ DRV_LOG(DEBUG, "port %u unknown operation %u",
+ dev->data->port_id, filter_op);
+ rte_errno = EINVAL;
+ return -rte_errno;
}
- return ret;
+ return 0;
}
/**
@@ -3016,7 +3699,7 @@ priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg)
* Pointer to operation-specific structure.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
@@ -3024,24 +3707,74 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg)
{
- int ret = EINVAL;
- struct priv *priv = dev->data->dev_private;
-
switch (filter_type) {
case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET)
- return -EINVAL;
+ if (filter_op != RTE_ETH_FILTER_GET) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
*(const void **)arg = &mlx5_flow_ops;
return 0;
case RTE_ETH_FILTER_FDIR:
- priv_lock(priv);
- ret = priv_fdir_ctrl_func(priv, filter_op, arg);
- priv_unlock(priv);
- break;
+ return mlx5_fdir_ctrl_func(dev, filter_op, arg);
default:
- ERROR("%p: filter type (%d) not supported",
- (void *)dev, filter_type);
- break;
+ DRV_LOG(ERR, "port %u filter type (%d) not supported",
+ dev->data->port_id, filter_type);
+ rte_errno = ENOTSUP;
+ return -rte_errno;
}
- return -ret;
+ return 0;
+}
+
+/**
+ * Detect number of Verbs flow priorities supported.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * Number of supported Verbs flow priorities.
+ */
+unsigned int
+mlx5_get_max_verbs_prio(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int verb_priorities = MLX5_VERBS_FLOW_PRIO_8;
+ struct {
+ struct ibv_flow_attr attr;
+ struct ibv_flow_spec_eth eth;
+ struct ibv_flow_spec_action_drop drop;
+ } flow_attr = {
+ .attr = {
+ .num_of_specs = 2,
+ },
+ .eth = {
+ .type = IBV_FLOW_SPEC_ETH,
+ .size = sizeof(struct ibv_flow_spec_eth),
+ },
+ .drop = {
+ .size = sizeof(struct ibv_flow_spec_action_drop),
+ .type = IBV_FLOW_SPEC_ACTION_DROP,
+ },
+ };
+ struct ibv_flow *flow;
+
+ do {
+ flow_attr.attr.priority = verb_priorities - 1;
+ flow = mlx5_glue->create_flow(priv->flow_drop_queue->qp,
+ &flow_attr.attr);
+ if (flow) {
+ claim_zero(mlx5_glue->destroy_flow(flow));
+ /* Try more priorities. */
+ verb_priorities *= 2;
+ } else {
+ /* Failed, restore last right number. */
+ verb_priorities /= 2;
+ break;
+ }
+ } while (1);
+ DRV_LOG(DEBUG, "port %u Verbs flow priorities: %d,"
+ " user flow priorities: %d",
+ dev->data->port_id, verb_priorities, MLX5_CTRL_FLOW_PRIORITY);
+ return verb_priorities;
}
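A recurring change throughout the flow code above is the error convention: on failure a function stores the cause in rte_errno and returns -rte_errno, so callers can test the sign and still recover the errno value. A minimal sketch of the convention, assuming only rte_errno.h and a hypothetical example_check_index() helper:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <rte_errno.h>

/* Hypothetical helper following the "negative errno + rte_errno" rule. */
static int
example_check_index(unsigned int idx, unsigned int max)
{
	if (idx >= max) {
		rte_errno = EINVAL;   /* record the cause */
		return -rte_errno;    /* negative value signals failure */
	}
	return 0;
}

int
main(void)
{
	int ret = example_check_index(10, 4);

	if (ret < 0)
		printf("failed: %s\n", strerror(rte_errno));
	return 0;
}

rte_flow_error_set() follows the same convention, which is why the query path above can return its result directly.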
diff --git a/drivers/net/mlx5/mlx5_glue.c b/drivers/net/mlx5/mlx5_glue.c
index 1c4396ad..c7965e51 100644
--- a/drivers/net/mlx5/mlx5_glue.c
+++ b/drivers/net/mlx5/mlx5_glue.c
@@ -1,12 +1,18 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2018 6WIND S.A.
- * Copyright 2018 Mellanox Technologies, Ltd.
+ * Copyright 2018 Mellanox Technologies, Ltd
*/
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
+/*
+ * Not needed by this file; included to work around the lack of off_t
+ * definition for mlx5dv.h with unpatched rdma-core versions.
+ */
+#include <sys/types.h>
+
/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
@@ -287,6 +293,21 @@ mlx5_glue_dv_create_cq(struct ibv_context *context,
return mlx5dv_create_cq(context, cq_attr, mlx5_cq_attr);
}
+static struct ibv_wq *
+mlx5_glue_dv_create_wq(struct ibv_context *context,
+ struct ibv_wq_init_attr *wq_attr,
+ struct mlx5dv_wq_init_attr *mlx5_wq_attr)
+{
+#ifndef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+ (void)context;
+ (void)wq_attr;
+ (void)mlx5_wq_attr;
+ return NULL;
+#else
+ return mlx5dv_create_wq(context, wq_attr, mlx5_wq_attr);
+#endif
+}
+
static int
mlx5_glue_dv_query_device(struct ibv_context *ctx,
struct mlx5dv_context *attrs_out)
@@ -307,6 +328,21 @@ mlx5_glue_dv_init_obj(struct mlx5dv_obj *obj, uint64_t obj_type)
return mlx5dv_init_obj(obj, obj_type);
}
+static struct ibv_qp *
+mlx5_glue_dv_create_qp(struct ibv_context *context,
+ struct ibv_qp_init_attr_ex *qp_init_attr_ex,
+ struct mlx5dv_qp_init_attr *dv_qp_init_attr)
+{
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ return mlx5dv_create_qp(context, qp_init_attr_ex, dv_qp_init_attr);
+#else
+ (void)context;
+ (void)qp_init_attr_ex;
+ (void)dv_qp_init_attr;
+ return NULL;
+#endif
+}
+
const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue){
.version = MLX5_GLUE_VERSION,
.fork_init = mlx5_glue_fork_init,
@@ -347,7 +383,9 @@ const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue){
.port_state_str = mlx5_glue_port_state_str,
.cq_ex_to_cq = mlx5_glue_cq_ex_to_cq,
.dv_create_cq = mlx5_glue_dv_create_cq,
+ .dv_create_wq = mlx5_glue_dv_create_wq,
.dv_query_device = mlx5_glue_dv_query_device,
.dv_set_context_attr = mlx5_glue_dv_set_context_attr,
.dv_init_obj = mlx5_glue_dv_init_obj,
+ .dv_create_qp = mlx5_glue_dv_create_qp,
};
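The new dv_create_wq() and dv_create_qp() wrappers follow the glue library's usual stub pattern: when the rdma-core feature macro is not defined, the wrapper swallows its arguments and returns NULL so the PMD can detect the missing capability at runtime instead of failing to build. A compilable, self-contained sketch of that pattern with made-up names (HAVE_EXAMPLE_FEATURE, example_create()) follows; it is not part of the patch:

#include <stdio.h>

/* Hypothetical attribute type for illustration only. */
struct example_attr { int depth; };

#ifdef HAVE_EXAMPLE_FEATURE
/* Stand-in for the real library call that only exists when the feature is
 * available. */
static void *example_create(struct example_attr *attr) { return attr; }
#endif

/* Same stub pattern as the dv_create_wq/dv_create_qp wrappers above:
 * forward the call when the library provides it, otherwise return NULL so
 * the caller sees the feature as unsupported at runtime. */
static void *
glue_example_create(struct example_attr *attr)
{
#ifdef HAVE_EXAMPLE_FEATURE
	return example_create(attr);
#else
	(void)attr;
	return NULL;
#endif
}

int
main(void)
{
	struct example_attr attr = { .depth = 1 };

	if (glue_example_create(&attr) == NULL)
		printf("feature not compiled in\n");
	return 0;
}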
diff --git a/drivers/net/mlx5/mlx5_glue.h b/drivers/net/mlx5/mlx5_glue.h
index b5efee3b..e584d367 100644
--- a/drivers/net/mlx5/mlx5_glue.h
+++ b/drivers/net/mlx5/mlx5_glue.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2018 6WIND S.A.
- * Copyright 2018 Mellanox Technologies, Ltd.
+ * Copyright 2018 Mellanox Technologies, Ltd
*/
#ifndef MLX5_GLUE_H_
@@ -31,6 +31,14 @@ struct ibv_counter_set_init_attr;
struct ibv_query_counter_set_attr;
#endif
+#ifndef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+struct mlx5dv_qp_init_attr;
+#endif
+
+#ifndef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+struct mlx5dv_wq_init_attr;
+#endif
+
/* LIB_GLUE_VERSION must be updated every time this structure is modified. */
struct mlx5_glue {
const char *version;
@@ -100,12 +108,20 @@ struct mlx5_glue {
(struct ibv_context *context,
struct ibv_cq_init_attr_ex *cq_attr,
struct mlx5dv_cq_init_attr *mlx5_cq_attr);
+ struct ibv_wq *(*dv_create_wq)
+ (struct ibv_context *context,
+ struct ibv_wq_init_attr *wq_attr,
+ struct mlx5dv_wq_init_attr *mlx5_wq_attr);
int (*dv_query_device)(struct ibv_context *ctx_in,
struct mlx5dv_context *attrs_out);
int (*dv_set_context_attr)(struct ibv_context *ibv_ctx,
enum mlx5dv_set_ctx_attr_type type,
void *attr);
int (*dv_init_obj)(struct mlx5dv_obj *obj, uint64_t obj_type);
+ struct ibv_qp *(*dv_create_qp)
+ (struct ibv_context *context,
+ struct ibv_qp_init_attr_ex *qp_init_attr_ex,
+ struct mlx5dv_qp_init_attr *dv_qp_init_attr);
};
const struct mlx5_glue *mlx5_glue;
diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c
index e8a8d459..672a4761 100644
--- a/drivers/net/mlx5/mlx5_mac.c
+++ b/drivers/net/mlx5/mlx5_mac.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#include <stddef.h>
@@ -35,44 +35,52 @@
/**
* Get MAC address by querying netdevice.
*
- * @param[in] priv
- * struct priv for the requested device.
+ * @param[in] dev
+ * Pointer to Ethernet device.
* @param[out] mac
* MAC address output buffer.
*
* @return
- * 0 on success, -1 on failure and errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-priv_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN])
+mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN])
{
struct ifreq request;
+ int ret;
- if (priv_ifreq(priv, SIOCGIFHWADDR, &request))
- return -1;
+ ret = mlx5_ifreq(dev, SIOCGIFHWADDR, &request);
+ if (ret)
+ return ret;
memcpy(mac, request.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);
return 0;
}
/**
- * DPDK callback to remove a MAC address.
+ * Remove a MAC address from the internal array.
*
* @param dev
* Pointer to Ethernet device structure.
* @param index
* MAC address index.
*/
-void
-mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+static void
+mlx5_internal_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
+ struct priv *priv = dev->data->dev_private;
+ const int vf = priv->config.vf;
+
assert(index < MLX5_MAX_MAC_ADDRESSES);
+ if (is_zero_ether_addr(&dev->data->mac_addrs[index]))
+ return;
+ if (vf)
+ mlx5_nl_mac_addr_remove(dev, &dev->data->mac_addrs[index],
+ index);
memset(&dev->data->mac_addrs[index], 0, sizeof(struct ether_addr));
- if (!dev->data->promiscuous)
- mlx5_traffic_restart(dev);
}
/**
- * DPDK callback to add a MAC address.
+ * Adds a MAC address to the internal array.
*
* @param dev
* Pointer to Ethernet device structure.
@@ -80,21 +88,23 @@ mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
* MAC address to register.
* @param index
* MAC address index.
- * @param vmdq
- * VMDq pool index to associate address with (ignored).
*
* @return
- * 0 on success.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-int
-mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
- uint32_t index, uint32_t vmdq)
+static int
+mlx5_internal_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
+ uint32_t index)
{
+ struct priv *priv = dev->data->dev_private;
+ const int vf = priv->config.vf;
unsigned int i;
- int ret = 0;
- (void)vmdq;
assert(index < MLX5_MAX_MAC_ADDRESSES);
+ if (is_zero_ether_addr(mac)) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
/* First, make sure this address isn't already configured. */
for (i = 0; (i != MLX5_MAX_MAC_ADDRESSES); ++i) {
/* Skip this index, it's going to be reconfigured. */
@@ -103,12 +113,74 @@ mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
if (memcmp(&dev->data->mac_addrs[i], mac, sizeof(*mac)))
continue;
/* Address already configured elsewhere, return with error. */
- return EADDRINUSE;
+ rte_errno = EADDRINUSE;
+ return -rte_errno;
+ }
+ if (vf) {
+ int ret = mlx5_nl_mac_addr_add(dev, mac, index);
+
+ if (ret)
+ return ret;
}
dev->data->mac_addrs[index] = *mac;
+ return 0;
+}
+
+/**
+ * DPDK callback to remove a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param index
+ * MAC address index.
+ */
+void
+mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ int ret;
+
+ if (index >= MLX5_MAX_UC_MAC_ADDRESSES)
+ return;
+ mlx5_internal_mac_addr_remove(dev, index);
+ if (!dev->data->promiscuous) {
+ ret = mlx5_traffic_restart(dev);
+ if (ret)
+ DRV_LOG(ERR, "port %u cannot restart traffic: %s",
+ dev->data->port_id, strerror(rte_errno));
+ }
+}
+
+/**
+ * DPDK callback to add a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mac_addr
+ * MAC address to register.
+ * @param index
+ * MAC address index.
+ * @param vmdq
+ * VMDq pool index to associate address with (ignored).
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
+ uint32_t index, uint32_t vmdq __rte_unused)
+{
+ int ret;
+
+ if (index >= MLX5_MAX_UC_MAC_ADDRESSES) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ ret = mlx5_internal_mac_addr_add(dev, mac, index);
+ if (ret < 0)
+ return ret;
if (!dev->data->promiscuous)
- mlx5_traffic_restart(dev);
- return ret;
+ return mlx5_traffic_restart(dev);
+ return 0;
}
/**
@@ -118,10 +190,43 @@ mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
* Pointer to Ethernet device structure.
* @param mac_addr
* MAC address to register.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-void
+int
mlx5_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
- DEBUG("%p: setting primary MAC address", (void *)dev);
- mlx5_mac_addr_add(dev, mac_addr, 0, 0);
+ DRV_LOG(DEBUG, "port %u setting primary MAC address",
+ dev->data->port_id);
+ return mlx5_mac_addr_add(dev, mac_addr, 0, 0);
+}
+
+/**
+ * DPDK callback to set multicast addresses list.
+ *
+ * @see rte_eth_dev_set_mc_addr_list()
+ */
+int
+mlx5_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set, uint32_t nb_mc_addr)
+{
+ uint32_t i;
+ int ret;
+
+ if (nb_mc_addr >= MLX5_MAX_MC_MAC_ADDRESSES) {
+ rte_errno = ENOSPC;
+ return -rte_errno;
+ }
+ for (i = MLX5_MAX_UC_MAC_ADDRESSES; i != MLX5_MAX_MAC_ADDRESSES; ++i)
+ mlx5_internal_mac_addr_remove(dev, i);
+ i = MLX5_MAX_UC_MAC_ADDRESSES;
+ while (nb_mc_addr--) {
+ ret = mlx5_internal_mac_addr_add(dev, mc_addr_set++, i++);
+ if (ret)
+ return ret;
+ }
+ if (!dev->data->promiscuous)
+ return mlx5_traffic_restart(dev);
+ return 0;
}
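mlx5_set_mc_addr_list() is the backend for the generic rte_eth_dev_set_mc_addr_list() ethdev call. As a usage reference, a small application-side sketch is shown below, assuming the pre-rename struct ether_addr API used elsewhere in this patch; the two multicast addresses are illustrative placeholders:

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_ether.h>

/* Subscribe a port to two illustrative multicast MAC addresses. */
static int
subscribe_mc(uint16_t port_id)
{
	struct ether_addr mc[2] = {
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
	};
	int ret = rte_eth_dev_set_mc_addr_list(port_id, mc, 2);

	if (ret < 0)
		printf("port %u: cannot set MC list (%d)\n", port_id, ret);
	return ret;
}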
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 857dfcd8..08105a44 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2016 6WIND S.A.
- * Copyright 2016 Mellanox.
+ * Copyright 2016 Mellanox Technologies, Ltd
*/
#ifdef PEDANTIC
@@ -13,362 +13,1183 @@
#include <rte_mempool.h>
#include <rte_malloc.h>
+#include <rte_rwlock.h>
#include "mlx5.h"
+#include "mlx5_mr.h"
#include "mlx5_rxtx.h"
#include "mlx5_glue.h"
-struct mlx5_check_mempool_data {
+struct mr_find_contig_memsegs_data {
+ uintptr_t addr;
+ uintptr_t start;
+ uintptr_t end;
+ const struct rte_memseg_list *msl;
+};
+
+struct mr_update_mp_data {
+ struct rte_eth_dev *dev;
+ struct mlx5_mr_ctrl *mr_ctrl;
int ret;
- char *start;
- char *end;
};
-/* Called by mlx5_check_mempool() when iterating the memory chunks. */
-static void
-mlx5_check_mempool_cb(struct rte_mempool *mp,
- void *opaque, struct rte_mempool_memhdr *memhdr,
- unsigned int mem_idx)
+/**
+ * Expand B-tree table to a given size. Can't be called with holding
+ * memory_hotplug_lock or priv->mr.rwlock due to rte_realloc().
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ * @param n
+ * Number of entries for expansion.
+ *
+ * @return
+ * 0 on success, -1 on failure.
+ */
+static int
+mr_btree_expand(struct mlx5_mr_btree *bt, int n)
{
- struct mlx5_check_mempool_data *data = opaque;
+ void *mem;
+ int ret = 0;
- (void)mp;
- (void)mem_idx;
+ if (n <= bt->size)
+ return ret;
+ /*
+ * The downside of using rte_realloc() directly is that SOCKET_ID_ANY is
+ * used internally when there is no room to expand in place. Since this
+ * is a rare case on a very slow path, it is acceptable.
+ * Initially cache_bh[] is given practically enough space, so once it has
+ * been expanded, further expansion should never be needed.
+ */
+ mem = rte_realloc(bt->table, n * sizeof(struct mlx5_mr_cache), 0);
+ if (mem == NULL) {
+ /* Not an error, B-tree search will be skipped. */
+ DRV_LOG(WARNING, "failed to expand MR B-tree (%p) table",
+ (void *)bt);
+ ret = -1;
+ } else {
+ DRV_LOG(DEBUG, "expanded MR B-tree table (size=%u)", n);
+ bt->table = mem;
+ bt->size = n;
+ }
+ return ret;
+}
- /* It already failed, skip the next chunks. */
- if (data->ret != 0)
- return;
- /* It is the first chunk. */
- if (data->start == NULL && data->end == NULL) {
- data->start = memhdr->addr;
- data->end = data->start + memhdr->len;
- return;
+/**
+ * Look up LKey from given B-tree lookup table, store the last index and return
+ * searched LKey.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ * @param[out] idx
+ * Pointer to index. Even on search failure, returns index where it stops
+ * searching so that index can be used when inserting a new entry.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static uint32_t
+mr_btree_lookup(struct mlx5_mr_btree *bt, uint16_t *idx, uintptr_t addr)
+{
+ struct mlx5_mr_cache *lkp_tbl;
+ uint16_t n;
+ uint16_t base = 0;
+
+ assert(bt != NULL);
+ lkp_tbl = *bt->table;
+ n = bt->len;
+ /* First entry must be NULL for comparison. */
+ assert(bt->len > 0 || (lkp_tbl[0].start == 0 &&
+ lkp_tbl[0].lkey == UINT32_MAX));
+ /* Binary search. */
+ do {
+ register uint16_t delta = n >> 1;
+
+ if (addr < lkp_tbl[base + delta].start) {
+ n = delta;
+ } else {
+ base += delta;
+ n -= delta;
+ }
+ } while (n > 1);
+ assert(addr >= lkp_tbl[base].start);
+ *idx = base;
+ if (addr < lkp_tbl[base].end)
+ return lkp_tbl[base].lkey;
+ /* Not found. */
+ return UINT32_MAX;
+}
+
+/**
+ * Insert an entry to B-tree lookup table.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ * @param entry
+ * Pointer to new entry to insert.
+ *
+ * @return
+ * 0 on success, -1 on failure.
+ */
+static int
+mr_btree_insert(struct mlx5_mr_btree *bt, struct mlx5_mr_cache *entry)
+{
+ struct mlx5_mr_cache *lkp_tbl;
+ uint16_t idx = 0;
+ size_t shift;
+
+ assert(bt != NULL);
+ assert(bt->len <= bt->size);
+ assert(bt->len > 0);
+ lkp_tbl = *bt->table;
+ /* Find out the slot for insertion. */
+ if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
+ DRV_LOG(DEBUG,
+ "abort insertion to B-tree(%p): already exist at"
+ " idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
+ (void *)bt, idx, entry->start, entry->end, entry->lkey);
+ /* Already exist, return. */
+ return 0;
}
- if (data->end == memhdr->addr) {
- data->end += memhdr->len;
- return;
+ /* If table is full, return error. */
+ if (unlikely(bt->len == bt->size)) {
+ bt->overflow = 1;
+ return -1;
}
- if (data->start == (char *)memhdr->addr + memhdr->len) {
- data->start -= memhdr->len;
+ /* Insert entry. */
+ ++idx;
+ shift = (bt->len - idx) * sizeof(struct mlx5_mr_cache);
+ if (shift)
+ memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift);
+ lkp_tbl[idx] = *entry;
+ bt->len++;
+ DRV_LOG(DEBUG,
+ "inserted B-tree(%p)[%u],"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
+ (void *)bt, idx, entry->start, entry->end, entry->lkey);
+ return 0;
+}
+
+/**
+ * Initialize B-tree and allocate memory for lookup table.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ * @param n
+ * Number of entries to allocate.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
+{
+ if (bt == NULL) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ assert(!bt->table && !bt->size);
+ memset(bt, 0, sizeof(*bt));
+ bt->table = rte_calloc_socket("B-tree table",
+ n, sizeof(struct mlx5_mr_cache),
+ 0, socket);
+ if (bt->table == NULL) {
+ rte_errno = ENOMEM;
+ DRV_LOG(ERR,
+ "failed to allocate memory for btree cache on socket %d",
+ socket);
+ return -rte_errno;
+ }
+ bt->size = n;
+ /* First entry must be NULL for binary search. */
+ (*bt->table)[bt->len++] = (struct mlx5_mr_cache) {
+ .lkey = UINT32_MAX,
+ };
+ DRV_LOG(DEBUG, "initialized B-tree %p with table %p",
+ (void *)bt, (void *)bt->table);
+ return 0;
+}
+
+/**
+ * Free B-tree resources.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ */
+void
+mlx5_mr_btree_free(struct mlx5_mr_btree *bt)
+{
+ if (bt == NULL)
+ return;
+ DRV_LOG(DEBUG, "freeing B-tree %p with table %p",
+ (void *)bt, (void *)bt->table);
+ rte_free(bt->table);
+ memset(bt, 0, sizeof(*bt));
+}
+
+/**
+ * Dump all the entries in a B-tree
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ */
+static void
+mlx5_mr_btree_dump(struct mlx5_mr_btree *bt)
+{
+ int idx;
+ struct mlx5_mr_cache *lkp_tbl;
+
+ if (bt == NULL)
return;
+ lkp_tbl = *bt->table;
+ for (idx = 0; idx < bt->len; ++idx) {
+ struct mlx5_mr_cache *entry = &lkp_tbl[idx];
+
+ DRV_LOG(DEBUG,
+ "B-tree(%p)[%u],"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
+ (void *)bt, idx, entry->start, entry->end, entry->lkey);
}
- /* Error, mempool is not virtually contiguous. */
- data->ret = -1;
}
/**
- * Check if a mempool can be used: it must be virtually contiguous.
+ * Find virtually contiguous memory chunk in a given MR.
*
- * @param[in] mp
- * Pointer to memory pool.
- * @param[out] start
- * Pointer to the start address of the mempool virtual memory area
- * @param[out] end
- * Pointer to the end address of the mempool virtual memory area
+ * @param mr
+ * Pointer to MR structure.
+ * @param[out] entry
+ * Pointer to returning MR cache entry. If not found, this will not be
+ * updated.
+ * @param base_idx
+ * Start index of the memseg bitmap.
*
* @return
- * 0 on success (mempool is virtually contiguous), -1 on error.
+ * Next index to go on lookup.
*/
-static int mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
- uintptr_t *end)
+static int
+mr_find_next_chunk(struct mlx5_mr *mr, struct mlx5_mr_cache *entry,
+ int base_idx)
{
- struct mlx5_check_mempool_data data;
+ uintptr_t start = 0;
+ uintptr_t end = 0;
+ uint32_t idx = 0;
- memset(&data, 0, sizeof(data));
- rte_mempool_mem_iter(mp, mlx5_check_mempool_cb, &data);
- *start = (uintptr_t)data.start;
- *end = (uintptr_t)data.end;
+ for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
+ if (rte_bitmap_get(mr->ms_bmp, idx)) {
+ const struct rte_memseg_list *msl;
+ const struct rte_memseg *ms;
- return data.ret;
+ msl = mr->msl;
+ ms = rte_fbarray_get(&msl->memseg_arr,
+ mr->ms_base_idx + idx);
+ assert(msl->page_sz == ms->hugepage_sz);
+ if (!start)
+ start = ms->addr_64;
+ end = ms->addr_64 + ms->hugepage_sz;
+ } else if (start) {
+ /* Passed the end of a fragment. */
+ break;
+ }
+ }
+ if (start) {
+ /* Found one chunk. */
+ entry->start = start;
+ entry->end = end;
+ entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey);
+ }
+ return idx;
}
/**
- * Register a Memory Region (MR) <-> Memory Pool (MP) association in
- * txq->mp2mr[]. If mp2mr[] is full, remove an entry first.
+ * Insert an MR into the global B-tree cache. It may fail when memory is low;
+ * in that case the entry will have to be found by mr_lookup_dev_list() in
+ * mlx5_mr_create() on a cache miss.
*
- * This function should only be called by txq_mp2mr().
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr
+ * Pointer to MR to insert.
*
- * @param priv
- * Pointer to private structure.
- * @param txq
- * Pointer to TX queue structure.
- * @param[in] mp
- * Memory Pool for which a Memory Region lkey must be returned.
- * @param idx
- * Index of the next available entry.
+ * @return
+ * 0 on success, -1 on failure.
+ */
+static int
+mr_insert_dev_cache(struct rte_eth_dev *dev, struct mlx5_mr *mr)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int n;
+
+ DRV_LOG(DEBUG, "port %u inserting MR(%p) to global cache",
+ dev->data->port_id, (void *)mr);
+ for (n = 0; n < mr->ms_bmp_n; ) {
+ struct mlx5_mr_cache entry = { 0, };
+
+ /* Find a contiguous chunk and advance the index. */
+ n = mr_find_next_chunk(mr, &entry, n);
+ if (!entry.end)
+ break;
+ if (mr_btree_insert(&priv->mr.cache, &entry) < 0) {
+ /*
+ * Overflowed, but the global table cannot be expanded
+ * because of deadlock.
+ */
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * Look up address in the original global MR list.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] entry
+ * Pointer to returning MR cache entry. If no match, this will not be updated.
+ * @param addr
+ * Search key.
*
* @return
- * mr on success, NULL on failure.
+ * Found MR on match, NULL otherwise.
*/
-struct mlx5_mr*
-priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *txq,
- struct rte_mempool *mp, unsigned int idx)
+static struct mlx5_mr *
+mr_lookup_dev_list(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
+ uintptr_t addr)
{
- struct mlx5_txq_ctrl *txq_ctrl =
- container_of(txq, struct mlx5_txq_ctrl, txq);
+ struct priv *priv = dev->data->dev_private;
struct mlx5_mr *mr;
- /* Add a new entry, register MR first. */
- DEBUG("%p: discovered new memory pool \"%s\" (%p)",
- (void *)txq_ctrl, mp->name, (void *)mp);
- mr = priv_mr_get(priv, mp);
- if (mr == NULL) {
- if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- DEBUG("Using unregistered mempool 0x%p(%s) in secondary process,"
- " please create mempool before rte_eth_dev_start()",
- (void *)mp, mp->name);
- return NULL;
+ /* Iterate all the existing MRs. */
+ LIST_FOREACH(mr, &priv->mr.mr_list, mr) {
+ unsigned int n;
+
+ if (mr->ms_n == 0)
+ continue;
+ for (n = 0; n < mr->ms_bmp_n; ) {
+ struct mlx5_mr_cache ret = { 0, };
+
+ n = mr_find_next_chunk(mr, &ret, n);
+ if (addr >= ret.start && addr < ret.end) {
+ /* Found. */
+ *entry = ret;
+ return mr;
+ }
}
- mr = priv_mr_new(priv, mp);
- }
- if (unlikely(mr == NULL)) {
- DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
- (void *)txq_ctrl);
- return NULL;
- }
- if (unlikely(idx == RTE_DIM(txq->mp2mr))) {
- /* Table is full, remove oldest entry. */
- DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
- (void *)txq_ctrl);
- --idx;
- priv_mr_release(priv, txq->mp2mr[0]);
- memmove(&txq->mp2mr[0], &txq->mp2mr[1],
- (sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0])));
}
- /* Store the new entry. */
- txq_ctrl->txq.mp2mr[idx] = mr;
- DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32,
- (void *)txq_ctrl, mp->name, (void *)mp,
- txq_ctrl->txq.mp2mr[idx]->lkey);
- return mr;
+ return NULL;
}
/**
- * Register a Memory Region (MR) <-> Memory Pool (MP) association in
- * txq->mp2mr[]. If mp2mr[] is full, remove an entry first.
- *
- * This function should only be called by txq_mp2mr().
+ * Look up address on device.
*
- * @param txq
- * Pointer to TX queue structure.
- * @param[in] mp
- * Memory Pool for which a Memory Region lkey must be returned.
- * @param idx
- * Index of the next available entry.
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] entry
+ * Pointer to returning MR cache entry. If no match, this will not be updated.
+ * @param addr
+ * Search key.
*
* @return
- * mr on success, NULL on failure.
+ * Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
*/
-struct mlx5_mr*
-mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
- unsigned int idx)
+static uint32_t
+mr_lookup_dev(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
+ uintptr_t addr)
{
- struct mlx5_txq_ctrl *txq_ctrl =
- container_of(txq, struct mlx5_txq_ctrl, txq);
+ struct priv *priv = dev->data->dev_private;
+ uint16_t idx;
+ uint32_t lkey = UINT32_MAX;
struct mlx5_mr *mr;
- priv_lock(txq_ctrl->priv);
- mr = priv_txq_mp2mr_reg(txq_ctrl->priv, txq, mp, idx);
- priv_unlock(txq_ctrl->priv);
- return mr;
+ /*
+ * If the global cache has overflowed because it failed to expand the
+ * B-tree table, it can't contain all the existing MRs. In that case the
+ * address has to be searched by traversing the original MR list instead,
+ * which is a very slow path. Otherwise, the global cache is all-inclusive.
+ */
+ if (!unlikely(priv->mr.cache.overflow)) {
+ lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr);
+ if (lkey != UINT32_MAX)
+ *entry = (*priv->mr.cache.table)[idx];
+ } else {
+ /* Falling back to the slowest path. */
+ mr = mr_lookup_dev_list(dev, entry, addr);
+ if (mr != NULL)
+ lkey = entry->lkey;
+ }
+ assert(lkey == UINT32_MAX || (addr >= entry->start &&
+ addr < entry->end));
+ return lkey;
}
-struct mlx5_mp2mr_mbuf_check_data {
- int ret;
-};
+/**
+ * Free MR resources. MR lock must not be held to avoid a deadlock. rte_free()
+ * can raise memory free event and the callback function will spin on the lock.
+ *
+ * @param mr
+ * Pointer to MR to free.
+ */
+static void
+mr_free(struct mlx5_mr *mr)
+{
+ if (mr == NULL)
+ return;
+ DRV_LOG(DEBUG, "freeing MR(%p):", (void *)mr);
+ if (mr->ibv_mr != NULL)
+ claim_zero(mlx5_glue->dereg_mr(mr->ibv_mr));
+ if (mr->ms_bmp != NULL)
+ rte_bitmap_free(mr->ms_bmp);
+ rte_free(mr);
+}
/**
- * Callback function for rte_mempool_obj_iter() to check whether a given
- * mempool object looks like a mbuf.
- *
- * @param[in] mp
- * The mempool pointer
- * @param[in] arg
- * Context data (struct txq_mp2mr_mbuf_check_data). Contains the
- * return value.
- * @param[in] obj
- * Object address.
- * @param index
- * Object index, unused.
+ * Release resources of detached MRs having no online entry.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
*/
static void
-txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
- uint32_t index __rte_unused)
+mlx5_mr_garbage_collect(struct rte_eth_dev *dev)
{
- struct mlx5_mp2mr_mbuf_check_data *data = arg;
- struct rte_mbuf *buf = obj;
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_mr *mr_next;
+ struct mlx5_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);
+ /* Must be called from the primary process. */
+ assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
/*
- * Check whether mbuf structure fits element size and whether mempool
- * pointer is valid.
+ * An MR can't be freed while holding the lock because rte_free() could
+ * invoke the memory free callback, which would deadlock on the same lock.
*/
- if (sizeof(*buf) > mp->elt_size || buf->pool != mp)
- data->ret = -1;
+ rte_rwlock_write_lock(&priv->mr.rwlock);
+ /* Detach the whole free list and release it after unlocking. */
+ free_list = priv->mr.mr_free_list;
+ LIST_INIT(&priv->mr.mr_free_list);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+ /* Release resources. */
+ mr_next = LIST_FIRST(&free_list);
+ while (mr_next != NULL) {
+ struct mlx5_mr *mr = mr_next;
+
+ mr_next = LIST_NEXT(mr, mr);
+ mr_free(mr);
+ }
+}
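The garbage collector above uses a detach-then-free shape: the free list is stolen while holding priv->mr.rwlock, the lock is dropped, and only then are the MRs released, because rte_free() may fire the memory free callback which takes the same lock. A self-contained sketch of that pattern with a plain pthread rwlock and a sys/queue.h list (names are illustrative, not the driver's):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct node {
	LIST_ENTRY(node) entry;
	int id;
};
LIST_HEAD(node_list, node);

static struct node_list free_list = LIST_HEAD_INITIALIZER(free_list);
static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

/* Detach the whole free list under the lock, then release the nodes after
 * unlocking, so a free() callback can never re-enter the lock. */
static void
garbage_collect(void)
{
	struct node_list local;
	struct node *n;

	pthread_rwlock_wrlock(&lock);
	local = free_list;      /* steal the list head */
	LIST_INIT(&free_list);  /* leave an empty list behind */
	pthread_rwlock_unlock(&lock);
	n = LIST_FIRST(&local);
	while (n != NULL) {
		struct node *next = LIST_NEXT(n, entry);

		printf("freeing node %d\n", n->id);
		free(n);
		n = next;
	}
}

int
main(void)
{
	int i;

	for (i = 0; i < 3; ++i) {
		struct node *n = malloc(sizeof(*n));

		n->id = i;
		pthread_rwlock_wrlock(&lock);
		LIST_INSERT_HEAD(&free_list, n, entry);
		pthread_rwlock_unlock(&lock);
	}
	garbage_collect();
	return 0;
}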
+
+/* Called during rte_memseg_contig_walk() by mlx5_mr_create(). */
+static int
+mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
+ const struct rte_memseg *ms, size_t len, void *arg)
+{
+ struct mr_find_contig_memsegs_data *data = arg;
+
+ if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len)
+ return 0;
+ /* Found, save it and stop walking. */
+ data->start = ms->addr_64;
+ data->end = ms->addr_64 + len;
+ data->msl = msl;
+ return 1;
}
/**
- * Iterator function for rte_mempool_walk() to register existing mempools and
- * fill the MP to MR cache of a TX queue.
+ * Create a new global Memory Region (MR) for a missing virtual address.
+ * Register entire virtually contiguous memory chunk around the address.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] entry
+ * Pointer to returning MR cache entry, found in the global cache or newly
+ * created. If failed to create one, this will not be updated.
+ * @param addr
+ * Target virtual address to register.
*
- * @param[in] mp
- * Memory Pool to register.
- * @param *arg
- * Pointer to TX queue structure.
+ * @return
+ * Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
*/
-void
-mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg)
+static uint32_t
+mlx5_mr_create(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
+ uintptr_t addr)
{
- struct priv *priv = (struct priv *)arg;
- struct mlx5_mp2mr_mbuf_check_data data = {
- .ret = 0,
+ struct priv *priv = dev->data->dev_private;
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ const struct rte_memseg_list *msl;
+ const struct rte_memseg *ms;
+ struct mlx5_mr *mr = NULL;
+ size_t len;
+ uint32_t ms_n;
+ uint32_t bmp_size;
+ void *bmp_mem;
+ int ms_idx_shift = -1;
+ unsigned int n;
+ struct mr_find_contig_memsegs_data data = {
+ .addr = addr,
};
- struct mlx5_mr *mr;
+ struct mr_find_contig_memsegs_data data_re;
- /* Register mempool only if the first element looks like a mbuf. */
- if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 ||
- data.ret == -1)
- return;
- mr = priv_mr_get(priv, mp);
- if (mr) {
- priv_mr_release(priv, mr);
- return;
+ DRV_LOG(DEBUG, "port %u creating a MR using address (%p)",
+ dev->data->port_id, (void *)addr);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ DRV_LOG(WARNING,
+ "port %u using address (%p) of unregistered mempool"
+ " in secondary process, please create mempool"
+ " before rte_eth_dev_start()",
+ dev->data->port_id, (void *)addr);
+ rte_errno = EPERM;
+ goto err_nolock;
+ }
+ /*
+ * Release detached MRs, if any. This can't be done while holding either
+ * memory_hotplug_lock or priv->mr.rwlock. MRs on the free list were
+ * detached by the memory free event but could not be released inside the
+ * callback because of the deadlock risk, so releasing resources here is
+ * opportunistic.
+ */
+ mlx5_mr_garbage_collect(dev);
+ /*
+ * Find the contiguous virtual address chunk in use to which the given
+ * address belongs, in order to register the maximum possible range. In
+ * the best case, where mempools are not dynamically recreated and
+ * '--socket-mem' is specified as an EAL option, it is very likely to end
+ * up with only one MR (LKey) per socket and per hugepage size even when
+ * the system memory is highly fragmented.
+ */
+ if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) {
+ DRV_LOG(WARNING,
+ "port %u unable to find virtually contiguous"
+ " chunk for address (%p)."
+ " rte_memseg_contig_walk() failed.",
+ dev->data->port_id, (void *)addr);
+ rte_errno = ENXIO;
+ goto err_nolock;
+ }
+alloc_resources:
+ /* Addresses must be page-aligned. */
+ assert(rte_is_aligned((void *)data.start, data.msl->page_sz));
+ assert(rte_is_aligned((void *)data.end, data.msl->page_sz));
+ msl = data.msl;
+ ms = rte_mem_virt2memseg((void *)data.start, msl);
+ len = data.end - data.start;
+ assert(msl->page_sz == ms->hugepage_sz);
+ /* Number of memsegs in the range. */
+ ms_n = len / msl->page_sz;
+ DRV_LOG(DEBUG,
+ "port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
+ " page_sz=0x%" PRIx64 ", ms_n=%u",
+ dev->data->port_id, (void *)addr,
+ data.start, data.end, msl->page_sz, ms_n);
+ /* Size of memory for bitmap. */
+ bmp_size = rte_bitmap_get_memory_footprint(ms_n);
+ mr = rte_zmalloc_socket(NULL,
+ RTE_ALIGN_CEIL(sizeof(*mr),
+ RTE_CACHE_LINE_SIZE) +
+ bmp_size,
+ RTE_CACHE_LINE_SIZE, msl->socket_id);
+ if (mr == NULL) {
+ DRV_LOG(WARNING,
+ "port %u unable to allocate memory for a new MR of"
+ " address (%p).",
+ dev->data->port_id, (void *)addr);
+ rte_errno = ENOMEM;
+ goto err_nolock;
+ }
+ mr->msl = msl;
+ /*
+ * Save the index of the first memseg and initialize memseg bitmap. To
+ * see if a memseg of ms_idx in the memseg-list is still valid, check:
+ * rte_bitmap_get(mr->bmp, ms_idx - mr->ms_base_idx)
+ */
+ mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
+ bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
+ mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
+ if (mr->ms_bmp == NULL) {
+ DRV_LOG(WARNING,
+ "port %u unable to initialize bitamp for a new MR of"
+ " address (%p).",
+ dev->data->port_id, (void *)addr);
+ rte_errno = EINVAL;
+ goto err_nolock;
+ }
+ /*
+ * Recheck whether the extended contiguous chunk is still valid. Because
+ * memory_hotplug_lock can't be held across memory-related calls on a
+ * critical path, the resource allocation above could not be done under
+ * the lock. If the memory layout has changed by now, try again with just
+ * a single page. If not, go on with the big chunk atomically from here.
+ */
+ rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+ data_re = data;
+ if (len > msl->page_sz &&
+ !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
+ DRV_LOG(WARNING,
+ "port %u unable to find virtually contiguous"
+ " chunk for address (%p)."
+ " rte_memseg_contig_walk() failed.",
+ dev->data->port_id, (void *)addr);
+ rte_errno = ENXIO;
+ goto err_memlock;
+ }
+ if (data.start != data_re.start || data.end != data_re.end) {
+ /*
+ * The extended contiguous chunk has been changed. Try again
+ * with single memseg instead.
+ */
+ data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
+ data.end = data.start + msl->page_sz;
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ mr_free(mr);
+ goto alloc_resources;
+ }
+ assert(data.msl == data_re.msl);
+ rte_rwlock_write_lock(&priv->mr.rwlock);
+ /*
+ * Check that the address is really missing. If another thread has already
+ * created one, or it is not found due to overflow, abort and return.
+ */
+ if (mr_lookup_dev(dev, entry, addr) != UINT32_MAX) {
+ /*
+ * Insert to the global cache table. It may fail due to
+ * low-on-memory. Then, this entry will have to be searched
+ * here again.
+ */
+ mr_btree_insert(&priv->mr.cache, entry);
+ DRV_LOG(DEBUG,
+ "port %u found MR for %p on final lookup, abort",
+ dev->data->port_id, (void *)addr);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ /*
+ * Must be unlocked before calling rte_free() because
+ * mlx5_mr_mem_event_free_cb() can be called inside.
+ */
+ mr_free(mr);
+ return entry->lkey;
}
- priv_mr_new(priv, mp);
+ /*
+ * Trim start and end addresses for verbs MR. Set bits for registering
+ * memsegs but exclude already registered ones. Bitmap can be
+ * fragmented.
+ */
+ for (n = 0; n < ms_n; ++n) {
+ uintptr_t start;
+ struct mlx5_mr_cache ret = { 0, };
+
+ start = data_re.start + n * msl->page_sz;
+ /* Exclude memsegs already registered by other MRs. */
+ if (mr_lookup_dev(dev, &ret, start) == UINT32_MAX) {
+ /*
+ * Start from the first unregistered memseg in the
+ * extended range.
+ */
+ if (ms_idx_shift == -1) {
+ mr->ms_base_idx += n;
+ data.start = start;
+ ms_idx_shift = n;
+ }
+ data.end = start + msl->page_sz;
+ rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift);
+ ++mr->ms_n;
+ }
+ }
+ len = data.end - data.start;
+ mr->ms_bmp_n = len / msl->page_sz;
+ assert(ms_idx_shift + mr->ms_bmp_n <= ms_n);
+ /*
+ * Finally create a verbs MR for the memory chunk. ibv_reg_mr() can be
+ * called with holding the memory lock because it doesn't use
+ * mlx5_alloc_buf_extern() which eventually calls rte_malloc_socket()
+ * through mlx5_alloc_verbs_buf().
+ */
+ mr->ibv_mr = mlx5_glue->reg_mr(priv->pd, (void *)data.start, len,
+ IBV_ACCESS_LOCAL_WRITE);
+ if (mr->ibv_mr == NULL) {
+ DRV_LOG(WARNING,
+ "port %u fail to create a verbs MR for address (%p)",
+ dev->data->port_id, (void *)addr);
+ rte_errno = EINVAL;
+ goto err_mrlock;
+ }
+ assert((uintptr_t)mr->ibv_mr->addr == data.start);
+ assert(mr->ibv_mr->length == len);
+ LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
+ DRV_LOG(DEBUG,
+ "port %u MR CREATED (%p) for %p:\n"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
+ " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
+ dev->data->port_id, (void *)mr, (void *)addr,
+ data.start, data.end, rte_cpu_to_be_32(mr->ibv_mr->lkey),
+ mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
+ /* Insert to the global cache table. */
+ mr_insert_dev_cache(dev, mr);
+ /* Fill in output data. */
+ mr_lookup_dev(dev, entry, addr);
+ /* Lookup can't fail. */
+ assert(entry->lkey != UINT32_MAX);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ return entry->lkey;
+err_mrlock:
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+err_memlock:
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+err_nolock:
+ /*
+ * In case of error, as this can be called from a datapath, a warning
+ * message per error is preferable. Must be unlocked before calling
+ * rte_free() because mlx5_mr_mem_event_free_cb() can be called inside.
+ */
+ mr_free(mr);
+ return UINT32_MAX;
}
/**
- * Register a new memory region from the mempool and store it in the memory
- * region list.
+ * Rebuild the global B-tree cache of device from the original MR list.
*
- * @param priv
- * Pointer to private structure.
- * @param mp
- * Pointer to the memory pool to register.
- * @return
- * The memory region on success.
+ * @param dev
+ * Pointer to Ethernet device.
*/
-struct mlx5_mr*
-priv_mr_new(struct priv *priv, struct rte_mempool *mp)
+static void
+mr_rebuild_dev_cache(struct rte_eth_dev *dev)
{
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
- uintptr_t start;
- uintptr_t end;
- unsigned int i;
+ struct priv *priv = dev->data->dev_private;
struct mlx5_mr *mr;
- mr = rte_zmalloc_socket(__func__, sizeof(*mr), 0, mp->socket_id);
- if (!mr) {
- DEBUG("unable to configure MR, ibv_reg_mr() failed.");
- return NULL;
+ DRV_LOG(DEBUG, "port %u rebuild dev cache[]", dev->data->port_id);
+ /* Flush cache to rebuild. */
+ priv->mr.cache.len = 1;
+ priv->mr.cache.overflow = 0;
+ /* Iterate all the existing MRs. */
+ LIST_FOREACH(mr, &priv->mr.mr_list, mr)
+ if (mr_insert_dev_cache(dev, mr) < 0)
+ return;
+}
+
+/**
+ * Callback for memory free event. Iterate over the freed memsegs and check
+ * whether each belongs to an existing MR. If found, clear the corresponding
+ * bit in the MR's bitmap; as a result, the MR may become fragmented. If it
+ * becomes empty, the MR will be freed later by mlx5_mr_garbage_collect(). Even
+ * if this callback is called from a secondary process, the garbage collector
+ * runs in the primary process because a secondary process can't call
+ * mlx5_mr_create().
+ *
+ * The global cache must be rebuilt if there's any change and this event has to
+ * be propagated to dataplane threads to flush the local caches.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param addr
+ * Address of freed memory.
+ * @param len
+ * Size of freed memory.
+ */
+static void
+mlx5_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
+{
+ struct priv *priv = dev->data->dev_private;
+ const struct rte_memseg_list *msl;
+ struct mlx5_mr *mr;
+ int ms_n;
+ int i;
+ int rebuild = 0;
+
+ DRV_LOG(DEBUG, "port %u free callback: addr=%p, len=%zu",
+ dev->data->port_id, addr, len);
+ msl = rte_mem_virt2memseg_list(addr);
+ /* addr and len must be page-aligned. */
+ assert((uintptr_t)addr == RTE_ALIGN((uintptr_t)addr, msl->page_sz));
+ assert(len == RTE_ALIGN(len, msl->page_sz));
+ ms_n = len / msl->page_sz;
+ rte_rwlock_write_lock(&priv->mr.rwlock);
+ /* Clear bits of freed memsegs from MR. */
+ for (i = 0; i < ms_n; ++i) {
+ const struct rte_memseg *ms;
+ struct mlx5_mr_cache entry;
+ uintptr_t start;
+ int ms_idx;
+ uint32_t pos;
+
+ /* Find MR having this memseg. */
+ start = (uintptr_t)addr + i * msl->page_sz;
+ mr = mr_lookup_dev_list(dev, &entry, start);
+ if (mr == NULL)
+ continue;
+ ms = rte_mem_virt2memseg((void *)start, msl);
+ assert(ms != NULL);
+ assert(msl->page_sz == ms->hugepage_sz);
+ ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
+ pos = ms_idx - mr->ms_base_idx;
+ assert(rte_bitmap_get(mr->ms_bmp, pos));
+ assert(pos < mr->ms_bmp_n);
+ DRV_LOG(DEBUG, "port %u MR(%p): clear bitmap[%u] for addr %p",
+ dev->data->port_id, (void *)mr, pos, (void *)start);
+ rte_bitmap_clear(mr->ms_bmp, pos);
+ if (--mr->ms_n == 0) {
+ LIST_REMOVE(mr, mr);
+ LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);
+ DRV_LOG(DEBUG, "port %u remove MR(%p) from list",
+ dev->data->port_id, (void *)mr);
+ }
+ /*
+ * MR is fragmented or will be freed. The global cache must be
+ * rebuilt.
+ */
+ rebuild = 1;
}
- if (mlx5_check_mempool(mp, &start, &end) != 0) {
- ERROR("mempool %p: not virtually contiguous",
- (void *)mp);
- return NULL;
+ if (rebuild) {
+ mr_rebuild_dev_cache(dev);
+ /*
+ * Flush local caches by propagating invalidation across cores.
+ * rte_smp_wmb() is enough to synchronize this event. If one of
+ * the freed memsegs is seen by another core, it means the memseg
+ * has been re-allocated by the allocator, which happens after this
+ * free call. Therefore, this store instruction (incrementing the
+ * generation below) is guaranteed to be seen by the other core
+ * before that core sees the newly allocated memory.
+ */
+ ++priv->mr.dev_gen;
+ DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
+ priv->mr.dev_gen);
+ rte_smp_wmb();
}
- DEBUG("mempool %p area start=%p end=%p size=%zu",
- (void *)mp, (void *)start, (void *)end,
- (size_t)(end - start));
- /* Save original addresses for exact MR lookup. */
- mr->start = start;
- mr->end = end;
- /* Round start and end to page boundary if found in memory segments. */
- for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
- uintptr_t addr = (uintptr_t)ms[i].addr;
- size_t len = ms[i].len;
- unsigned int align = ms[i].hugepage_sz;
-
- if ((start > addr) && (start < addr + len))
- start = RTE_ALIGN_FLOOR(start, align);
- if ((end > addr) && (end < addr + len))
- end = RTE_ALIGN_CEIL(end, align);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+ if (rebuild && rte_log_get_level(mlx5_logtype) == RTE_LOG_DEBUG)
+ mlx5_mr_dump_dev(dev);
+}
+
+/**
+ * Callback for memory event. This can be called from both primary and
+ * secondary processes.
+ *
+ * @param event_type
+ * Memory event type.
+ * @param addr
+ * Address of memory.
+ * @param len
+ * Size of memory.
+ */
+void
+mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
+ size_t len, void *arg __rte_unused)
+{
+ struct priv *priv;
+ struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list;
+
+ switch (event_type) {
+ case RTE_MEM_EVENT_FREE:
+ rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
+ /* Iterate all the existing mlx5 devices. */
+ LIST_FOREACH(priv, dev_list, mem_event_cb)
+ mlx5_mr_mem_event_free_cb(ETH_DEV(priv), addr, len);
+ rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
+ break;
+ case RTE_MEM_EVENT_ALLOC:
+ default:
+ break;
}
- DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
- (void *)mp, (void *)start, (void *)end,
- (size_t)(end - start));
- mr->mr = mlx5_glue->reg_mr(priv->pd, (void *)start, end - start,
- IBV_ACCESS_LOCAL_WRITE);
- mr->mp = mp;
- mr->lkey = rte_cpu_to_be_32(mr->mr->lkey);
- rte_atomic32_inc(&mr->refcnt);
- DEBUG("%p: new Memory Region %p refcnt: %d", (void *)priv,
- (void *)mr, rte_atomic32_read(&mr->refcnt));
- LIST_INSERT_HEAD(&priv->mr, mr, next);
- return mr;
}
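For context, registration of this callback happens outside this hunk; a minimal sketch, assuming the DPDK 18.05 EAL API rte_mem_event_callback_register() and an illustrative callback name string:

/* Sketch: register the memory event callback with the EAL (assumed wiring). */
static void
mlx5_mr_mem_event_register(void)
{
	/* NULL argument: the callback walks the shared device list itself. */
	if (rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
					    mlx5_mr_mem_event_cb, NULL))
		DRV_LOG(WARNING, "cannot register memory event callback");
}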
/**
- * Search the memory region object in the memory region list.
+ * Look up the address in the global MR cache table. If not found, create a new
+ * MR. Insert the found/created entry into the local bottom-half cache table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param[out] entry
+ * Pointer to returning MR cache entry, found in the global cache or newly
+ * created. If failed to create one, this is not written.
+ * @param addr
+ * Search key.
*
- * @param priv
- * Pointer to private structure.
- * @param mp
- * Pointer to the memory pool to register.
* @return
- * The memory region on success.
+ * Searched LKey on success, UINT32_MAX on no match.
*/
-struct mlx5_mr*
-priv_mr_get(struct priv *priv, struct rte_mempool *mp)
+static uint32_t
+mlx5_mr_lookup_dev(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
+ struct mlx5_mr_cache *entry, uintptr_t addr)
{
- struct mlx5_mr *mr;
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
+ uint16_t idx;
+ uint32_t lkey;
- assert(mp);
- if (LIST_EMPTY(&priv->mr))
- return NULL;
- LIST_FOREACH(mr, &priv->mr, next) {
- if (mr->mp == mp) {
- rte_atomic32_inc(&mr->refcnt);
- DEBUG("Memory Region %p refcnt: %d",
- (void *)mr, rte_atomic32_read(&mr->refcnt));
- return mr;
- }
+ /* If local cache table is full, try to double it. */
+ if (unlikely(bt->len == bt->size))
+ mr_btree_expand(bt, bt->size << 1);
+ /* Look up in the global cache. */
+ rte_rwlock_read_lock(&priv->mr.rwlock);
+ lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr);
+ if (lkey != UINT32_MAX) {
+ /* Found. */
+ *entry = (*priv->mr.cache.table)[idx];
+ rte_rwlock_read_unlock(&priv->mr.rwlock);
+ /*
+ * Update local cache. Even if it fails, return the found entry
+ * to update top-half cache. Next time, this entry will be found
+ * in the global cache.
+ */
+ mr_btree_insert(bt, entry);
+ return lkey;
}
- return NULL;
+ rte_rwlock_read_unlock(&priv->mr.rwlock);
+ /* First time to see the address? Create a new MR. */
+ lkey = mlx5_mr_create(dev, entry, addr);
+ /*
+ * Update the local cache if a new global MR was successfully created.
+ * Even if creation failed, there's no action to take in this datapath
+ * code; the returned LKey is invalid and will eventually make the HW
+ * fail.
+ */
+ if (lkey != UINT32_MAX)
+ mr_btree_insert(bt, entry);
+ return lkey;
}
/**
- * Release the memory region object.
+ * Bottom-half of LKey search on the datapath. First search in cache_bh[]; on a
+ * miss, search in the global MR cache table and add the new entry to the
+ * per-queue local caches.
*
- * @param mr
- * Pointer to memory region to release.
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param addr
+ * Search key.
*
* @return
- * 0 on success, errno on failure.
+ * Searched LKey on success, UINT32_MAX on no match.
*/
-int
-priv_mr_release(struct priv *priv, struct mlx5_mr *mr)
+static uint32_t
+mlx5_mr_addr2mr_bh(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
+ uintptr_t addr)
{
- (void)priv;
- assert(mr);
- DEBUG("Memory Region %p refcnt: %d",
- (void *)mr, rte_atomic32_read(&mr->refcnt));
- if (rte_atomic32_dec_and_test(&mr->refcnt)) {
- claim_zero(mlx5_glue->dereg_mr(mr->mr));
- LIST_REMOVE(mr, next);
- rte_free(mr);
- return 0;
+ uint32_t lkey;
+ uint16_t bh_idx = 0;
+ /* Victim in top-half cache to replace with new entry. */
+ struct mlx5_mr_cache *repl = &mr_ctrl->cache[mr_ctrl->head];
+
+ /* Binary-search MR translation table. */
+ lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
+ /* Update top-half cache. */
+ if (likely(lkey != UINT32_MAX)) {
+ *repl = (*mr_ctrl->cache_bh.table)[bh_idx];
+ } else {
+ /*
+ * If missed in local lookup table, search in the global cache
+ * and local cache_bh[] will be updated inside if possible.
+ * Top-half cache entry will also be updated.
+ */
+ lkey = mlx5_mr_lookup_dev(dev, mr_ctrl, repl, addr);
+ if (unlikely(lkey == UINT32_MAX))
+ return UINT32_MAX;
}
- return EBUSY;
+ /* Update the most recently used entry. */
+ mr_ctrl->mru = mr_ctrl->head;
+ /* Point to the next victim, the oldest. */
+ mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
+ return lkey;
+}
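For illustration, a minimal sketch of the top-half that this bottom-half backs: mlx5_mr_lookup_cache() and mlx5_tx_addr2mr_bh() come from this patch, while the wrapper name mlx5_tx_addr2mr() is an assumption about code outside this hunk.

/* Sketch: top-half LKey lookup in the per-queue linear cache. */
static __rte_always_inline uint32_t
mlx5_tx_addr2mr(struct mlx5_txq_data *txq, uintptr_t addr)
{
	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
	uint32_t lkey;

	/* Linear search with the last-hit index checked first. */
	lkey = mlx5_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
				    MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Miss: take the slow path (B-tree, global cache, MR creation). */
	return mlx5_tx_addr2mr_bh(txq, addr);
}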
+
+/**
+ * Bottom-half of LKey search on Rx.
+ *
+ * @param rxq
+ * Pointer to Rx queue structure.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+uint32_t
+mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr)
+{
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
+ struct priv *priv = rxq_ctrl->priv;
+
+ DRV_LOG(DEBUG,
+ "Rx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
+ rxq_ctrl->idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
+ return mlx5_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
+}
+
+/**
+ * Bottom-half of LKey search on Tx.
+ *
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+uint32_t
+mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)
+{
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq, struct mlx5_txq_ctrl, txq);
+ struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+ struct priv *priv = txq_ctrl->priv;
+
+ DRV_LOG(DEBUG,
+ "Tx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
+ txq_ctrl->idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
+ return mlx5_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
+}
+
+/**
+ * Flush all of the local cache entries.
+ *
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ */
+void
+mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
+{
+ /* Reset the most-recently-used index. */
+ mr_ctrl->mru = 0;
+ /* Reset the linear search array. */
+ mr_ctrl->head = 0;
+ memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache));
+ /* Reset the B-tree table. */
+ mr_ctrl->cache_bh.len = 1;
+ mr_ctrl->cache_bh.overflow = 0;
+ /* Update the generation number. */
+ mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr;
+ DRV_LOG(DEBUG, "mr_ctrl(%p): flushed, cur_gen=%d",
+ (void *)mr_ctrl, mr_ctrl->cur_gen);
+}
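A hedged sketch of how datapath code is expected to consume the generation numbers before touching the caches; the helper below is illustrative and not part of this hunk.

/* Sketch: flush the local caches when the device generation has moved on. */
static __rte_always_inline void
mlx5_mr_sync_gen(struct mlx5_mr_ctrl *mr_ctrl)
{
	/* dev_gen is incremented after rte_smp_wmb() in the free callback. */
	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
		mlx5_mr_flush_local_cache(mr_ctrl);
}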
+
+/* Called during rte_mempool_mem_iter() by mlx5_mr_update_mp(). */
+static void
+mlx5_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque,
+ struct rte_mempool_memhdr *memhdr,
+ unsigned mem_idx __rte_unused)
+{
+ struct mr_update_mp_data *data = opaque;
+ uint32_t lkey;
+
+ /* Stop iteration if failed in the previous walk. */
+ if (data->ret < 0)
+ return;
+ /* Register address of the chunk and update local caches. */
+ lkey = mlx5_mr_addr2mr_bh(data->dev, data->mr_ctrl,
+ (uintptr_t)memhdr->addr);
+ if (lkey == UINT32_MAX)
+ data->ret = -1;
}
/**
- * Verify the flow list is empty
+ * Register entire memory chunks in a Mempool.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param mp
+ * Pointer to registering Mempool.
*
- * @return the number of object not released.
+ * @return
+ * 0 on success, -1 on failure.
*/
int
-priv_mr_verify(struct priv *priv)
+mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
+ struct rte_mempool *mp)
{
- int ret = 0;
+ struct mr_update_mp_data data = {
+ .dev = dev,
+ .mr_ctrl = mr_ctrl,
+ .ret = 0,
+ };
+
+ rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data);
+ return data.ret;
+}
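As a usage sketch, Rx queue setup code would be expected to pre-register its mempool roughly as follows; the rxq_ctrl and mp variables are assumptions, not lines from this patch.

	/* Pre-register every chunk of the Rx mempool to avoid datapath misses. */
	if (mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp) < 0) {
		/* rte_errno is expected to be set by the bottom-half on failure. */
		DRV_LOG(ERR, "port %u failed to register mempool %s",
			dev->data->port_id, mp->name);
		return -rte_errno;
	}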
+
+/**
+ * Dump all the created MRs and the global cache entries.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_mr_dump_dev(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_mr *mr;
+ int mr_n = 0;
+ int chunk_n = 0;
+
+ rte_rwlock_read_lock(&priv->mr.rwlock);
+ /* Iterate all the existing MRs. */
+ LIST_FOREACH(mr, &priv->mr.mr_list, mr) {
+ unsigned int n;
- LIST_FOREACH(mr, &priv->mr, next) {
- DEBUG("%p: mr %p still referenced", (void *)priv,
- (void *)mr);
- ++ret;
+ DRV_LOG(DEBUG,
+ "port %u MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
+ dev->data->port_id, mr_n++,
+ rte_cpu_to_be_32(mr->ibv_mr->lkey),
+ mr->ms_n, mr->ms_bmp_n);
+ if (mr->ms_n == 0)
+ continue;
+ for (n = 0; n < mr->ms_bmp_n; ) {
+ struct mlx5_mr_cache ret = { 0, };
+
+ n = mr_find_next_chunk(mr, &ret, n);
+ if (!ret.end)
+ break;
+ DRV_LOG(DEBUG,
+ " chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
+ chunk_n++, ret.start, ret.end);
+ }
}
- return ret;
+ DRV_LOG(DEBUG, "port %u dumping global cache", dev->data->port_id);
+ mlx5_mr_btree_dump(&priv->mr.cache);
+ rte_rwlock_read_unlock(&priv->mr.rwlock);
+}
+
+/**
+ * Release all the created MRs and resources. Remove device from memory callback
+ * list.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_mr_release(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_mr *mr_next = LIST_FIRST(&priv->mr.mr_list);
+
+ /* Remove from memory callback device list. */
+ rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
+ LIST_REMOVE(priv, mem_event_cb);
+ rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
+ if (rte_log_get_level(mlx5_logtype) == RTE_LOG_DEBUG)
+ mlx5_mr_dump_dev(dev);
+ rte_rwlock_write_lock(&priv->mr.rwlock);
+ /* Detach from MR list and move to free list. */
+ while (mr_next != NULL) {
+ struct mlx5_mr *mr = mr_next;
+
+ mr_next = LIST_NEXT(mr, mr);
+ LIST_REMOVE(mr, mr);
+ LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);
+ }
+ LIST_INIT(&priv->mr.mr_list);
+ /* Free global cache. */
+ mlx5_mr_btree_free(&priv->mr.cache);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+ /* Free all remaining MRs. */
+ mlx5_mr_garbage_collect(dev);
}
diff --git a/drivers/net/mlx5/mlx5_mr.h b/drivers/net/mlx5/mlx5_mr.h
new file mode 100644
index 00000000..e0b28215
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_mr.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#ifndef RTE_PMD_MLX5_MR_H_
+#define RTE_PMD_MLX5_MR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/queue.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#include <infiniband/mlx5dv.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_eal_memconfig.h>
+#include <rte_ethdev.h>
+#include <rte_rwlock.h>
+#include <rte_bitmap.h>
+
+/* Memory Region object. */
+struct mlx5_mr {
+ LIST_ENTRY(mlx5_mr) mr; /**< Pointer to the prev/next entry. */
+ struct ibv_mr *ibv_mr; /* Verbs Memory Region. */
+ const struct rte_memseg_list *msl;
+ int ms_base_idx; /* Start index of msl->memseg_arr[]. */
+ int ms_n; /* Number of memsegs in use. */
+ uint32_t ms_bmp_n; /* Number of bits in memsegs bit-mask. */
+ struct rte_bitmap *ms_bmp; /* Bit-mask of memsegs belonging to this MR. */
+};
+
+/* Cache entry for Memory Region. */
+struct mlx5_mr_cache {
+ uintptr_t start; /* Start address of MR. */
+ uintptr_t end; /* End address of MR. */
+ uint32_t lkey; /* rte_cpu_to_be_32(ibv_mr->lkey). */
+} __rte_packed;
+
+/* MR Cache table for Binary search. */
+struct mlx5_mr_btree {
+ uint16_t len; /* Number of entries. */
+ uint16_t size; /* Total number of entries. */
+ int overflow; /* Mark failure of table expansion. */
+ struct mlx5_mr_cache (*table)[];
+} __rte_packed;
+
+/* Per-queue MR control descriptor. */
+struct mlx5_mr_ctrl {
+ uint32_t *dev_gen_ptr; /* Generation number of device to poll. */
+ uint32_t cur_gen; /* Generation number saved to flush caches. */
+ uint16_t mru; /* Index of last hit entry in top-half cache. */
+ uint16_t head; /* Index of the oldest entry in top-half cache. */
+ struct mlx5_mr_cache cache[MLX5_MR_CACHE_N]; /* Cache for top-half. */
+ struct mlx5_mr_btree cache_bh; /* Cache for bottom-half. */
+} __rte_packed;
+
+extern struct mlx5_dev_list mlx5_mem_event_cb_list;
+extern rte_rwlock_t mlx5_mem_event_rwlock;
+
+/* First entry must be NULL for comparison. */
+#define mlx5_mr_btree_len(bt) ((bt)->len - 1)
+
+int mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket);
+void mlx5_mr_btree_free(struct mlx5_mr_btree *bt);
+void mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
+ size_t len, void *arg);
+int mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
+ struct rte_mempool *mp);
+void mlx5_mr_dump_dev(struct rte_eth_dev *dev);
+void mlx5_mr_release(struct rte_eth_dev *dev);
+
+/**
+ * Look up the LKey in the given lookup table by linear search. First check the
+ * last-hit entry; on a miss, the entire array is searched. If found, update the
+ * last-hit index and return the LKey.
+ *
+ * @param lkp_tbl
+ * Pointer to lookup table.
+ * @param[in,out] cached_idx
+ * Pointer to last-hit index.
+ * @param n
+ * Size of lookup table.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static __rte_always_inline uint32_t
+mlx5_mr_lookup_cache(struct mlx5_mr_cache *lkp_tbl, uint16_t *cached_idx,
+ uint16_t n, uintptr_t addr)
+{
+ uint16_t idx;
+
+ if (likely(addr >= lkp_tbl[*cached_idx].start &&
+ addr < lkp_tbl[*cached_idx].end))
+ return lkp_tbl[*cached_idx].lkey;
+ for (idx = 0; idx < n && lkp_tbl[idx].start != 0; ++idx) {
+ if (addr >= lkp_tbl[idx].start &&
+ addr < lkp_tbl[idx].end) {
+ /* Found. */
+ *cached_idx = idx;
+ return lkp_tbl[idx].lkey;
+ }
+ }
+ return UINT32_MAX;
+}
+
+#endif /* RTE_PMD_MLX5_MR_H_ */
diff --git a/drivers/net/mlx5/mlx5_nl.c b/drivers/net/mlx5/mlx5_nl.c
new file mode 100644
index 00000000..dca85835
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_nl.c
@@ -0,0 +1,627 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+#include <unistd.h>
+
+#include "mlx5.h"
+#include "mlx5_utils.h"
+
+/* Size of the buffer to receive kernel messages */
+#define MLX5_NL_BUF_SIZE (32 * 1024)
+/* Send buffer size for the Netlink socket */
+#define MLX5_SEND_BUF_SIZE 32768
+/* Receive buffer size for the Netlink socket */
+#define MLX5_RECV_BUF_SIZE 32768
+
+/*
+ * Define NDA_RTA as defined in iproute2 sources.
+ *
+ * See the iproute2 sources, file include/libnetlink.h.
+ */
+#ifndef MLX5_NDA_RTA
+#define MLX5_NDA_RTA(r) \
+ ((struct rtattr *)(((char *)(r)) + NLMSG_ALIGN(sizeof(struct ndmsg))))
+#endif
+
+/* Add/remove MAC address through Netlink */
+struct mlx5_nl_mac_addr {
+ struct ether_addr (*mac)[];
+ /**< MAC address handled by the device. */
+ int mac_n; /**< Number of addresses in the array. */
+};
+
+/**
+ * Opens a Netlink socket.
+ *
+ * @param nl_groups
+ * Netlink group value (e.g. RTMGRP_LINK).
+ *
+ * @return
+ * A file descriptor on success, a negative errno value otherwise and
+ * rte_errno is set.
+ */
+int
+mlx5_nl_init(uint32_t nl_groups)
+{
+ int fd;
+ int sndbuf_size = MLX5_SEND_BUF_SIZE;
+ int rcvbuf_size = MLX5_RECV_BUF_SIZE;
+ struct sockaddr_nl local = {
+ .nl_family = AF_NETLINK,
+ .nl_groups = nl_groups,
+ };
+ int ret;
+
+ fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_ROUTE);
+ if (fd == -1) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ ret = setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuf_size, sizeof(int));
+ if (ret == -1) {
+ rte_errno = errno;
+ goto error;
+ }
+ ret = setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf_size, sizeof(int));
+ if (ret == -1) {
+ rte_errno = errno;
+ goto error;
+ }
+ ret = bind(fd, (struct sockaddr *)&local, sizeof(local));
+ if (ret == -1) {
+ rte_errno = errno;
+ goto error;
+ }
+ return fd;
+error:
+ close(fd);
+ return -rte_errno;
+}
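A usage sketch, assuming the probe code stores the descriptor in priv->nl_socket as the rest of this file expects:

	/* Open a NETLINK_ROUTE socket subscribed to link notifications. */
	priv->nl_socket = mlx5_nl_init(RTMGRP_LINK);
	if (priv->nl_socket < 0)
		DRV_LOG(WARNING, "port %u cannot open Netlink socket: %s",
			dev->data->port_id, strerror(rte_errno));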
+
+/**
+ * Send a request message to the kernel on the Netlink socket.
+ *
+ * @param[in] nlsk_fd
+ * Netlink socket file descriptor.
+ * @param[in] nh
+ * The Netlink message to send to the kernel.
+ * @param[in] sn
+ * Sequence number.
+ * @param[in] req
+ * Pointer to the request structure.
+ * @param[in] len
+ * Length of the request in bytes.
+ *
+ * @return
+ * The number of sent bytes on success, a negative errno value otherwise and
+ * rte_errno is set.
+ */
+static int
+mlx5_nl_request(int nlsk_fd, struct nlmsghdr *nh, uint32_t sn, void *req,
+ int len)
+{
+ struct sockaddr_nl sa = {
+ .nl_family = AF_NETLINK,
+ };
+ struct iovec iov[2] = {
+ { .iov_base = nh, .iov_len = sizeof(*nh), },
+ { .iov_base = req, .iov_len = len, },
+ };
+ struct msghdr msg = {
+ .msg_name = &sa,
+ .msg_namelen = sizeof(sa),
+ .msg_iov = iov,
+ .msg_iovlen = 2,
+ };
+ int send_bytes;
+
+ nh->nlmsg_pid = 0; /* communication with the kernel uses pid 0 */
+ nh->nlmsg_seq = sn;
+ send_bytes = sendmsg(nlsk_fd, &msg, 0);
+ if (send_bytes < 0) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ return send_bytes;
+}
+
+/**
+ * Send a message to the kernel on the Netlink socket.
+ *
+ * @param[in] nlsk_fd
+ * The Netlink socket file descriptor used for communication.
+ * @param[in] nh
+ * The Netlink message to send to the kernel.
+ * @param[in] sn
+ * Sequence number.
+ *
+ * @return
+ * The number of sent bytes on success, a negative errno value otherwise and
+ * rte_errno is set.
+ */
+static int
+mlx5_nl_send(int nlsk_fd, struct nlmsghdr *nh, uint32_t sn)
+{
+ struct sockaddr_nl sa = {
+ .nl_family = AF_NETLINK,
+ };
+ struct iovec iov = {
+ .iov_base = nh,
+ .iov_len = nh->nlmsg_len,
+ };
+ struct msghdr msg = {
+ .msg_name = &sa,
+ .msg_namelen = sizeof(sa),
+ .msg_iov = &iov,
+ .msg_iovlen = 1,
+ };
+ int send_bytes;
+
+ nh->nlmsg_pid = 0; /* communication with the kernel uses pid 0 */
+ nh->nlmsg_seq = sn;
+ send_bytes = sendmsg(nlsk_fd, &msg, 0);
+ if (send_bytes < 0) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ return send_bytes;
+}
+
+/**
+ * Receive a message from the kernel on the Netlink socket, following
+ * mlx5_nl_send().
+ *
+ * @param[in] nlsk_fd
+ * The Netlink socket file descriptor used for communication.
+ * @param[in] sn
+ * Sequence number.
+ * @param[in] cb
+ * The callback function to call for each Netlink message received.
+ * @param[in, out] arg
+ * Custom arguments for the callback.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_recv(int nlsk_fd, uint32_t sn, int (*cb)(struct nlmsghdr *, void *arg),
+ void *arg)
+{
+ struct sockaddr_nl sa;
+ char buf[MLX5_RECV_BUF_SIZE];
+ struct iovec iov = {
+ .iov_base = buf,
+ .iov_len = sizeof(buf),
+ };
+ struct msghdr msg = {
+ .msg_name = &sa,
+ .msg_namelen = sizeof(sa),
+ .msg_iov = &iov,
+ /* One message at a time */
+ .msg_iovlen = 1,
+ };
+ int multipart = 0;
+ int ret = 0;
+
+ do {
+ struct nlmsghdr *nh;
+ int recv_bytes = 0;
+
+ do {
+ recv_bytes = recvmsg(nlsk_fd, &msg, 0);
+ if (recv_bytes == -1) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ nh = (struct nlmsghdr *)buf;
+ } while (nh->nlmsg_seq != sn);
+ for (;
+ NLMSG_OK(nh, (unsigned int)recv_bytes);
+ nh = NLMSG_NEXT(nh, recv_bytes)) {
+ if (nh->nlmsg_type == NLMSG_ERROR) {
+ struct nlmsgerr *err_data = NLMSG_DATA(nh);
+
+ if (err_data->error < 0) {
+ rte_errno = -err_data->error;
+ return -rte_errno;
+ }
+ /* Ack message. */
+ return 0;
+ }
+ /* Multi-part msgs and their trailing DONE message. */
+ if (nh->nlmsg_flags & NLM_F_MULTI) {
+ if (nh->nlmsg_type == NLMSG_DONE)
+ return 0;
+ multipart = 1;
+ }
+ if (cb) {
+ ret = cb(nh, arg);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ } while (multipart);
+ return ret;
+}
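To illustrate the callback contract (return 0 to continue, a negative value to abort the walk), a hedged example callback that merely counts messages; it is not used anywhere in this patch.

/* Illustrative Netlink callback: count the messages of one reply. */
static int
mlx5_nl_count_cb(struct nlmsghdr *nh __rte_unused, void *arg)
{
	unsigned int *count = arg;

	++*count;
	return 0;
}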
+
+/**
+ * Parse Netlink message to retrieve the bridge MAC address.
+ *
+ * @param nh
+ * Pointer to Netlink Message Header.
+ * @param arg
+ * PMD data registered with this callback.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_mac_addr_cb(struct nlmsghdr *nh, void *arg)
+{
+ struct mlx5_nl_mac_addr *data = arg;
+ struct ndmsg *r = NLMSG_DATA(nh);
+ struct rtattr *attribute;
+ int len;
+
+ len = nh->nlmsg_len - NLMSG_LENGTH(sizeof(*r));
+ for (attribute = MLX5_NDA_RTA(r);
+ RTA_OK(attribute, len);
+ attribute = RTA_NEXT(attribute, len)) {
+ if (attribute->rta_type == NDA_LLADDR) {
+ if (data->mac_n == MLX5_MAX_MAC_ADDRESSES) {
+ DRV_LOG(WARNING,
+ "not enough room to finalize the"
+ " request");
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+#ifndef NDEBUG
+ char m[18];
+
+ ether_format_addr(m, 18, RTA_DATA(attribute));
+ DRV_LOG(DEBUG, "bridge MAC address %s", m);
+#endif
+ memcpy(&(*data->mac)[data->mac_n++],
+ RTA_DATA(attribute), ETHER_ADDR_LEN);
+ }
+ }
+ return 0;
+}
+
+/**
+ * Get bridge MAC addresses.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] mac
+ * Pointer to the array of MAC addresses to fill.
+ * Its size should be MLX5_MAX_MAC_ADDRESSES.
+ * @param[out] mac_n
+ * Number of entries filled in the MAC array.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_mac_addr_list(struct rte_eth_dev *dev, struct ether_addr (*mac)[],
+ int *mac_n)
+{
+ struct priv *priv = dev->data->dev_private;
+ int iface_idx = mlx5_ifindex(dev);
+ struct {
+ struct nlmsghdr hdr;
+ struct ifinfomsg ifm;
+ } req = {
+ .hdr = {
+ .nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
+ .nlmsg_type = RTM_GETNEIGH,
+ .nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
+ },
+ .ifm = {
+ .ifi_family = PF_BRIDGE,
+ .ifi_index = iface_idx,
+ },
+ };
+ struct mlx5_nl_mac_addr data = {
+ .mac = mac,
+ .mac_n = 0,
+ };
+ int fd;
+ int ret;
+ uint32_t sn = priv->nl_sn++;
+
+ if (priv->nl_socket == -1)
+ return 0;
+ fd = priv->nl_socket;
+ ret = mlx5_nl_request(fd, &req.hdr, sn, &req.ifm,
+ sizeof(struct ifinfomsg));
+ if (ret < 0)
+ goto error;
+ ret = mlx5_nl_recv(fd, sn, mlx5_nl_mac_addr_cb, &data);
+ if (ret < 0)
+ goto error;
+ *mac_n = data.mac_n;
+ return 0;
+error:
+ DRV_LOG(DEBUG, "port %u cannot retrieve MAC address list %s",
+ dev->data->port_id, strerror(rte_errno));
+ return -rte_errno;
+}
+
+/**
+ * Modify the MAC address neighbour table with Netlink.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mac
+ * MAC address to consider.
+ * @param add
+ * 1 to add the MAC address, 0 to remove the MAC address.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_mac_addr_modify(struct rte_eth_dev *dev, struct ether_addr *mac,
+ int add)
+{
+ struct priv *priv = dev->data->dev_private;
+ int iface_idx = mlx5_ifindex(dev);
+ struct {
+ struct nlmsghdr hdr;
+ struct ndmsg ndm;
+ struct rtattr rta;
+ uint8_t buffer[ETHER_ADDR_LEN];
+ } req = {
+ .hdr = {
+ .nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg)),
+ .nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE |
+ NLM_F_EXCL | NLM_F_ACK,
+ .nlmsg_type = add ? RTM_NEWNEIGH : RTM_DELNEIGH,
+ },
+ .ndm = {
+ .ndm_family = PF_BRIDGE,
+ .ndm_state = NUD_NOARP | NUD_PERMANENT,
+ .ndm_ifindex = iface_idx,
+ .ndm_flags = NTF_SELF,
+ },
+ .rta = {
+ .rta_type = NDA_LLADDR,
+ .rta_len = RTA_LENGTH(ETHER_ADDR_LEN),
+ },
+ };
+ int fd;
+ int ret;
+ uint32_t sn = priv->nl_sn++;
+
+ if (priv->nl_socket == -1)
+ return 0;
+ fd = priv->nl_socket;
+ memcpy(RTA_DATA(&req.rta), mac, ETHER_ADDR_LEN);
+ req.hdr.nlmsg_len = NLMSG_ALIGN(req.hdr.nlmsg_len) +
+ RTA_ALIGN(req.rta.rta_len);
+ ret = mlx5_nl_send(fd, &req.hdr, sn);
+ if (ret < 0)
+ goto error;
+ ret = mlx5_nl_recv(fd, sn, NULL, NULL);
+ if (ret < 0)
+ goto error;
+ return 0;
+error:
+ DRV_LOG(DEBUG,
+ "port %u cannot %s MAC address %02X:%02X:%02X:%02X:%02X:%02X"
+ " %s",
+ dev->data->port_id,
+ add ? "add" : "remove",
+ mac->addr_bytes[0], mac->addr_bytes[1],
+ mac->addr_bytes[2], mac->addr_bytes[3],
+ mac->addr_bytes[4], mac->addr_bytes[5],
+ strerror(rte_errno));
+ return -rte_errno;
+}
+
+/**
+ * Add a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mac
+ * MAC address to register.
+ * @param index
+ * MAC address index.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
+ uint32_t index)
+{
+ struct priv *priv = dev->data->dev_private;
+ int ret;
+
+ ret = mlx5_nl_mac_addr_modify(dev, mac, 1);
+ if (!ret)
+ BITFIELD_SET(priv->mac_own, index);
+ if (ret == -EEXIST)
+ return 0;
+ return ret;
+}
+
+/**
+ * Remove a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mac
+ * MAC address to remove.
+ * @param index
+ * MAC address index.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_mac_addr_remove(struct rte_eth_dev *dev, struct ether_addr *mac,
+ uint32_t index)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ BITFIELD_RESET(priv->mac_own, index);
+ return mlx5_nl_mac_addr_modify(dev, mac, 0);
+}
+
+/**
+ * Synchronize Netlink bridge table to the internal table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_nl_mac_addr_sync(struct rte_eth_dev *dev)
+{
+ struct ether_addr macs[MLX5_MAX_MAC_ADDRESSES];
+ int macs_n = 0;
+ int i;
+ int ret;
+
+ ret = mlx5_nl_mac_addr_list(dev, &macs, &macs_n);
+ if (ret)
+ return;
+ for (i = 0; i != macs_n; ++i) {
+ int j;
+
+ /* Verify the address is not in the array yet. */
+ for (j = 0; j != MLX5_MAX_MAC_ADDRESSES; ++j)
+ if (is_same_ether_addr(&macs[i],
+ &dev->data->mac_addrs[j]))
+ break;
+ if (j != MLX5_MAX_MAC_ADDRESSES)
+ continue;
+ /* Find the first entry available. */
+ for (j = 0; j != MLX5_MAX_MAC_ADDRESSES; ++j) {
+ if (is_zero_ether_addr(&dev->data->mac_addrs[j])) {
+ dev->data->mac_addrs[j] = macs[i];
+ break;
+ }
+ }
+ }
+}
+
+/**
+ * Flush all added MAC addresses.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_nl_mac_addr_flush(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ int i;
+
+ for (i = MLX5_MAX_MAC_ADDRESSES - 1; i >= 0; --i) {
+ struct ether_addr *m = &dev->data->mac_addrs[i];
+
+ if (BITFIELD_ISSET(priv->mac_own, i))
+ mlx5_nl_mac_addr_remove(dev, m, i);
+ }
+}
+
+/**
+ * Enable promiscuous / all multicast mode through Netlink.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param flags
+ * IFF_PROMISC for promiscuous, IFF_ALLMULTI for allmulti.
+ * @param enable
+ * Nonzero to enable, disable otherwise.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_device_flags(struct rte_eth_dev *dev, uint32_t flags, int enable)
+{
+ struct priv *priv = dev->data->dev_private;
+ int iface_idx = mlx5_ifindex(dev);
+ struct {
+ struct nlmsghdr hdr;
+ struct ifinfomsg ifi;
+ } req = {
+ .hdr = {
+ .nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
+ .nlmsg_type = RTM_NEWLINK,
+ .nlmsg_flags = NLM_F_REQUEST,
+ },
+ .ifi = {
+ .ifi_flags = enable ? flags : 0,
+ .ifi_change = flags,
+ .ifi_index = iface_idx,
+ },
+ };
+ int fd;
+ int ret;
+
+ assert(!(flags & ~(IFF_PROMISC | IFF_ALLMULTI)));
+ if (priv->nl_socket < 0)
+ return 0;
+ fd = priv->nl_socket;
+ ret = mlx5_nl_send(fd, &req.hdr, priv->nl_sn++);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+/**
+ * Enable promiscuous mode through Netlink.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param enable
+ * Nonzero to enable, disable otherwise.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_promisc(struct rte_eth_dev *dev, int enable)
+{
+ int ret = mlx5_nl_device_flags(dev, IFF_PROMISC, enable);
+
+ if (ret)
+ DRV_LOG(DEBUG,
+ "port %u cannot %s promisc mode: Netlink error %s",
+ dev->data->port_id, enable ? "enable" : "disable",
+ strerror(rte_errno));
+ return ret;
+}
+
+/**
+ * Enable all multicast mode through Netlink.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param enable
+ * Nonzero to enable, disable otherwise.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_allmulti(struct rte_eth_dev *dev, int enable)
+{
+ int ret = mlx5_nl_device_flags(dev, IFF_ALLMULTI, enable);
+
+ if (ret)
+ DRV_LOG(DEBUG,
+ "port %u cannot %s allmulti mode: Netlink error %s",
+ dev->data->port_id, enable ? "enable" : "disable",
+ strerror(rte_errno));
+ return ret;
+}
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index 9eb9c15e..0cf370cd 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2016 6WIND S.A.
- * Copyright 2016 Mellanox.
+ * Copyright 2016 Mellanox Technologies, Ltd
*/
#ifndef RTE_PMD_MLX5_PRM_H_
@@ -107,6 +107,30 @@
/* Inner L4 checksum offload (Tunneled packets only). */
#define MLX5_ETH_WQE_L4_INNER_CSUM (1u << 5)
+/* Outer L4 type is TCP. */
+#define MLX5_ETH_WQE_L4_OUTER_TCP (0u << 5)
+
+/* Outer L4 type is UDP. */
+#define MLX5_ETH_WQE_L4_OUTER_UDP (1u << 5)
+
+/* Outer L3 type is IPV4. */
+#define MLX5_ETH_WQE_L3_OUTER_IPV4 (0u << 4)
+
+/* Outer L3 type is IPV6. */
+#define MLX5_ETH_WQE_L3_OUTER_IPV6 (1u << 4)
+
+/* Inner L4 type is TCP. */
+#define MLX5_ETH_WQE_L4_INNER_TCP (0u << 1)
+
+/* Inner L4 type is UDP. */
+#define MLX5_ETH_WQE_L4_INNER_UDP (1u << 1)
+
+/* Inner L3 type is IPV4. */
+#define MLX5_ETH_WQE_L3_INNER_IPV4 (0u << 0)
+
+/* Inner L3 type is IPV6. */
+#define MLX5_ETH_WQE_L3_INNER_IPV6 (1u << 0)
+
/* Is flow mark valid. */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff00)
@@ -195,6 +219,21 @@ struct mlx5_mpw {
} data;
};
+/* WQE for Multi-Packet RQ. */
+struct mlx5_wqe_mprq {
+ struct mlx5_wqe_srq_next_seg next_seg;
+ struct mlx5_wqe_data_seg dseg;
+};
+
+#define MLX5_MPRQ_LEN_MASK 0x000ffff
+#define MLX5_MPRQ_LEN_SHIFT 0
+#define MLX5_MPRQ_STRIDE_NUM_MASK 0x3fff0000
+#define MLX5_MPRQ_STRIDE_NUM_SHIFT 16
+#define MLX5_MPRQ_FILLER_MASK 0x80000000
+#define MLX5_MPRQ_FILLER_SHIFT 31
+
+#define MLX5_MPRQ_STRIDE_SHIFT_BYTE 2
+
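These masks are presumably applied to the CQE byte count on the Multi-Packet RQ Rx path; a hedged decoding sketch (the cqe variable and surrounding context are assumptions):

	/* Decode an MPRQ completion: packet length, consumed strides, filler. */
	uint32_t byte_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
	uint32_t len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
	uint32_t strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
			    MLX5_MPRQ_STRIDE_NUM_SHIFT;
	int filler = !!(byte_cnt & MLX5_MPRQ_FILLER_MASK);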
/* CQ element structure - should be equal to the cache line size */
struct mlx5_cqe {
#if (RTE_CACHE_LINE_SIZE == 128)
diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c
index d06b0bee..d69b4c09 100644
--- a/drivers/net/mlx5/mlx5_rss.c
+++ b/drivers/net/mlx5/mlx5_rss.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#include <stddef.h>
@@ -35,35 +35,48 @@
* RSS configuration data.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
struct priv *priv = dev->data->dev_private;
- int ret = 0;
+ unsigned int i;
+ unsigned int idx;
- priv_lock(priv);
if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) {
- ret = -EINVAL;
- goto out;
+ rte_errno = EINVAL;
+ return -rte_errno;
}
if (rss_conf->rss_key && rss_conf->rss_key_len) {
+ if (rss_conf->rss_key_len != rss_hash_default_key_len) {
+ DRV_LOG(ERR,
+ "port %u RSS key len must be %zu Bytes long",
+ dev->data->port_id, rss_hash_default_key_len);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
priv->rss_conf.rss_key = rte_realloc(priv->rss_conf.rss_key,
rss_conf->rss_key_len, 0);
if (!priv->rss_conf.rss_key) {
- ret = -ENOMEM;
- goto out;
+ rte_errno = ENOMEM;
+ return -rte_errno;
}
memcpy(priv->rss_conf.rss_key, rss_conf->rss_key,
rss_conf->rss_key_len);
priv->rss_conf.rss_key_len = rss_conf->rss_key_len;
}
priv->rss_conf.rss_hf = rss_conf->rss_hf;
-out:
- priv_unlock(priv);
- return ret;
+ /* Enable the RSS hash in all Rx queues. */
+ for (i = 0, idx = 0; idx != priv->rxqs_n; ++i) {
+ if (!(*priv->rxqs)[i])
+ continue;
+ (*priv->rxqs)[i]->rss_hash = !!rss_conf->rss_hf &&
+ !!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS);
+ ++idx;
+ }
+ return 0;
}
/**
@@ -75,7 +88,7 @@ out:
* RSS configuration data.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
@@ -83,9 +96,10 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
{
struct priv *priv = dev->data->dev_private;
- if (!rss_conf)
- return -EINVAL;
- priv_lock(priv);
+ if (!rss_conf) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
if (rss_conf->rss_key &&
(rss_conf->rss_key_len >= priv->rss_conf.rss_key_len)) {
memcpy(rss_conf->rss_key, priv->rss_conf.rss_key,
@@ -93,24 +107,24 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
}
rss_conf->rss_key_len = priv->rss_conf.rss_key_len;
rss_conf->rss_hf = priv->rss_conf.rss_hf;
- priv_unlock(priv);
return 0;
}
/**
* Allocate/reallocate RETA index table.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
 * @param reta_size
* The size of the array to allocate.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size)
+mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size)
{
+ struct priv *priv = dev->data->dev_private;
void *mem;
unsigned int old_size = priv->reta_idx_n;
@@ -119,11 +133,12 @@ priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size)
mem = rte_realloc(priv->reta_idx,
reta_size * sizeof((*priv->reta_idx)[0]), 0);
- if (!mem)
- return ENOMEM;
+ if (!mem) {
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
priv->reta_idx = mem;
priv->reta_idx_n = reta_size;
-
if (old_size < reta_size)
memset(&(*priv->reta_idx)[old_size], 0,
(reta_size - old_size) *
@@ -132,28 +147,31 @@ priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size)
}
/**
- * Query RETA table.
+ * DPDK callback to get the RETA indirection table.
*
- * @param priv
- * Pointer to private structure.
- * @param[in, out] reta_conf
- * Pointer to the first RETA configuration structure.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param reta_conf
+ * Pointer to RETA configuration structure array.
* @param reta_size
- * Number of entries.
+ * Size of the RETA table.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-priv_dev_rss_reta_query(struct priv *priv,
+int
+mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
- unsigned int reta_size)
+ uint16_t reta_size)
{
+ struct priv *priv = dev->data->dev_private;
unsigned int idx;
unsigned int i;
- if (!reta_size || reta_size > priv->reta_idx_n)
- return EINVAL;
+ if (!reta_size || reta_size > priv->reta_idx_n) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
/* Fill each entry of the table even if its bit is not set. */
for (idx = 0, i = 0; (i != reta_size); ++i) {
idx = i / RTE_RETA_GROUP_SIZE;
@@ -164,34 +182,36 @@ priv_dev_rss_reta_query(struct priv *priv,
}
/**
- * Update RETA table.
+ * DPDK callback to update the RETA indirection table.
*
- * @param priv
- * Pointer to private structure.
- * @param[in] reta_conf
- * Pointer to the first RETA configuration structure.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param reta_conf
+ * Pointer to RETA configuration structure array.
* @param reta_size
- * Number of entries.
+ * Size of the RETA table.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-priv_dev_rss_reta_update(struct priv *priv,
+int
+mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
- unsigned int reta_size)
+ uint16_t reta_size)
{
+ int ret;
+ struct priv *priv = dev->data->dev_private;
unsigned int idx;
unsigned int i;
unsigned int pos;
- int ret;
- if (!reta_size)
- return EINVAL;
- ret = priv_rss_reta_index_resize(priv, reta_size);
+ if (!reta_size) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ ret = mlx5_rss_reta_index_resize(dev, reta_size);
if (ret)
return ret;
-
for (idx = 0, i = 0; (i != reta_size); ++i) {
idx = i / RTE_RETA_GROUP_SIZE;
pos = i % RTE_RETA_GROUP_SIZE;
@@ -200,63 +220,9 @@ priv_dev_rss_reta_update(struct priv *priv,
assert(reta_conf[idx].reta[pos] < priv->rxqs_n);
(*priv->reta_idx)[i] = reta_conf[idx].reta[pos];
}
- return 0;
-}
-
-/**
- * DPDK callback to get the RETA indirection table.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param reta_conf
- * Pointer to RETA configuration structure array.
- * @param reta_size
- * Size of the RETA table.
- *
- * @return
- * 0 on success, negative errno value on failure.
- */
-int
-mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
- struct rte_eth_rss_reta_entry64 *reta_conf,
- uint16_t reta_size)
-{
- int ret;
- struct priv *priv = dev->data->dev_private;
-
- priv_lock(priv);
- ret = priv_dev_rss_reta_query(priv, reta_conf, reta_size);
- priv_unlock(priv);
- return -ret;
-}
-
-/**
- * DPDK callback to update the RETA indirection table.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param reta_conf
- * Pointer to RETA configuration structure array.
- * @param reta_size
- * Size of the RETA table.
- *
- * @return
- * 0 on success, negative errno value on failure.
- */
-int
-mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
- struct rte_eth_rss_reta_entry64 *reta_conf,
- uint16_t reta_size)
-{
- int ret;
- struct priv *priv = dev->data->dev_private;
-
- priv_lock(priv);
- ret = priv_dev_rss_reta_update(priv, reta_conf, reta_size);
- priv_unlock(priv);
if (dev->data->dev_started) {
mlx5_dev_stop(dev);
- mlx5_dev_start(dev);
+ return mlx5_dev_start(dev);
}
- return -ret;
+ return 0;
}
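From the application side this callback is reached through rte_eth_dev_rss_reta_update(); a hedged sketch spreading a 128-entry table over two queues (the table size is an assumption, the real one comes from rte_eth_dev_info_get(), and port_id/ret are assumed to exist):

	/* Sketch: fill a 2 x 64-entry RETA alternating between queues 0 and 1. */
	struct rte_eth_rss_reta_entry64 reta_conf[2];
	unsigned int i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < 128; ++i) {
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
			UINT64_C(1) << (i % RTE_RETA_GROUP_SIZE);
		reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
			i % 2;
	}
	ret = rte_eth_dev_rss_reta_update(port_id, reta_conf, 128);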
diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c
index 4ffc869a..80824bc4 100644
--- a/drivers/net/mlx5/mlx5_rxmode.c
+++ b/drivers/net/mlx5/mlx5_rxmode.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#include <stddef.h>
@@ -32,8 +32,15 @@
void
mlx5_promiscuous_enable(struct rte_eth_dev *dev)
{
+ int ret;
+
dev->data->promiscuous = 1;
- mlx5_traffic_restart(dev);
+ if (((struct priv *)dev->data->dev_private)->config.vf)
+ mlx5_nl_promisc(dev, 1);
+ ret = mlx5_traffic_restart(dev);
+ if (ret)
+ DRV_LOG(ERR, "port %u cannot enable promiscuous mode: %s",
+ dev->data->port_id, strerror(rte_errno));
}
/**
@@ -45,8 +52,15 @@ mlx5_promiscuous_enable(struct rte_eth_dev *dev)
void
mlx5_promiscuous_disable(struct rte_eth_dev *dev)
{
+ int ret;
+
dev->data->promiscuous = 0;
- mlx5_traffic_restart(dev);
+ if (((struct priv *)dev->data->dev_private)->config.vf)
+ mlx5_nl_promisc(dev, 0);
+ ret = mlx5_traffic_restart(dev);
+ if (ret)
+ DRV_LOG(ERR, "port %u cannot disable promiscuous mode: %s",
+ dev->data->port_id, strerror(rte_errno));
}
/**
@@ -58,8 +72,15 @@ mlx5_promiscuous_disable(struct rte_eth_dev *dev)
void
mlx5_allmulticast_enable(struct rte_eth_dev *dev)
{
+ int ret;
+
dev->data->all_multicast = 1;
- mlx5_traffic_restart(dev);
+ if (((struct priv *)dev->data->dev_private)->config.vf)
+ mlx5_nl_allmulti(dev, 1);
+ ret = mlx5_traffic_restart(dev);
+ if (ret)
+ DRV_LOG(ERR, "port %u cannot enable allmulticast mode: %s",
+ dev->data->port_id, strerror(rte_errno));
}
/**
@@ -71,6 +92,13 @@ mlx5_allmulticast_enable(struct rte_eth_dev *dev)
void
mlx5_allmulticast_disable(struct rte_eth_dev *dev)
{
+ int ret;
+
dev->data->all_multicast = 0;
- mlx5_traffic_restart(dev);
+ if (((struct priv *)dev->data->dev_private)->config.vf)
+ mlx5_nl_allmulti(dev, 0);
+ ret = mlx5_traffic_restart(dev);
+ if (ret)
+ DRV_LOG(ERR, "port %u cannot disable allmulticast mode: %s",
+ dev->data->port_id, strerror(rte_errno));
}
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index ff58c492..de3f869e 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#include <stddef.h>
@@ -55,7 +55,124 @@ uint8_t rss_hash_default_key[] = {
const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);
/**
- * Allocate RX queue elements.
+ * Check whether Multi-Packet RQ can be enabled for the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 1 if supported, negative errno value if not.
+ */
+inline int
+mlx5_check_mprq_support(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ if (priv->config.mprq.enabled &&
+ priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
+ return 1;
+ return -ENOTSUP;
+}
+
+/**
+ * Check whether Multi-Packet RQ is enabled for the Rx queue.
+ *
+ * @param rxq
+ * Pointer to receive queue structure.
+ *
+ * @return
+ * 0 if disabled, otherwise enabled.
+ */
+inline int
+mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
+{
+ return rxq->strd_num_n > 0;
+}
+
+/**
+ * Check whether Multi-Packet RQ is enabled for the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 0 if disabled, otherwise enabled.
+ */
+inline int
+mlx5_mprq_enabled(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ uint16_t i;
+ uint16_t n = 0;
+
+ if (mlx5_check_mprq_support(dev) < 0)
+ return 0;
+ /* All the configured queues should be enabled. */
+ for (i = 0; i < priv->rxqs_n; ++i) {
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+
+ if (!rxq)
+ continue;
+ if (mlx5_rxq_mprq_enabled(rxq))
+ ++n;
+ }
+ /* Multi-Packet RQ can't be partially configured. */
+ assert(n == 0 || n == priv->rxqs_n);
+ return n == priv->rxqs_n;
+}
+
+/**
+ * Allocate RX queue elements for Multi-Packet RQ.
+ *
+ * @param rxq_ctrl
+ * Pointer to RX queue structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+ struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
+ unsigned int wqe_n = 1 << rxq->elts_n;
+ unsigned int i;
+ int err;
+
+ /* Iterate on segments. */
+ for (i = 0; i <= wqe_n; ++i) {
+ struct mlx5_mprq_buf *buf;
+
+ if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
+ DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ if (i < wqe_n)
+ (*rxq->mprq_bufs)[i] = buf;
+ else
+ rxq->mprq_repl = buf;
+ }
+ DRV_LOG(DEBUG,
+ "port %u Rx queue %u allocated and configured %u segments",
+ rxq->port_id, rxq_ctrl->idx, wqe_n);
+ return 0;
+error:
+ err = rte_errno; /* Save rte_errno before cleanup. */
+ wqe_n = i;
+ for (i = 0; (i != wqe_n); ++i) {
+ if ((*rxq->mprq_bufs)[i] != NULL)
+ rte_mempool_put(rxq->mprq_mp,
+ (*rxq->mprq_bufs)[i]);
+ (*rxq->mprq_bufs)[i] = NULL;
+ }
+ DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
+ rxq->port_id, rxq_ctrl->idx);
+ rte_errno = err; /* Restore rte_errno. */
+ return -rte_errno;
+}
+
+/**
+ * Allocate RX queue elements for Single-Packet RQ.
*
* @param rxq_ctrl
* Pointer to RX queue structure.
@@ -63,13 +180,13 @@ const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);
* @return
* 0 on success, errno value on failure.
*/
-int
-rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
+static int
+rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
{
const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
unsigned int i;
- int ret = 0;
+ int err;
/* Iterate on segments. */
for (i = 0; (i != elts_n); ++i) {
@@ -77,8 +194,9 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
if (buf == NULL) {
- ERROR("%p: empty mbuf pool", (void *)rxq_ctrl);
- ret = ENOMEM;
+ DRV_LOG(ERR, "port %u empty mbuf pool",
+ PORT_ID(rxq_ctrl->priv));
+ rte_errno = ENOMEM;
goto error;
}
/* Headroom is reserved by rte_pktmbuf_alloc(). */
@@ -97,7 +215,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
(*rxq_ctrl->rxq.elts)[i] = buf;
}
/* If Rx vector is activated. */
- if (rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
+ if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
int j;
@@ -118,30 +236,78 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
(*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
}
- DEBUG("%p: allocated and configured %u segments (max %u packets)",
- (void *)rxq_ctrl, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n));
- assert(ret == 0);
+ DRV_LOG(DEBUG,
+ "port %u Rx queue %u allocated and configured %u segments"
+ " (max %u packets)",
+ PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx, elts_n,
+ elts_n / (1 << rxq_ctrl->rxq.sges_n));
return 0;
error:
+ err = rte_errno; /* Save rte_errno before cleanup. */
elts_n = i;
for (i = 0; (i != elts_n); ++i) {
if ((*rxq_ctrl->rxq.elts)[i] != NULL)
rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
(*rxq_ctrl->rxq.elts)[i] = NULL;
}
- DEBUG("%p: failed, freed everything", (void *)rxq_ctrl);
- assert(ret > 0);
- return ret;
+ DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
+ PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
+ rte_errno = err; /* Restore rte_errno. */
+ return -rte_errno;
}
/**
- * Free RX queue elements.
+ * Allocate RX queue elements.
+ *
+ * @param rxq_ctrl
+ * Pointer to RX queue structure.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+int
+rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+ return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
+ rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl);
+}
+
+/**
+ * Free RX queue elements for Multi-Packet RQ.
*
* @param rxq_ctrl
* Pointer to RX queue structure.
*/
static void
-rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
+rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+ struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
+ uint16_t i;
+
+ DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
+ rxq->port_id, rxq_ctrl->idx);
+ if (rxq->mprq_bufs == NULL)
+ return;
+ assert(mlx5_rxq_check_vec_support(rxq) < 0);
+ for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
+ if ((*rxq->mprq_bufs)[i] != NULL)
+ mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
+ (*rxq->mprq_bufs)[i] = NULL;
+ }
+ if (rxq->mprq_repl != NULL) {
+ mlx5_mprq_buf_free(rxq->mprq_repl);
+ rxq->mprq_repl = NULL;
+ }
+}
+
+/**
+ * Free RX queue elements for Single-Packet RQ.
+ *
+ * @param rxq_ctrl
+ * Pointer to RX queue structure.
+ */
+static void
+rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
{
struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
const uint16_t q_n = (1 << rxq->elts_n);
@@ -149,14 +315,15 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
uint16_t i;
- DEBUG("%p: freeing WRs", (void *)rxq_ctrl);
+ DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
+ PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
if (rxq->elts == NULL)
return;
/**
 	 * Some mbufs in the ring belong to the application. They cannot be
 	 * freed.
*/
- if (rxq_check_vec_support(rxq) > 0) {
+ if (mlx5_rxq_check_vec_support(rxq) > 0) {
for (i = 0; i < used; ++i)
(*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
rxq->rq_pi = rxq->rq_ci;
@@ -169,6 +336,21 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
}
/**
+ * Free RX queue elements.
+ *
+ * @param rxq_ctrl
+ * Pointer to RX queue structure.
+ */
+static void
+rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+ if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
+ rxq_free_elts_mprq(rxq_ctrl);
+ else
+ rxq_free_elts_sprq(rxq_ctrl);
+}
+
+/**
* Clean up a RX queue.
*
* Destroy objects, free allocated memory and reset the structure for reuse.
@@ -179,24 +361,26 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
void
mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
{
- DEBUG("cleaning up %p", (void *)rxq_ctrl);
+ DRV_LOG(DEBUG, "port %u cleaning up Rx queue %u",
+ PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
if (rxq_ctrl->ibv)
- mlx5_priv_rxq_ibv_release(rxq_ctrl->priv, rxq_ctrl->ibv);
+ mlx5_rxq_ibv_release(rxq_ctrl->ibv);
memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
}
/**
* Returns the per-queue supported offloads.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return
* Supported Rx offloads.
*/
uint64_t
-mlx5_priv_get_rx_queue_offloads(struct priv *priv)
+mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_dev_config *config = &priv->config;
uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_TIMESTAMP |
@@ -217,13 +401,11 @@ mlx5_priv_get_rx_queue_offloads(struct priv *priv)
/**
* Returns the per-port supported offloads.
*
- * @param priv
- * Pointer to private structure.
* @return
* Supported Rx offloads.
*/
uint64_t
-mlx5_priv_get_rx_port_offloads(struct priv *priv __rte_unused)
+mlx5_get_rx_port_offloads(void)
{
uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
@@ -231,33 +413,6 @@ mlx5_priv_get_rx_port_offloads(struct priv *priv __rte_unused)
}
/**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param priv
- * Pointer to private structure.
- * @param offloads
- * Per-queue offloads configuration.
- *
- * @return
- * 1 if the configuration is valid, 0 otherwise.
- */
-static int
-priv_is_rx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
-{
- uint64_t port_offloads = priv->dev->data->dev_conf.rxmode.offloads;
- uint64_t queue_supp_offloads =
- mlx5_priv_get_rx_queue_offloads(priv);
- uint64_t port_supp_offloads = mlx5_priv_get_rx_port_offloads(priv);
-
- if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
- offloads)
- return 0;
- if (((port_offloads ^ offloads) & port_supp_offloads))
- return 0;
- return 1;
-}
-
-/**
*
* @param dev
* Pointer to Ethernet device structure.
@@ -273,7 +428,7 @@ priv_is_rx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
* Memory pool for buffer allocations.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
@@ -284,53 +439,40 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
- int ret = 0;
- priv_lock(priv);
if (!rte_is_power_of_2(desc)) {
desc = 1 << log2above(desc);
- WARN("%p: increased number of descriptors in RX queue %u"
- " to the next power of two (%d)",
- (void *)dev, idx, desc);
+ DRV_LOG(WARNING,
+ "port %u increased number of descriptors in Rx queue %u"
+ " to the next power of two (%d)",
+ dev->data->port_id, idx, desc);
}
- DEBUG("%p: configuring queue %u for %u descriptors",
- (void *)dev, idx, desc);
+ DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
+ dev->data->port_id, idx, desc);
if (idx >= priv->rxqs_n) {
- ERROR("%p: queue index out of range (%u >= %u)",
- (void *)dev, idx, priv->rxqs_n);
- priv_unlock(priv);
- return -EOVERFLOW;
- }
- if (!priv_is_rx_queue_offloads_allowed(priv, conf->offloads)) {
- ret = ENOTSUP;
- ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port "
- "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
- (void *)dev, conf->offloads,
- dev->data->dev_conf.rxmode.offloads,
- (mlx5_priv_get_rx_port_offloads(priv) |
- mlx5_priv_get_rx_queue_offloads(priv)));
- goto out;
- }
- if (!mlx5_priv_rxq_releasable(priv, idx)) {
- ret = EBUSY;
- ERROR("%p: unable to release queue index %u",
- (void *)dev, idx);
- goto out;
- }
- mlx5_priv_rxq_release(priv, idx);
- rxq_ctrl = mlx5_priv_rxq_new(priv, idx, desc, socket, conf, mp);
+ DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
+ dev->data->port_id, idx, priv->rxqs_n);
+ rte_errno = EOVERFLOW;
+ return -rte_errno;
+ }
+ if (!mlx5_rxq_releasable(dev, idx)) {
+ DRV_LOG(ERR, "port %u unable to release queue index %u",
+ dev->data->port_id, idx);
+ rte_errno = EBUSY;
+ return -rte_errno;
+ }
+ mlx5_rxq_release(dev, idx);
+ rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
if (!rxq_ctrl) {
- ERROR("%p: unable to allocate queue index %u",
- (void *)dev, idx);
- ret = ENOMEM;
- goto out;
+ DRV_LOG(ERR, "port %u unable to allocate queue index %u",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
+ return -rte_errno;
}
- DEBUG("%p: adding RX queue %p to list",
- (void *)dev, (void *)rxq_ctrl);
+ DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
+ dev->data->port_id, idx);
(*priv->rxqs)[idx] = &rxq_ctrl->rxq;
-out:
- priv_unlock(priv);
- return -ret;
+ return 0;
}
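
Editorial aside: throughout this rework the error convention changes from returning positive errno values to setting rte_errno and returning -rte_errno, as the updated "@return" comments state. A minimal, hypothetical sketch of that pattern follows; my_errno and example_rx_queue_setup() are stand-ins invented for illustration, not part of the patch or of any real DPDK API.

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    static int my_errno; /* stand-in for the per-lcore rte_errno */

    /* Hypothetical setup function following the reworked convention. */
    static int example_rx_queue_setup(int fail)
    {
    	if (fail) {
    		my_errno = ENOMEM;   /* record the failure cause */
    		return -my_errno;    /* return its negative value */
    	}
    	return 0;
    }

    int main(void)
    {
    	int ret = example_rx_queue_setup(1);

    	if (ret < 0)
    		printf("setup failed: %s\n", strerror(my_errno));
    	return 0;
    }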
/**
@@ -350,45 +492,48 @@ mlx5_rx_queue_release(void *dpdk_rxq)
return;
rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
priv = rxq_ctrl->priv;
- priv_lock(priv);
- if (!mlx5_priv_rxq_releasable(priv, rxq_ctrl->rxq.stats.idx))
- rte_panic("Rx queue %p is still used by a flow and cannot be"
- " removed\n", (void *)rxq_ctrl);
- mlx5_priv_rxq_release(priv, rxq_ctrl->rxq.stats.idx);
- priv_unlock(priv);
+ if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx))
+ rte_panic("port %u Rx queue %u is still used by a flow and"
+ " cannot be removed\n",
+ PORT_ID(priv), rxq_ctrl->idx);
+ mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx);
}
/**
* Allocate queue vector and fill epoll fd list for Rx interrupts.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return
- * 0 on success, negative on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-priv_rx_intr_vec_enable(struct priv *priv)
+mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
unsigned int i;
unsigned int rxqs_n = priv->rxqs_n;
unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
unsigned int count = 0;
- struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+ struct rte_intr_handle *intr_handle = dev->intr_handle;
- if (!priv->dev->data->dev_conf.intr_conf.rxq)
+ if (!dev->data->dev_conf.intr_conf.rxq)
return 0;
- priv_rx_intr_vec_disable(priv);
+ mlx5_rx_intr_vec_disable(dev);
intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
if (intr_handle->intr_vec == NULL) {
- ERROR("failed to allocate memory for interrupt vector,"
- " Rx interrupts will not be supported");
- return -ENOMEM;
+ DRV_LOG(ERR,
+ "port %u failed to allocate memory for interrupt"
+ " vector, Rx interrupts will not be supported",
+ dev->data->port_id);
+ rte_errno = ENOMEM;
+ return -rte_errno;
}
intr_handle->type = RTE_INTR_HANDLE_EXT;
for (i = 0; i != n; ++i) {
/* This rxq ibv must not be released in this function. */
- struct mlx5_rxq_ibv *rxq_ibv = mlx5_priv_rxq_ibv_get(priv, i);
+ struct mlx5_rxq_ibv *rxq_ibv = mlx5_rxq_ibv_get(dev, i);
int fd;
int flags;
int rc;
@@ -402,27 +547,34 @@ priv_rx_intr_vec_enable(struct priv *priv)
continue;
}
if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
- ERROR("too many Rx queues for interrupt vector size"
- " (%d), Rx interrupts cannot be enabled",
- RTE_MAX_RXTX_INTR_VEC_ID);
- priv_rx_intr_vec_disable(priv);
- return -1;
+ DRV_LOG(ERR,
+ "port %u too many Rx queues for interrupt"
+ " vector size (%d), Rx interrupts cannot be"
+ " enabled",
+ dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
+ mlx5_rx_intr_vec_disable(dev);
+ rte_errno = ENOMEM;
+ return -rte_errno;
}
fd = rxq_ibv->channel->fd;
flags = fcntl(fd, F_GETFL);
rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
if (rc < 0) {
- ERROR("failed to make Rx interrupt file descriptor"
- " %d non-blocking for queue index %d", fd, i);
- priv_rx_intr_vec_disable(priv);
- return -1;
+ rte_errno = errno;
+ DRV_LOG(ERR,
+ "port %u failed to make Rx interrupt file"
+ " descriptor %d non-blocking for queue index"
+ " %d",
+ dev->data->port_id, fd, i);
+ mlx5_rx_intr_vec_disable(dev);
+ return -rte_errno;
}
intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
intr_handle->efds[count] = fd;
count++;
}
if (!count)
- priv_rx_intr_vec_disable(priv);
+ mlx5_rx_intr_vec_disable(dev);
else
intr_handle->nb_efd = count;
return 0;
@@ -431,18 +583,19 @@ priv_rx_intr_vec_enable(struct priv *priv)
/**
* Clean up Rx interrupts handler.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*/
void
-priv_rx_intr_vec_disable(struct priv *priv)
+mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
{
- struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+ struct priv *priv = dev->data->dev_private;
+ struct rte_intr_handle *intr_handle = dev->intr_handle;
unsigned int i;
unsigned int rxqs_n = priv->rxqs_n;
unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
- if (!priv->dev->data->dev_conf.intr_conf.rxq)
+ if (!dev->data->dev_conf.intr_conf.rxq)
return;
if (!intr_handle->intr_vec)
goto free;
@@ -459,7 +612,7 @@ priv_rx_intr_vec_disable(struct priv *priv)
*/
rxq_data = (*priv->rxqs)[i];
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
- mlx5_priv_rxq_ibv_release(priv, rxq_ctrl->ibv);
+ mlx5_rxq_ibv_release(rxq_ctrl->ibv);
}
free:
rte_intr_free_epoll_fd(intr_handle);
@@ -502,7 +655,7 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
* Rx queue number.
*
* @return
- * 0 on success, negative on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
@@ -510,31 +663,25 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data;
struct mlx5_rxq_ctrl *rxq_ctrl;
- int ret = 0;
- priv_lock(priv);
rxq_data = (*priv->rxqs)[rx_queue_id];
if (!rxq_data) {
- ret = EINVAL;
- goto exit;
+ rte_errno = EINVAL;
+ return -rte_errno;
}
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
if (rxq_ctrl->irq) {
struct mlx5_rxq_ibv *rxq_ibv;
- rxq_ibv = mlx5_priv_rxq_ibv_get(priv, rx_queue_id);
+ rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
if (!rxq_ibv) {
- ret = EINVAL;
- goto exit;
+ rte_errno = EINVAL;
+ return -rte_errno;
}
mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
- mlx5_priv_rxq_ibv_release(priv, rxq_ibv);
+ mlx5_rxq_ibv_release(rxq_ibv);
}
-exit:
- priv_unlock(priv);
- if (ret)
- WARN("unable to arm interrupt on rx queue %d", rx_queue_id);
- return -ret;
+ return 0;
}
/**
@@ -546,7 +693,7 @@ exit:
* Rx queue number.
*
* @return
- * 0 on success, negative on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
@@ -557,53 +704,54 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct mlx5_rxq_ibv *rxq_ibv = NULL;
struct ibv_cq *ev_cq;
void *ev_ctx;
- int ret = 0;
+ int ret;
- priv_lock(priv);
rxq_data = (*priv->rxqs)[rx_queue_id];
if (!rxq_data) {
- ret = EINVAL;
- goto exit;
+ rte_errno = EINVAL;
+ return -rte_errno;
}
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
if (!rxq_ctrl->irq)
- goto exit;
- rxq_ibv = mlx5_priv_rxq_ibv_get(priv, rx_queue_id);
+ return 0;
+ rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
if (!rxq_ibv) {
- ret = EINVAL;
- goto exit;
+ rte_errno = EINVAL;
+ return -rte_errno;
}
ret = mlx5_glue->get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx);
if (ret || ev_cq != rxq_ibv->cq) {
- ret = EINVAL;
+ rte_errno = EINVAL;
goto exit;
}
rxq_data->cq_arm_sn++;
mlx5_glue->ack_cq_events(rxq_ibv->cq, 1);
+ return 0;
exit:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
if (rxq_ibv)
- mlx5_priv_rxq_ibv_release(priv, rxq_ibv);
- priv_unlock(priv);
- if (ret)
- WARN("unable to disable interrupt on rx queue %d",
- rx_queue_id);
- return -ret;
+ mlx5_rxq_ibv_release(rxq_ibv);
+ DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
+ dev->data->port_id, rx_queue_id);
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
}
/**
* Create the Rx queue Verbs object.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* Queue index in DPDK Rx queue array
*
* @return
- * The Verbs object initialised if it can be created.
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
*/
-struct mlx5_rxq_ibv*
-mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
+struct mlx5_rxq_ibv *
+mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
@@ -613,10 +761,16 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
struct ibv_cq_init_attr_ex ibv;
struct mlx5dv_cq_init_attr mlx5;
} cq;
- struct ibv_wq_init_attr wq;
+ struct {
+ struct ibv_wq_init_attr ibv;
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+ struct mlx5dv_wq_init_attr mlx5;
+#endif
+ } wq;
struct ibv_cq_ex cq_attr;
} attr;
- unsigned int cqe_n = (1 << rxq_data->elts_n) - 1;
+ unsigned int cqe_n;
+ unsigned int wqe_n = 1 << rxq_data->elts_n;
struct mlx5_rxq_ibv *tmpl;
struct mlx5dv_cq cq_info;
struct mlx5dv_rwq rwq;
@@ -624,6 +778,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
int ret = 0;
struct mlx5dv_obj obj;
struct mlx5_dev_config *config = &priv->config;
+ const int mprq_en = mlx5_rxq_mprq_enabled(rxq_data);
assert(rxq_data);
assert(!rxq_ctrl->ibv);
@@ -632,28 +787,26 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
rxq_ctrl->socket);
if (!tmpl) {
- ERROR("%p: cannot allocate verbs resources",
- (void *)rxq_ctrl);
+ DRV_LOG(ERR,
+ "port %u Rx queue %u cannot allocate verbs resources",
+ dev->data->port_id, rxq_ctrl->idx);
+ rte_errno = ENOMEM;
goto error;
}
tmpl->rxq_ctrl = rxq_ctrl;
- /* Use the entire RX mempool as the memory region. */
- tmpl->mr = priv_mr_get(priv, rxq_data->mp);
- if (!tmpl->mr) {
- tmpl->mr = priv_mr_new(priv, rxq_data->mp);
- if (!tmpl->mr) {
- ERROR("%p: MR creation failure", (void *)rxq_ctrl);
- goto error;
- }
- }
if (rxq_ctrl->irq) {
tmpl->channel = mlx5_glue->create_comp_channel(priv->ctx);
if (!tmpl->channel) {
- ERROR("%p: Comp Channel creation failure",
- (void *)rxq_ctrl);
+ DRV_LOG(ERR, "port %u: comp channel creation failure",
+ dev->data->port_id);
+ rte_errno = ENOMEM;
goto error;
}
}
+ if (mprq_en)
+ cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
+ else
+ cqe_n = wqe_n - 1;
attr.cq.ibv = (struct ibv_cq_init_attr_ex){
.cqe = cqe_n,
.channel = tmpl->channel,
@@ -670,27 +823,32 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
* For vectorized Rx, it must not be doubled in order to
* make cq_ci and rq_ci aligned.
*/
- if (rxq_check_vec_support(rxq_data) < 0)
+ if (mlx5_rxq_check_vec_support(rxq_data) < 0)
attr.cq.ibv.cqe *= 2;
} else if (config->cqe_comp && rxq_data->hw_timestamp) {
- DEBUG("Rx CQE compression is disabled for HW timestamp");
+ DRV_LOG(DEBUG,
+ "port %u Rx CQE compression is disabled for HW"
+ " timestamp",
+ dev->data->port_id);
}
tmpl->cq = mlx5_glue->cq_ex_to_cq
(mlx5_glue->dv_create_cq(priv->ctx, &attr.cq.ibv,
&attr.cq.mlx5));
if (tmpl->cq == NULL) {
- ERROR("%p: CQ creation failure", (void *)rxq_ctrl);
+ DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
goto error;
}
- DEBUG("priv->device_attr.max_qp_wr is %d",
- priv->device_attr.orig_attr.max_qp_wr);
- DEBUG("priv->device_attr.max_sge is %d",
- priv->device_attr.orig_attr.max_sge);
- attr.wq = (struct ibv_wq_init_attr){
+ DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
+ dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
+ DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
+ dev->data->port_id, priv->device_attr.orig_attr.max_sge);
+ attr.wq.ibv = (struct ibv_wq_init_attr){
.wq_context = NULL, /* Could be useful in the future. */
.wq_type = IBV_WQT_RQ,
/* Max number of outstanding WRs. */
- .max_wr = (1 << rxq_data->elts_n) >> rxq_data->sges_n,
+ .max_wr = wqe_n >> rxq_data->sges_n,
/* Max number of scatter/gather elements in a WR. */
.max_sge = 1 << rxq_data->sges_n,
.pd = priv->pd,
@@ -704,32 +862,54 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
};
/* By default, FCS (CRC) is stripped by hardware. */
if (rxq_data->crc_present) {
- attr.wq.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
- attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
+ attr.wq.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
+ attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
}
#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
if (config->hw_padding) {
- attr.wq.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
- attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
+ attr.wq.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
+ attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
}
#endif
- tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq);
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+ attr.wq.mlx5 = (struct mlx5dv_wq_init_attr){
+ .comp_mask = 0,
+ };
+ if (mprq_en) {
+ struct mlx5dv_striding_rq_init_attr *mprq_attr =
+ &attr.wq.mlx5.striding_rq_attrs;
+
+ attr.wq.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
+ *mprq_attr = (struct mlx5dv_striding_rq_init_attr){
+ .single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
+ .single_wqe_log_num_of_strides = rxq_data->strd_num_n,
+ .two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
+ };
+ }
+ tmpl->wq = mlx5_glue->dv_create_wq(priv->ctx, &attr.wq.ibv,
+ &attr.wq.mlx5);
+#else
+ tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq.ibv);
+#endif
if (tmpl->wq == NULL) {
- ERROR("%p: WQ creation failure", (void *)rxq_ctrl);
+ DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
goto error;
}
/*
* Make sure number of WRs*SGEs match expectations since a queue
* cannot allocate more than "desc" buffers.
*/
- if (((int)attr.wq.max_wr !=
- ((1 << rxq_data->elts_n) >> rxq_data->sges_n)) ||
- ((int)attr.wq.max_sge != (1 << rxq_data->sges_n))) {
- ERROR("%p: requested %u*%u but got %u*%u WRs*SGEs",
- (void *)rxq_ctrl,
- ((1 << rxq_data->elts_n) >> rxq_data->sges_n),
- (1 << rxq_data->sges_n),
- attr.wq.max_wr, attr.wq.max_sge);
+ if (attr.wq.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
+ attr.wq.ibv.max_sge != (1u << rxq_data->sges_n)) {
+ DRV_LOG(ERR,
+ "port %u Rx queue %u requested %u*%u but got %u*%u"
+ " WRs*SGEs",
+ dev->data->port_id, idx,
+ wqe_n >> rxq_data->sges_n, (1 << rxq_data->sges_n),
+ attr.wq.ibv.max_wr, attr.wq.ibv.max_sge);
+ rte_errno = EINVAL;
goto error;
}
/* Change queue state to ready. */
@@ -739,8 +919,10 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
};
ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
if (ret) {
- ERROR("%p: WQ state to IBV_WQS_RDY failed",
- (void *)rxq_ctrl);
+ DRV_LOG(ERR,
+ "port %u Rx queue %u WQ state to IBV_WQS_RDY failed",
+ dev->data->port_id, idx);
+ rte_errno = ret;
goto error;
}
obj.cq.in = tmpl->cq;
@@ -748,33 +930,53 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
obj.rwq.in = tmpl->wq;
obj.rwq.out = &rwq;
ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);
- if (ret != 0)
+ if (ret) {
+ rte_errno = ret;
goto error;
+ }
if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
- ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
- "it should be set to %u", RTE_CACHE_LINE_SIZE);
+ DRV_LOG(ERR,
+ "port %u wrong MLX5_CQE_SIZE environment variable"
+ " value: it should be set to %u",
+ dev->data->port_id, RTE_CACHE_LINE_SIZE);
+ rte_errno = EINVAL;
goto error;
}
/* Fill the rings. */
- rxq_data->wqes = (volatile struct mlx5_wqe_data_seg (*)[])
- (uintptr_t)rwq.buf;
- for (i = 0; (i != (unsigned int)(1 << rxq_data->elts_n)); ++i) {
- struct rte_mbuf *buf = (*rxq_data->elts)[i];
- volatile struct mlx5_wqe_data_seg *scat = &(*rxq_data->wqes)[i];
-
+ rxq_data->wqes = rwq.buf;
+ for (i = 0; (i != wqe_n); ++i) {
+ volatile struct mlx5_wqe_data_seg *scat;
+ uintptr_t addr;
+ uint32_t byte_count;
+
+ if (mprq_en) {
+ struct mlx5_mprq_buf *buf = (*rxq_data->mprq_bufs)[i];
+
+ scat = &((volatile struct mlx5_wqe_mprq *)
+ rxq_data->wqes)[i].dseg;
+ addr = (uintptr_t)mlx5_mprq_buf_addr(buf);
+ byte_count = (1 << rxq_data->strd_sz_n) *
+ (1 << rxq_data->strd_num_n);
+ } else {
+ struct rte_mbuf *buf = (*rxq_data->elts)[i];
+
+ scat = &((volatile struct mlx5_wqe_data_seg *)
+ rxq_data->wqes)[i];
+ addr = rte_pktmbuf_mtod(buf, uintptr_t);
+ byte_count = DATA_LEN(buf);
+ }
/* scat->addr must be able to store a pointer. */
assert(sizeof(scat->addr) >= sizeof(uintptr_t));
*scat = (struct mlx5_wqe_data_seg){
- .addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
- uintptr_t)),
- .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
- .lkey = tmpl->mr->lkey,
+ .addr = rte_cpu_to_be_64(addr),
+ .byte_count = rte_cpu_to_be_32(byte_count),
+ .lkey = mlx5_rx_addr2mr(rxq_data, addr),
};
}
rxq_data->rq_db = rwq.dbrec;
rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
rxq_data->cq_ci = 0;
- rxq_data->rq_ci = 0;
+ rxq_data->strd_ci = 0;
rxq_data->rq_pi = 0;
rxq_data->zip = (struct rxq_zip){
.ai = 0,
@@ -785,43 +987,45 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
rxq_data->cqn = cq_info.cqn;
rxq_data->cq_arm_sn = 0;
/* Update doorbell counter. */
- rxq_data->rq_ci = (1 << rxq_data->elts_n) >> rxq_data->sges_n;
+ rxq_data->rq_ci = wqe_n >> rxq_data->sges_n;
rte_wmb();
*rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci);
- DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl);
+ DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
+ idx, (void *)&tmpl);
rte_atomic32_inc(&tmpl->refcnt);
- DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv,
- (void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
+ DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
+ dev->data->port_id, idx, rte_atomic32_read(&tmpl->refcnt));
LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
return tmpl;
error:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
if (tmpl->wq)
claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
if (tmpl->cq)
claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
if (tmpl->channel)
claim_zero(mlx5_glue->destroy_comp_channel(tmpl->channel));
- if (tmpl->mr)
- priv_mr_release(priv, tmpl->mr);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
+ rte_errno = ret; /* Restore rte_errno. */
return NULL;
}
/**
* Get an Rx queue Verbs object.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* Queue index in DPDK Rx queue array
*
* @return
* The Verbs object if it exists.
*/
-struct mlx5_rxq_ibv*
-mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx)
+struct mlx5_rxq_ibv *
+mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl;
@@ -831,11 +1035,10 @@ mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx)
return NULL;
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
if (rxq_ctrl->ibv) {
- priv_mr_get(priv, rxq_data->mp);
rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
- DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv,
- (void *)rxq_ctrl->ibv,
- rte_atomic32_read(&rxq_ctrl->ibv->refcnt));
+ DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
+ dev->data->port_id, rxq_ctrl->idx,
+ rte_atomic32_read(&rxq_ctrl->ibv->refcnt));
}
return rxq_ctrl->ibv;
}
@@ -843,28 +1046,21 @@ mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx)
/**
* Release an Rx verbs queue object.
*
- * @param priv
- * Pointer to private structure.
* @param rxq_ibv
* Verbs Rx queue object.
*
* @return
- * 0 on success, errno value on failure.
+ * 1 while a reference on it exists, 0 when freed.
*/
int
-mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv)
+mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
{
- int ret;
-
assert(rxq_ibv);
assert(rxq_ibv->wq);
assert(rxq_ibv->cq);
- assert(rxq_ibv->mr);
- ret = priv_mr_release(priv, rxq_ibv->mr);
- if (!ret)
- rxq_ibv->mr = NULL;
- DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv,
- (void *)rxq_ibv, rte_atomic32_read(&rxq_ibv->refcnt));
+ DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
+ PORT_ID(rxq_ibv->rxq_ctrl->priv),
+ rxq_ibv->rxq_ctrl->idx, rte_atomic32_read(&rxq_ibv->refcnt));
if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
rxq_free_elts(rxq_ibv->rxq_ctrl);
claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq));
@@ -876,26 +1072,28 @@ mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv)
rte_free(rxq_ibv);
return 0;
}
- return EBUSY;
+ return 1;
}
/**
* Verify the Verbs Rx queue list is empty
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
- * @return the number of object not released.
+ * @return
+ * The number of objects not released.
*/
int
-mlx5_priv_rxq_ibv_verify(struct priv *priv)
+mlx5_rxq_ibv_verify(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
int ret = 0;
struct mlx5_rxq_ibv *rxq_ibv;
LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
- DEBUG("%p: Verbs Rx queue %p still referenced", (void *)priv,
- (void *)rxq_ibv);
+ DRV_LOG(DEBUG, "port %u Verbs Rx queue %u still referenced",
+ dev->data->port_id, rxq_ibv->rxq_ctrl->idx);
++ret;
}
return ret;
@@ -904,65 +1102,272 @@ mlx5_priv_rxq_ibv_verify(struct priv *priv)
/**
* Return true if a single reference exists on the object.
*
- * @param priv
- * Pointer to private structure.
* @param rxq_ibv
* Verbs Rx queue object.
*/
int
-mlx5_priv_rxq_ibv_releasable(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv)
+mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv)
{
- (void)priv;
assert(rxq_ibv);
return (rte_atomic32_read(&rxq_ibv->refcnt) == 1);
}
/**
+ * Callback function to initialize mbufs for Multi-Packet RQ.
+ */
+static inline void
+mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg __rte_unused,
+ void *_m, unsigned int i __rte_unused)
+{
+ struct mlx5_mprq_buf *buf = _m;
+
+ memset(_m, 0, sizeof(*buf));
+ buf->mp = mp;
+ rte_atomic16_set(&buf->refcnt, 1);
+}
+
+/**
+ * Free mempool of Multi-Packet RQ.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+int
+mlx5_mprq_free_mp(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_mempool *mp = priv->mprq_mp;
+ unsigned int i;
+
+ if (mp == NULL)
+ return 0;
+ DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
+ dev->data->port_id, mp->name);
+ /*
+ * If a buffer in the pool has been externally attached to an mbuf
+ * and is still in use by the application, destroying the Rx queue
+ * can spoil the packet. This is unlikely, but it can happen if the
+ * application dynamically creates and destroys queues while holding
+ * Rx packets.
+ *
+ * TODO: This is unavoidable for now because the mempool for
+ * Multi-Packet RQ is not provided by the application but managed by
+ * the PMD.
+ */
+ if (!rte_mempool_full(mp)) {
+ DRV_LOG(ERR,
+ "port %u mempool for Multi-Packet RQ is still in use",
+ dev->data->port_id);
+ rte_errno = EBUSY;
+ return -rte_errno;
+ }
+ rte_mempool_free(mp);
+ /* Unset mempool for each Rx queue. */
+ for (i = 0; i != priv->rxqs_n; ++i) {
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+
+ if (rxq == NULL)
+ continue;
+ rxq->mprq_mp = NULL;
+ }
+ return 0;
+}
+
+/**
+ * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
+ * mempool. If already allocated, reuse it if there are enough elements.
+ * Otherwise, resize it.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+int
+mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_mempool *mp = priv->mprq_mp;
+ char name[RTE_MEMPOOL_NAMESIZE];
+ unsigned int desc = 0;
+ unsigned int buf_len;
+ unsigned int obj_num;
+ unsigned int obj_size;
+ unsigned int strd_num_n = 0;
+ unsigned int strd_sz_n = 0;
+ unsigned int i;
+
+ if (!mlx5_mprq_enabled(dev))
+ return 0;
+ /* Count the total number of descriptors configured. */
+ for (i = 0; i != priv->rxqs_n; ++i) {
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+
+ if (rxq == NULL)
+ continue;
+ desc += 1 << rxq->elts_n;
+ /* Get the max number of strides. */
+ if (strd_num_n < rxq->strd_num_n)
+ strd_num_n = rxq->strd_num_n;
+ /* Get the max size of a stride. */
+ if (strd_sz_n < rxq->strd_sz_n)
+ strd_sz_n = rxq->strd_sz_n;
+ }
+ assert(strd_num_n && strd_sz_n);
+ buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
+ obj_size = buf_len + sizeof(struct mlx5_mprq_buf);
+ /*
+ * Received packets can be either memcpy'd or externally referenced.
+ * When a packet is attached to an mbuf as an external buffer, it is
+ * not possible to predict how the buffers will be queued by the
+ * application, so the exact number of buffers needed cannot be
+ * pre-allocated; enough buffers have to be prepared speculatively.
+ *
+ * In the data path, if this mempool is depleted, the PMD will memcpy
+ * received packets into buffers provided by the application (rxq->mp)
+ * until this mempool becomes available again.
+ */
+ desc *= 4;
+ obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * priv->rxqs_n;
+ /* Check if a mempool is already allocated and whether it can be reused. */
+ if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
+ DRV_LOG(DEBUG, "port %u mempool %s is being reused",
+ dev->data->port_id, mp->name);
+ /* Reuse. */
+ goto exit;
+ } else if (mp != NULL) {
+ DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
+ dev->data->port_id, mp->name);
+ /*
+ * If the mempool cannot be freed, it may still be in use; there is
+ * no choice but to keep using the existing one. On buffer underrun,
+ * packets will be memcpy'd instead of being attached as external
+ * buffers.
+ */
+ if (mlx5_mprq_free_mp(dev)) {
+ if (mp->elt_size >= obj_size)
+ goto exit;
+ else
+ return -rte_errno;
+ }
+ }
+ snprintf(name, sizeof(name), "%s-mprq", dev->device->name);
+ mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
+ 0, NULL, NULL, mlx5_mprq_buf_init, NULL,
+ dev->device->numa_node, 0);
+ if (mp == NULL) {
+ DRV_LOG(ERR,
+ "port %u failed to allocate a mempool for"
+ " Multi-Packet RQ, count=%u, size=%u",
+ dev->data->port_id, obj_num, obj_size);
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ priv->mprq_mp = mp;
+exit:
+ /* Set mempool for each Rx queue. */
+ for (i = 0; i != priv->rxqs_n; ++i) {
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+
+ if (rxq == NULL)
+ continue;
+ rxq->mprq_mp = mp;
+ }
+ DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
+ dev->data->port_id);
+ return 0;
+}
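
Editorial aside: as a rough illustration of the sizing arithmetic in mlx5_mprq_alloc_mp() above, the standalone sketch below recomputes the buffer length, object size and object count for one assumed configuration. The stride parameters, queue count, cache size and header size are made-up example values, not taken from this patch; only the arithmetic mirrors the function.

    #include <stdio.h>

    int main(void)
    {
    	/* Assumed example configuration (illustrative only). */
    	unsigned int strd_num_n = 6;  /* 2^6 = 64 strides per WQE */
    	unsigned int strd_sz_n = 11;  /* 2^11 = 2048-byte strides */
    	unsigned int desc = 512;      /* descriptors summed over all Rx queues */
    	unsigned int rxqs_n = 4;      /* number of Rx queues */
    	unsigned int cache_sz = 32;   /* stands in for MLX5_MPRQ_MP_CACHE_SZ */
    	unsigned int buf_hdr = 32;    /* stands in for sizeof(struct mlx5_mprq_buf) */
    	/* Same arithmetic as the function above. */
    	unsigned int buf_len = (1u << strd_num_n) * (1u << strd_sz_n);
    	unsigned int obj_size = buf_len + buf_hdr;
    	unsigned int obj_num = desc * 4 + cache_sz * rxqs_n;

    	printf("buf_len=%u obj_size=%u obj_num=%u\n", buf_len, obj_size, obj_num);
    	return 0;
    }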
+
+/**
* Create a DPDK Rx queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
- * TX queue index.
+ * RX queue index.
* @param desc
* Number of descriptors to configure in queue.
* @param socket
* NUMA socket on which memory must be allocated.
*
* @return
- * A DPDK queue object on success.
+ * A DPDK queue object on success, NULL otherwise and rte_errno is set.
*/
-struct mlx5_rxq_ctrl*
-mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
- unsigned int socket, const struct rte_eth_rxconf *conf,
- struct rte_mempool *mp)
+struct mlx5_rxq_ctrl *
+mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp)
{
- struct rte_eth_dev *dev = priv->dev;
+ struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *tmpl;
unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
+ unsigned int mprq_stride_size;
struct mlx5_dev_config *config = &priv->config;
/*
* Always allocate extra slots, even if eventually
* the vector Rx will not be used.
*/
- const uint16_t desc_n =
+ uint16_t desc_n =
desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
+ uint64_t offloads = conf->offloads |
+ dev->data->dev_conf.rxmode.offloads;
+ const int mprq_en = mlx5_check_mprq_support(dev) > 0;
tmpl = rte_calloc_socket("RXQ", 1,
sizeof(*tmpl) +
desc_n * sizeof(struct rte_mbuf *),
0, socket);
- if (!tmpl)
+ if (!tmpl) {
+ rte_errno = ENOMEM;
return NULL;
+ }
+ if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
+ MLX5_MR_BTREE_CACHE_N, socket)) {
+ /* rte_errno is already set. */
+ goto error;
+ }
tmpl->socket = socket;
- if (priv->dev->data->dev_conf.intr_conf.rxq)
+ if (dev->data->dev_conf.intr_conf.rxq)
tmpl->irq = 1;
- /* Enable scattered packets support for this queue if necessary. */
+ /*
+ * This Rx queue can be configured as a Multi-Packet RQ if all of the
+ * following conditions are met:
+ * - MPRQ is enabled.
+ * - The number of descs is more than the number of strides.
+ * - max_rx_pkt_len plus overhead is less than the max size of a
+ * stride.
+ * Otherwise, enable Rx scatter if necessary.
+ */
assert(mb_len >= RTE_PKTMBUF_HEADROOM);
- if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
- (mb_len - RTE_PKTMBUF_HEADROOM)) {
+ mprq_stride_size =
+ dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ sizeof(struct rte_mbuf_ext_shared_info) +
+ RTE_PKTMBUF_HEADROOM;
+ if (mprq_en &&
+ desc >= (1U << config->mprq.stride_num_n) &&
+ mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) {
+ /* TODO: Rx scatter isn't supported yet. */
+ tmpl->rxq.sges_n = 0;
+ /* Trim the number of descs needed. */
+ desc >>= config->mprq.stride_num_n;
+ tmpl->rxq.strd_num_n = config->mprq.stride_num_n;
+ tmpl->rxq.strd_sz_n = RTE_MAX(log2above(mprq_stride_size),
+ config->mprq.min_stride_size_n);
+ tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
+ tmpl->rxq.mprq_max_memcpy_len =
+ RTE_MIN(mb_len - RTE_PKTMBUF_HEADROOM,
+ config->mprq.max_memcpy_len);
+ DRV_LOG(DEBUG,
+ "port %u Rx queue %u: Multi-Packet RQ is enabled"
+ " strd_num_n = %u, strd_sz_n = %u",
+ dev->data->port_id, idx,
+ tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
+ } else if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
+ (mb_len - RTE_PKTMBUF_HEADROOM)) {
tmpl->rxq.sges_n = 0;
- } else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
+ } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
unsigned int size =
RTE_PKTMBUF_HEADROOM +
dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -978,57 +1383,63 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
size = mb_len * (1 << tmpl->rxq.sges_n);
size -= RTE_PKTMBUF_HEADROOM;
if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
- ERROR("%p: too many SGEs (%u) needed to handle"
- " requested maximum packet size %u",
- (void *)dev,
- 1 << sges_n,
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ DRV_LOG(ERR,
+ "port %u too many SGEs (%u) needed to handle"
+ " requested maximum packet size %u",
+ dev->data->port_id,
+ 1 << sges_n,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ rte_errno = EOVERFLOW;
goto error;
}
} else {
- WARN("%p: the requested maximum Rx packet size (%u) is"
- " larger than a single mbuf (%u) and scattered"
- " mode has not been requested",
- (void *)dev,
- dev->data->dev_conf.rxmode.max_rx_pkt_len,
- mb_len - RTE_PKTMBUF_HEADROOM);
- }
- DEBUG("%p: maximum number of segments per packet: %u",
- (void *)dev, 1 << tmpl->rxq.sges_n);
+ DRV_LOG(WARNING,
+ "port %u the requested maximum Rx packet size (%u) is"
+ " larger than a single mbuf (%u) and scattered mode has"
+ " not been requested",
+ dev->data->port_id,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ mb_len - RTE_PKTMBUF_HEADROOM);
+ }
+ DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
+ dev->data->port_id, 1 << tmpl->rxq.sges_n);
if (desc % (1 << tmpl->rxq.sges_n)) {
- ERROR("%p: number of RX queue descriptors (%u) is not a"
- " multiple of SGEs per packet (%u)",
- (void *)dev,
- desc,
- 1 << tmpl->rxq.sges_n);
+ DRV_LOG(ERR,
+ "port %u number of Rx queue descriptors (%u) is not a"
+ " multiple of SGEs per packet (%u)",
+ dev->data->port_id,
+ desc,
+ 1 << tmpl->rxq.sges_n);
+ rte_errno = EINVAL;
goto error;
}
/* Toggle RX checksum offload if hardware supports it. */
- tmpl->rxq.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM);
- tmpl->rxq.csum_l2tun = (!!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) &&
- priv->config.hw_csum_l2tun);
- tmpl->rxq.hw_timestamp = !!(conf->offloads & DEV_RX_OFFLOAD_TIMESTAMP);
+ tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
+ tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
/* Configure VLAN stripping. */
- tmpl->rxq.vlan_strip = !!(conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+ tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
/* By default, FCS (CRC) is stripped by hardware. */
- if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
+ if (offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
tmpl->rxq.crc_present = 0;
} else if (config->hw_fcs_strip) {
tmpl->rxq.crc_present = 1;
} else {
- WARN("%p: CRC stripping has been disabled but will still"
- " be performed by hardware, make sure MLNX_OFED and"
- " firmware are up to date",
- (void *)dev);
+ DRV_LOG(WARNING,
+ "port %u CRC stripping has been disabled but will"
+ " still be performed by hardware, make sure MLNX_OFED"
+ " and firmware are up to date",
+ dev->data->port_id);
tmpl->rxq.crc_present = 0;
}
- DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from"
- " incoming frames to hide it",
- (void *)dev,
- tmpl->rxq.crc_present ? "disabled" : "enabled",
- tmpl->rxq.crc_present << 2);
+ DRV_LOG(DEBUG,
+ "port %u CRC stripping is %s, %u bytes will be subtracted from"
+ " incoming frames to hide it",
+ dev->data->port_id,
+ tmpl->rxq.crc_present ? "disabled" : "enabled",
+ tmpl->rxq.crc_present << 2);
/* Save port ID. */
- tmpl->rxq.rss_hash = priv->rxqs_n > 1;
+ tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
+ (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
tmpl->rxq.port_id = dev->data->port_id;
tmpl->priv = priv;
tmpl->rxq.mp = mp;
@@ -1036,9 +1447,10 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
tmpl->rxq.elts_n = log2above(desc);
tmpl->rxq.elts =
(struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
+ tmpl->idx = idx;
rte_atomic32_inc(&tmpl->refcnt);
- DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv,
- (void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
+ DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id,
+ idx, rte_atomic32_read(&tmpl->refcnt));
LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
return tmpl;
error:
@@ -1049,28 +1461,29 @@ error:
/**
* Get a Rx queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* TX queue index.
*
* @return
- * A pointer to the queue if it exists.
+ * A pointer to the queue if it exists, NULL otherwise.
*/
-struct mlx5_rxq_ctrl*
-mlx5_priv_rxq_get(struct priv *priv, uint16_t idx)
+struct mlx5_rxq_ctrl *
+mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
if ((*priv->rxqs)[idx]) {
rxq_ctrl = container_of((*priv->rxqs)[idx],
struct mlx5_rxq_ctrl,
rxq);
-
- mlx5_priv_rxq_ibv_get(priv, idx);
+ mlx5_rxq_ibv_get(dev, idx);
rte_atomic32_inc(&rxq_ctrl->refcnt);
- DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv,
- (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt));
+ DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d",
+ dev->data->port_id, rxq_ctrl->idx,
+ rte_atomic32_read(&rxq_ctrl->refcnt));
}
return rxq_ctrl;
}
@@ -1078,59 +1491,60 @@ mlx5_priv_rxq_get(struct priv *priv, uint16_t idx)
/**
* Release a Rx queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* TX queue index.
*
* @return
- * 0 on success, errno value on failure.
+ * 1 while a reference on it exists, 0 when freed.
*/
int
-mlx5_priv_rxq_release(struct priv *priv, uint16_t idx)
+mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl;
if (!(*priv->rxqs)[idx])
return 0;
rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
assert(rxq_ctrl->priv);
- if (rxq_ctrl->ibv) {
- int ret;
-
- ret = mlx5_priv_rxq_ibv_release(rxq_ctrl->priv, rxq_ctrl->ibv);
- if (!ret)
- rxq_ctrl->ibv = NULL;
- }
- DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv,
- (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt));
+ if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv))
+ rxq_ctrl->ibv = NULL;
+ DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id,
+ rxq_ctrl->idx, rte_atomic32_read(&rxq_ctrl->refcnt));
if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
+ mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
LIST_REMOVE(rxq_ctrl, next);
rte_free(rxq_ctrl);
(*priv->rxqs)[idx] = NULL;
return 0;
}
- return EBUSY;
+ return 1;
}
/**
* Verify if the queue can be released.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* TX queue index.
*
* @return
- * 1 if the queue can be released.
+ * 1 if the queue can be released, negative errno otherwise and rte_errno is
+ * set.
*/
int
-mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx)
+mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl;
- if (!(*priv->rxqs)[idx])
- return -1;
+ if (!(*priv->rxqs)[idx]) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
}
@@ -1138,20 +1552,22 @@ mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx)
/**
* Verify the Rx Queue list is empty
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
- * @return the number of object not released.
+ * @return
+ * The number of objects not released.
*/
int
-mlx5_priv_rxq_verify(struct priv *priv)
+mlx5_rxq_verify(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl;
int ret = 0;
LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
- DEBUG("%p: Rx Queue %p still referenced", (void *)priv,
- (void *)rxq_ctrl);
+ DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
+ dev->data->port_id, rxq_ctrl->idx);
++ret;
}
return ret;
@@ -1160,20 +1576,21 @@ mlx5_priv_rxq_verify(struct priv *priv)
/**
* Create an indirection table.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param queues
* Queues entering in the indirection table.
* @param queues_n
* Number of queues in the array.
*
* @return
- * A new indirection table.
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
*/
-struct mlx5_ind_table_ibv*
-mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[],
- uint16_t queues_n)
+struct mlx5_ind_table_ibv *
+mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
+ uint32_t queues_n)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_ind_table_ibv *ind_tbl;
const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
log2above(queues_n) :
@@ -1184,11 +1601,12 @@ mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[],
ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
queues_n * sizeof(uint16_t), 0);
- if (!ind_tbl)
+ if (!ind_tbl) {
+ rte_errno = ENOMEM;
return NULL;
+ }
for (i = 0; i != queues_n; ++i) {
- struct mlx5_rxq_ctrl *rxq =
- mlx5_priv_rxq_get(priv, queues[i]);
+ struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
if (!rxq)
goto error;
@@ -1206,24 +1624,28 @@ mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[],
.ind_tbl = wq,
.comp_mask = 0,
});
- if (!ind_tbl->ind_table)
+ if (!ind_tbl->ind_table) {
+ rte_errno = errno;
goto error;
+ }
rte_atomic32_inc(&ind_tbl->refcnt);
LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
- DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
- (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
+ DEBUG("port %u new indirection table %p: queues:%u refcnt:%d",
+ dev->data->port_id, (void *)ind_tbl, 1 << wq_n,
+ rte_atomic32_read(&ind_tbl->refcnt));
return ind_tbl;
error:
rte_free(ind_tbl);
- DEBUG("%p cannot create indirection table", (void *)priv);
+ DRV_LOG(DEBUG, "port %u cannot create indirection table",
+ dev->data->port_id);
return NULL;
}
/**
* Get an indirection table.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param queues
* Queues entering in the indirection table.
* @param queues_n
@@ -1232,10 +1654,11 @@ error:
* @return
* An indirection table if found.
*/
-struct mlx5_ind_table_ibv*
-mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[],
- uint16_t queues_n)
+struct mlx5_ind_table_ibv *
+mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues,
+ uint32_t queues_n)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_ind_table_ibv *ind_tbl;
LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
@@ -1249,10 +1672,11 @@ mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[],
unsigned int i;
rte_atomic32_inc(&ind_tbl->refcnt);
- DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
- (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
+ DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
+ dev->data->port_id, (void *)ind_tbl,
+ rte_atomic32_read(&ind_tbl->refcnt));
for (i = 0; i != ind_tbl->queues_n; ++i)
- mlx5_priv_rxq_get(priv, ind_tbl->queues[i]);
+ mlx5_rxq_get(dev, ind_tbl->queues[i]);
}
return ind_tbl;
}
@@ -1260,52 +1684,59 @@ mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[],
/**
* Release an indirection table.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param ind_table
* Indirection table to release.
*
* @return
- * 0 on success, errno value on failure.
+ * 1 while a reference on it exists, 0 when freed.
*/
int
-mlx5_priv_ind_table_ibv_release(struct priv *priv,
- struct mlx5_ind_table_ibv *ind_tbl)
+mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_ibv *ind_tbl)
{
unsigned int i;
- DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
- (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
- if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
+ DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
+ dev->data->port_id, (void *)ind_tbl,
+ rte_atomic32_read(&ind_tbl->refcnt));
+ if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
claim_zero(mlx5_glue->destroy_rwq_ind_table
(ind_tbl->ind_table));
+ DEBUG("port %u delete indirection table %p: queues: %u",
+ dev->data->port_id, (void *)ind_tbl, ind_tbl->queues_n);
+ }
for (i = 0; i != ind_tbl->queues_n; ++i)
- claim_nonzero(mlx5_priv_rxq_release(priv, ind_tbl->queues[i]));
+ claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
if (!rte_atomic32_read(&ind_tbl->refcnt)) {
LIST_REMOVE(ind_tbl, next);
rte_free(ind_tbl);
return 0;
}
- return EBUSY;
+ return 1;
}
/**
* Verify the Rx Queue list is empty
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
- * @return the number of object not released.
+ * @return
+ * The number of objects not released.
*/
int
-mlx5_priv_ind_table_ibv_verify(struct priv *priv)
+mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_ind_table_ibv *ind_tbl;
int ret = 0;
LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
- DEBUG("%p: Verbs indirection table %p still referenced",
- (void *)priv, (void *)ind_tbl);
+ DRV_LOG(DEBUG,
+ "port %u Verbs indirection table %p still referenced",
+ dev->data->port_id, (void *)ind_tbl);
++ret;
}
return ret;
@@ -1314,8 +1745,8 @@ mlx5_priv_ind_table_ibv_verify(struct priv *priv)
/**
* Create an Rx Hash queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param rss_key
* RSS key for the Rx hash queue.
* @param rss_key_len
@@ -1327,24 +1758,79 @@ mlx5_priv_ind_table_ibv_verify(struct priv *priv)
* first queue index will be taken for the indirection table.
* @param queues_n
* Number of queues.
+ * @param tunnel
+ * Tunnel type, implies tunnel offloading like inner checksum if available.
+ * @param rss_level
+ * RSS hash on tunnel level.
*
* @return
- * An hash Rx queue on success.
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
*/
-struct mlx5_hrxq*
-mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
- uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
+struct mlx5_hrxq *
+mlx5_hrxq_new(struct rte_eth_dev *dev,
+ const uint8_t *rss_key, uint32_t rss_key_len,
+ uint64_t hash_fields,
+ const uint16_t *queues, uint32_t queues_n,
+ uint32_t tunnel, uint32_t rss_level)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
struct mlx5_ind_table_ibv *ind_tbl;
struct ibv_qp *qp;
+ int err;
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ struct mlx5dv_qp_init_attr qp_init_attr = {0};
+#endif
queues_n = hash_fields ? queues_n : 1;
- ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n);
- if (!ind_tbl)
- ind_tbl = mlx5_priv_ind_table_ibv_new(priv, queues, queues_n);
+ ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
if (!ind_tbl)
+ ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n);
+ if (!ind_tbl) {
+ rte_errno = ENOMEM;
return NULL;
+ }
+ if (!rss_key_len) {
+ rss_key_len = rss_hash_default_key_len;
+ rss_key = rss_hash_default_key;
+ }
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ if (tunnel) {
+ qp_init_attr.comp_mask =
+ MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
+ qp_init_attr.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
+ }
+ qp = mlx5_glue->dv_create_qp
+ (priv->ctx,
+ &(struct ibv_qp_init_attr_ex){
+ .qp_type = IBV_QPT_RAW_PACKET,
+ .comp_mask =
+ IBV_QP_INIT_ATTR_PD |
+ IBV_QP_INIT_ATTR_IND_TABLE |
+ IBV_QP_INIT_ATTR_RX_HASH,
+ .rx_hash_conf = (struct ibv_rx_hash_conf){
+ .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
+ .rx_hash_key_len = rss_key_len ? rss_key_len :
+ rss_hash_default_key_len,
+ .rx_hash_key = rss_key ?
+ (void *)(uintptr_t)rss_key :
+ rss_hash_default_key,
+ .rx_hash_fields_mask = hash_fields |
+ (tunnel && rss_level > 1 ?
+ (uint32_t)IBV_RX_HASH_INNER : 0),
+ },
+ .rwq_ind_tbl = ind_tbl->ind_table,
+ .pd = priv->pd,
+ },
+ &qp_init_attr);
+ DEBUG("port %u new QP:%p ind_tbl:%p hash_fields:0x%" PRIx64
+ " tunnel:0x%x level:%u dv_attr:comp_mask:0x%" PRIx64
+ " create_flags:0x%x",
+ dev->data->port_id, (void *)qp, (void *)ind_tbl,
+ (tunnel && rss_level == 2 ? (uint32_t)IBV_RX_HASH_INNER : 0) |
+ hash_fields, tunnel, rss_level,
+ qp_init_attr.comp_mask, qp_init_attr.create_flags);
+#else
qp = mlx5_glue->create_qp_ex
(priv->ctx,
&(struct ibv_qp_init_attr_ex){
@@ -1355,15 +1841,25 @@ mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
IBV_QP_INIT_ATTR_RX_HASH,
.rx_hash_conf = (struct ibv_rx_hash_conf){
.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
- .rx_hash_key_len = rss_key_len,
- .rx_hash_key = rss_key,
+ .rx_hash_key_len = rss_key_len ? rss_key_len :
+ rss_hash_default_key_len,
+ .rx_hash_key = rss_key ?
+ (void *)(uintptr_t)rss_key :
+ rss_hash_default_key,
.rx_hash_fields_mask = hash_fields,
},
.rwq_ind_tbl = ind_tbl->ind_table,
.pd = priv->pd,
});
- if (!qp)
+ DEBUG("port %u new QP:%p ind_tbl:%p hash_fields:0x%" PRIx64
+ " tunnel:0x%x level:%hhu",
+ dev->data->port_id, (void *)qp, (void *)ind_tbl,
+ hash_fields, tunnel, rss_level);
+#endif
+ if (!qp) {
+ rte_errno = errno;
goto error;
+ }
hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
if (!hrxq)
goto error;
@@ -1371,24 +1867,29 @@ mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
hrxq->qp = qp;
hrxq->rss_key_len = rss_key_len;
hrxq->hash_fields = hash_fields;
+ hrxq->tunnel = tunnel;
+ hrxq->rss_level = rss_level;
memcpy(hrxq->rss_key, rss_key, rss_key_len);
rte_atomic32_inc(&hrxq->refcnt);
LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
- DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
- (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
+ DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
+ dev->data->port_id, (void *)hrxq,
+ rte_atomic32_read(&hrxq->refcnt));
return hrxq;
error:
- mlx5_priv_ind_table_ibv_release(priv, ind_tbl);
+ err = rte_errno; /* Save rte_errno before cleanup. */
+ mlx5_ind_table_ibv_release(dev, ind_tbl);
if (qp)
claim_zero(mlx5_glue->destroy_qp(qp));
+ rte_errno = err; /* Restore rte_errno. */
return NULL;
}
/**
* Get an Rx Hash queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param rss_conf
* RSS configuration for the Rx hash queue.
* @param queues
@@ -1396,14 +1897,22 @@ error:
* first queue index will be taken for the indirection table.
* @param queues_n
* Number of queues.
+ * @param tunnel
+ * Tunnel type, implies tunnel offloading like inner checksum if available.
+ * @param rss_level
+ * RSS hash on tunnel level.
*
* @return
* An hash Rx queue on success.
*/
-struct mlx5_hrxq*
-mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
- uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
+struct mlx5_hrxq *
+mlx5_hrxq_get(struct rte_eth_dev *dev,
+ const uint8_t *rss_key, uint32_t rss_key_len,
+ uint64_t hash_fields,
+ const uint16_t *queues, uint32_t queues_n,
+ uint32_t tunnel, uint32_t rss_level)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
queues_n = hash_fields ? queues_n : 1;
@@ -1416,16 +1925,21 @@ mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
continue;
if (hrxq->hash_fields != hash_fields)
continue;
- ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n);
+ if (hrxq->tunnel != tunnel)
+ continue;
+ if (hrxq->rss_level != rss_level)
+ continue;
+ ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
if (!ind_tbl)
continue;
if (ind_tbl != hrxq->ind_table) {
- mlx5_priv_ind_table_ibv_release(priv, ind_tbl);
+ mlx5_ind_table_ibv_release(dev, ind_tbl);
continue;
}
rte_atomic32_inc(&hrxq->refcnt);
- DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
- (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
+ DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
+ dev->data->port_id, (void *)hrxq,
+ rte_atomic32_read(&hrxq->refcnt));
return hrxq;
}
return NULL;
@@ -1434,47 +1948,55 @@ mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
/**
* Release the hash Rx queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param hrxq
* Pointer to Hash Rx queue to release.
*
* @return
- * 0 on success, errno value on failure.
+ * 1 while a reference on it exists, 0 when freed.
*/
int
-mlx5_priv_hrxq_release(struct priv *priv, struct mlx5_hrxq *hrxq)
+mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
{
- DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
- (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
+ DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
+ dev->data->port_id, (void *)hrxq,
+ rte_atomic32_read(&hrxq->refcnt));
if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
- mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table);
+ DEBUG("port %u delete QP %p: hash: 0x%" PRIx64 ", tunnel:"
+ " 0x%x, level: %u",
+ dev->data->port_id, (void *)hrxq, hrxq->hash_fields,
+ hrxq->tunnel, hrxq->rss_level);
+ mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
LIST_REMOVE(hrxq, next);
rte_free(hrxq);
return 0;
}
- claim_nonzero(mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table));
- return EBUSY;
+ claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table));
+ return 1;
}
/**
* Verify the Rx Queue list is empty
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
- * @return the number of object not released.
+ * @return
+ * The number of objects not released.
*/
int
-mlx5_priv_hrxq_ibv_verify(struct priv *priv)
+mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
int ret = 0;
LIST_FOREACH(hrxq, &priv->hrxqs, next) {
- DEBUG("%p: Verbs Hash Rx queue %p still referenced",
- (void *)priv, (void *)hrxq);
+ DRV_LOG(DEBUG,
+ "port %u Verbs hash Rx queue %p still referenced",
+ dev->data->port_id, (void *)hrxq);
++ret;
}
return ret;
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index dc4ead93..52785946 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#include <assert.h>
@@ -34,19 +34,29 @@
#include "mlx5_prm.h"
static __rte_always_inline uint32_t
-rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe);
+rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
static __rte_always_inline int
mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
uint16_t cqe_cnt, uint32_t *rss_hash);
static __rte_always_inline uint32_t
-rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
+rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
+
+static __rte_always_inline void
+rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
+ volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res);
+
+static __rte_always_inline void
+mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx);
uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
[0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
};
+uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
+uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
+
/**
* Build a table to translate Rx completion flags to packet type.
*
@@ -86,6 +96,14 @@ mlx5_set_ptype_table(void)
RTE_PTYPE_L4_TCP;
(*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_TCP;
+ (*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
/* UDP */
(*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_UDP;
@@ -104,17 +122,27 @@ mlx5_set_ptype_table(void)
RTE_PTYPE_L4_TCP;
(*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_TCP;
+ (*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
(*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_UDP;
(*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_UDP;
/* Tunneled - L3 */
+ (*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
(*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_NONFRAG;
(*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_NONFRAG;
+ (*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
(*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_NONFRAG;
@@ -141,12 +169,36 @@ mlx5_set_ptype_table(void)
(*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_TCP;
+ (*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ (*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ (*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ (*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
(*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_TCP;
(*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_TCP;
+ (*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ (*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ (*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ (*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
/* Tunneled - UDP */
(*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
@@ -163,6 +215,74 @@ mlx5_set_ptype_table(void)
}
/**
+ * Build a table to translate packet checksum offloads to Verbs checksum flags.
+ */
+void
+mlx5_set_cksum_table(void)
+{
+ unsigned int i;
+ uint8_t v;
+
+ /*
+ * The index should have:
+ * bit[0] = PKT_TX_TCP_SEG
+ * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
+ * bit[4] = PKT_TX_IP_CKSUM
+ * bit[8] = PKT_TX_OUTER_IP_CKSUM
+ * bit[9] = tunnel
+ */
+ for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
+ v = 0;
+ if (i & (1 << 9)) {
+ /* Tunneled packet. */
+ if (i & (1 << 8)) /* Outer IP. */
+ v |= MLX5_ETH_WQE_L3_CSUM;
+ if (i & (1 << 4)) /* Inner IP. */
+ v |= MLX5_ETH_WQE_L3_INNER_CSUM;
+ if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
+ v |= MLX5_ETH_WQE_L4_INNER_CSUM;
+ } else {
+ /* No tunnel. */
+ if (i & (1 << 4)) /* IP. */
+ v |= MLX5_ETH_WQE_L3_CSUM;
+ if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
+ v |= MLX5_ETH_WQE_L4_CSUM;
+ }
+ mlx5_cksum_table[i] = v;
+ }
+}
+
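For reference, a minimal sketch of how the table built above is meant to be consumed on the Tx path. It mirrors the txq_ol_cksum_to_cs() helper reworked later in this patch; the helper name here is hypothetical and the flag bit positions (PKT_TX_TCP_SEG at bit 50 and so on) are assumptions consistent with the index layout described in the comment:

/* Illustrative only: build the 10-bit index described above and look it up. */
static inline uint8_t
example_cksum_lookup(uint64_t ol_flags)
{
	const uint64_t mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
			      PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
	uint32_t idx = ((ol_flags & mask) >> 50) |		/* bits 0, 2:3, 4, 8 */
		       (!!(ol_flags & PKT_TX_TUNNEL_MASK) << 9); /* bit 9 = tunnel */

	return mlx5_cksum_table[idx];
}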
+/**
+ * Build a table to translate mbuf packet types to Verbs SWP types.
+ */
+void
+mlx5_set_swp_types_table(void)
+{
+ unsigned int i;
+ uint8_t v;
+
+ /*
+ * The index should have:
+ * bit[0:1] = PKT_TX_L4_MASK
+ * bit[4] = PKT_TX_IPV6
+ * bit[8] = PKT_TX_OUTER_IPV6
+ * bit[9] = PKT_TX_OUTER_UDP
+ */
+ for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
+ v = 0;
+ if (i & (1 << 8))
+ v |= MLX5_ETH_WQE_L3_OUTER_IPV6;
+ if (i & (1 << 9))
+ v |= MLX5_ETH_WQE_L4_OUTER_UDP;
+ if (i & (1 << 4))
+ v |= MLX5_ETH_WQE_L3_INNER_IPV6;
+ if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52))
+ v |= MLX5_ETH_WQE_L4_INNER_UDP;
+ mlx5_swp_types_table[i] = v;
+ }
+}
+
+/**
* Return the size of tailroom of WQ.
*
* @param txq
@@ -219,6 +339,60 @@ mlx5_copy_to_wq(void *dst, const void *src, size_t n,
}
/**
+ * Inline TSO headers into WQE.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+static int
+inline_tso(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
+ uint32_t *length,
+ uintptr_t *addr,
+ uint16_t *pkt_inline_sz,
+ uint8_t **raw,
+ uint16_t *max_wqe,
+ uint16_t *tso_segsz,
+ uint16_t *tso_header_sz)
+{
+ uintptr_t end = (uintptr_t)(((uintptr_t)txq->wqes) +
+ (1 << txq->wqe_n) * MLX5_WQE_SIZE);
+ unsigned int copy_b;
+ uint8_t vlan_sz = (buf->ol_flags & PKT_TX_VLAN_PKT) ? 4 : 0;
+ const uint8_t tunneled = txq->tunnel_en && (buf->ol_flags &
+ PKT_TX_TUNNEL_MASK);
+ uint16_t n_wqe;
+
+ *tso_segsz = buf->tso_segsz;
+ *tso_header_sz = buf->l2_len + vlan_sz + buf->l3_len + buf->l4_len;
+ if (unlikely(*tso_segsz == 0 || *tso_header_sz == 0)) {
+ txq->stats.oerrors++;
+ return -EINVAL;
+ }
+ if (tunneled)
+ *tso_header_sz += buf->outer_l2_len + buf->outer_l3_len;
+ /* First seg must contain all TSO headers. */
+ if (unlikely(*tso_header_sz > MLX5_MAX_TSO_HEADER) ||
+ *tso_header_sz > DATA_LEN(buf)) {
+ txq->stats.oerrors++;
+ return -EINVAL;
+ }
+ copy_b = *tso_header_sz - *pkt_inline_sz;
+ if (!copy_b || ((end - (uintptr_t)*raw) < copy_b))
+ return -EAGAIN;
+ n_wqe = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
+ if (unlikely(*max_wqe < n_wqe))
+ return -EINVAL;
+ *max_wqe -= n_wqe;
+ rte_memcpy((void *)*raw, (void *)*addr, copy_b);
+ *length -= copy_b;
+ *addr += copy_b;
+ copy_b = MLX5_WQE_DS(copy_b) * MLX5_WQE_DWORD_SIZE;
+ *pkt_inline_sz += copy_b;
+ *raw += copy_b;
+ return 0;
+}
+
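As a rough worked example of the accounting above (assuming MLX5_WQE_DS() rounds a byte count up to 16-byte data segments): inlining a 54-byte Ethernet/IPv4/TCP header when pkt_inline_sz is already 18 gives copy_b = 36 and MLX5_WQE_DS(36) = 3, so n_wqe = (3 - 1 + 3) / 4 = 1 extra WQEBB is reserved from max_wqe, and raw and pkt_inline_sz then advance by 3 * 16 = 48 bytes so the inline part stays aligned to 16-byte data segments.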
+/**
* DPDK callback to check the status of a tx descriptor.
*
* @param tx_queue
@@ -335,7 +509,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (unlikely(!max_wqe))
return 0;
do {
- struct rte_mbuf *buf = NULL;
+ struct rte_mbuf *buf = *pkts; /* First_seg. */
uint8_t *raw;
volatile struct mlx5_wqe_v *wqe = NULL;
volatile rte_v128u32_t *dseg = NULL;
@@ -347,14 +521,15 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
uint16_t tso_header_sz = 0;
uint16_t ehdr;
uint8_t cs_flags;
- uint64_t tso = 0;
+ uint8_t tso = txq->tso_en && (buf->ol_flags & PKT_TX_TCP_SEG);
+ uint32_t swp_offsets = 0;
+ uint8_t swp_types = 0;
uint16_t tso_segsz = 0;
#ifdef MLX5_PMD_SOFT_COUNTERS
uint32_t total_length = 0;
#endif
+ int ret;
- /* first_seg */
- buf = *pkts;
segs_n = buf->nb_segs;
/*
* Make sure there is enough room to store this packet and
@@ -389,7 +564,8 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (pkts_n - i > 1)
rte_prefetch0(
rte_pktmbuf_mtod(*(pkts + 1), volatile void *));
- cs_flags = txq_ol_cksum_to_cs(txq, buf);
+ cs_flags = txq_ol_cksum_to_cs(buf);
+ txq_mbuf_to_swp(txq, buf, (uint8_t *)&swp_offsets, &swp_types);
raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
/* Replace the Ethernet type by the VLAN if necessary. */
if (buf->ol_flags & PKT_TX_VLAN_PKT) {
@@ -415,54 +591,14 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
addr += pkt_inline_sz;
}
raw += MLX5_WQE_DWORD_SIZE;
- tso = txq->tso_en && (buf->ol_flags & PKT_TX_TCP_SEG);
if (tso) {
- uintptr_t end =
- (uintptr_t)(((uintptr_t)txq->wqes) +
- (1 << txq->wqe_n) * MLX5_WQE_SIZE);
- unsigned int copy_b;
- uint8_t vlan_sz =
- (buf->ol_flags & PKT_TX_VLAN_PKT) ? 4 : 0;
- const uint64_t is_tunneled =
- buf->ol_flags & (PKT_TX_TUNNEL_GRE |
- PKT_TX_TUNNEL_VXLAN);
-
- tso_header_sz = buf->l2_len + vlan_sz +
- buf->l3_len + buf->l4_len;
- tso_segsz = buf->tso_segsz;
- if (unlikely(tso_segsz == 0)) {
- txq->stats.oerrors++;
+ ret = inline_tso(txq, buf, &length,
+ &addr, &pkt_inline_sz,
+ &raw, &max_wqe,
+ &tso_segsz, &tso_header_sz);
+ if (ret == -EINVAL) {
break;
- }
- if (is_tunneled && txq->tunnel_en) {
- tso_header_sz += buf->outer_l2_len +
- buf->outer_l3_len;
- cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
- } else {
- cs_flags |= MLX5_ETH_WQE_L4_CSUM;
- }
- if (unlikely(tso_header_sz > MLX5_MAX_TSO_HEADER)) {
- txq->stats.oerrors++;
- break;
- }
- copy_b = tso_header_sz - pkt_inline_sz;
- /* First seg must contain all headers. */
- assert(copy_b <= length);
- if (copy_b && ((end - (uintptr_t)raw) > copy_b)) {
- uint16_t n = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
-
- if (unlikely(max_wqe < n))
- break;
- max_wqe -= n;
- rte_memcpy((void *)raw, (void *)addr, copy_b);
- addr += copy_b;
- length -= copy_b;
- /* Include padding for TSO header. */
- copy_b = MLX5_WQE_DS(copy_b) *
- MLX5_WQE_DWORD_SIZE;
- pkt_inline_sz += copy_b;
- raw += copy_b;
- } else {
+ } else if (ret == -EAGAIN) {
/* NOP WQE. */
wqe->ctrl = (rte_v128u32_t){
rte_cpu_to_be_32(txq->wqe_ci << 8),
@@ -507,7 +643,8 @@ pkt_inline:
if (unlikely(max_wqe < n))
break;
max_wqe -= n;
- if (tso && !inl) {
+ if (tso) {
+ assert(inl == 0);
inl = rte_cpu_to_be_32(copy_b |
MLX5_INLINE_SEG);
rte_memcpy((void *)raw,
@@ -542,8 +679,17 @@ pkt_inline:
} else if (!segs_n) {
goto next_pkt;
} else {
- raw += copy_b;
- inline_room -= copy_b;
+ /*
+ * Further inline the next segment only for
+ * non-TSO packets.
+ */
+ if (!tso) {
+ raw += copy_b;
+ inline_room -= copy_b;
+ } else {
+ inline_room = 0;
+ }
+ /* Move to the next segment. */
--segs_n;
buf = buf->next;
assert(buf);
@@ -633,8 +779,9 @@ next_pkt:
0,
};
wqe->eseg = (rte_v128u32_t){
- 0,
- cs_flags | (rte_cpu_to_be_16(tso_segsz) << 16),
+ swp_offsets,
+ cs_flags | (swp_types << 8) |
+ (rte_cpu_to_be_16(tso_segsz) << 16),
0,
(ehdr << 16) | rte_cpu_to_be_16(tso_header_sz),
};
@@ -647,8 +794,8 @@ next_pkt:
0,
};
wqe->eseg = (rte_v128u32_t){
- 0,
- cs_flags,
+ swp_offsets,
+ cs_flags | (swp_types << 8),
0,
(ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz),
};
@@ -820,7 +967,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
}
max_elts -= segs_n;
--pkts_n;
- cs_flags = txq_ol_cksum_to_cs(txq, buf);
+ cs_flags = txq_ol_cksum_to_cs(buf);
/* Retrieve packet information. */
length = PKT_LEN(buf);
assert(length);
@@ -1052,7 +1199,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
* iteration.
*/
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
- cs_flags = txq_ol_cksum_to_cs(txq, buf);
+ cs_flags = txq_ol_cksum_to_cs(buf);
/* Retrieve packet information. */
length = PKT_LEN(buf);
/* Start new session if packet differs. */
@@ -1320,7 +1467,6 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
do {
struct rte_mbuf *buf = *(pkts++);
uintptr_t addr;
- unsigned int n;
unsigned int do_inline = 0; /* Whether inline is possible. */
uint32_t length;
uint8_t cs_flags;
@@ -1330,7 +1476,7 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
/* Make sure there is enough room to store this packet. */
if (max_elts - j == 0)
break;
- cs_flags = txq_ol_cksum_to_cs(txq, buf);
+ cs_flags = txq_ol_cksum_to_cs(buf);
/* Retrieve packet information. */
length = PKT_LEN(buf);
/* Start new session if:
@@ -1382,7 +1528,7 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
(!txq->mpw_hdr_dseg ||
mpw.total_len >= MLX5_WQE_SIZE);
}
- if (do_inline) {
+ if (max_inline && do_inline) {
/* Inline packet into WQE. */
unsigned int max;
@@ -1440,11 +1586,8 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
((uintptr_t)mpw.data.raw +
inl_pad);
(*txq->elts)[elts_head++ & elts_m] = buf;
- addr = rte_pktmbuf_mtod(buf, uintptr_t);
- for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++)
- rte_prefetch2((void *)(addr +
- n * RTE_CACHE_LINE_SIZE));
- addr = rte_cpu_to_be_64(addr);
+ addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
+ uintptr_t));
*dseg = (rte_v128u32_t) {
rte_cpu_to_be_32(length),
mlx5_tx_mb2mr(txq, buf),
@@ -1541,6 +1684,8 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
/**
* Translate RX completion flags to packet type.
*
+ * @param[in] rxq
+ * Pointer to RX queue structure.
* @param[in] cqe
* Pointer to CQE.
*
@@ -1550,7 +1695,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
* Packet type for struct rte_mbuf.
*/
static inline uint32_t
-rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
+rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
{
uint8_t idx;
uint8_t pinfo = cqe->pkt_info;
@@ -1565,7 +1710,7 @@ rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
* bit[7] = outer_l3_type
*/
idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
- return mlx5_ptype_table[idx];
+ return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
}
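The OR with rxq->tunnel above is a branchless way of tagging tunneled completions: bit 6 of the index is the tunnel bit, so the multiplication contributes either 0 or the tunnel packet-type bits configured for this queue. A sketch of the equivalent code with an explicit branch (same names as in the patch):

/* Illustrative only: equivalent of the expression above. */
uint32_t ptype = mlx5_ptype_table[idx];

if (idx & (1 << 6))	/* CQE reports a tunneled packet. */
	ptype |= rxq->tunnel;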
/**
@@ -1688,8 +1833,6 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
/**
* Translate RX completion flags to offload flags.
*
- * @param[in] rxq
- * Pointer to RX queue structure.
* @param[in] cqe
* Pointer to CQE.
*
@@ -1697,7 +1840,7 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
* Offload flags (ol_flags) for struct rte_mbuf.
*/
static inline uint32_t
-rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
+rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
{
uint32_t ol_flags = 0;
uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
@@ -1709,18 +1852,56 @@ rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
TRANSPOSE(flags,
MLX5_CQE_RX_L4_HDR_VALID,
PKT_RX_L4_CKSUM_GOOD);
- if ((cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
- ol_flags |=
- TRANSPOSE(flags,
- MLX5_CQE_RX_L3_HDR_VALID,
- PKT_RX_IP_CKSUM_GOOD) |
- TRANSPOSE(flags,
- MLX5_CQE_RX_L4_HDR_VALID,
- PKT_RX_L4_CKSUM_GOOD);
return ol_flags;
}
/**
+ * Fill in mbuf fields from RX completion flags.
+ * Note that pkt->ol_flags should be initialized outside of this function.
+ *
+ * @param rxq
+ * Pointer to RX queue.
+ * @param pkt
+ * mbuf to fill.
+ * @param cqe
+ * CQE to process.
+ * @param rss_hash_res
+ * Packet RSS Hash result.
+ */
+static inline void
+rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
+ volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res)
+{
+ /* Update packet information. */
+ pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe);
+ if (rss_hash_res && rxq->rss_hash) {
+ pkt->hash.rss = rss_hash_res;
+ pkt->ol_flags |= PKT_RX_RSS_HASH;
+ }
+ if (rxq->mark && MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
+ pkt->ol_flags |= PKT_RX_FDIR;
+ if (cqe->sop_drop_qpn !=
+ rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
+ uint32_t mark = cqe->sop_drop_qpn;
+
+ pkt->ol_flags |= PKT_RX_FDIR_ID;
+ pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
+ }
+ }
+ if (rxq->csum)
+ pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
+ if (rxq->vlan_strip &&
+ (cqe->hdr_type_etc & rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
+ pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
+ }
+ if (rxq->hw_timestamp) {
+ pkt->timestamp = rte_be_to_cpu_64(cqe->timestamp);
+ pkt->ol_flags |= PKT_RX_TIMESTAMP;
+ }
+}
+
+/**
* DPDK callback for RX.
*
* @param dpdk_rxq
@@ -1750,7 +1931,8 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
while (pkts_n) {
unsigned int idx = rq_ci & wqe_cnt;
- volatile struct mlx5_wqe_data_seg *wqe = &(*rxq->wqes)[idx];
+ volatile struct mlx5_wqe_data_seg *wqe =
+ &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
struct rte_mbuf *rep = (*rxq->elts)[idx];
uint32_t rss_hash_res = 0;
@@ -1796,40 +1978,8 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
}
pkt = seg;
assert(len >= (rxq->crc_present << 2));
- /* Update packet information. */
- pkt->packet_type = rxq_cq_to_pkt_type(cqe);
pkt->ol_flags = 0;
- if (rss_hash_res && rxq->rss_hash) {
- pkt->hash.rss = rss_hash_res;
- pkt->ol_flags = PKT_RX_RSS_HASH;
- }
- if (rxq->mark &&
- MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
- pkt->ol_flags |= PKT_RX_FDIR;
- if (cqe->sop_drop_qpn !=
- rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
- uint32_t mark = cqe->sop_drop_qpn;
-
- pkt->ol_flags |= PKT_RX_FDIR_ID;
- pkt->hash.fdir.hi =
- mlx5_flow_mark_get(mark);
- }
- }
- if (rxq->csum | rxq->csum_l2tun)
- pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe);
- if (rxq->vlan_strip &&
- (cqe->hdr_type_etc &
- rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
- pkt->ol_flags |= PKT_RX_VLAN |
- PKT_RX_VLAN_STRIPPED;
- pkt->vlan_tci =
- rte_be_to_cpu_16(cqe->vlan_info);
- }
- if (rxq->hw_timestamp) {
- pkt->timestamp =
- rte_be_to_cpu_64(cqe->timestamp);
- pkt->ol_flags |= PKT_RX_TIMESTAMP;
- }
+ rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
if (rxq->crc_present)
len -= ETHER_CRC_LEN;
PKT_LEN(pkt) = len;
@@ -1845,6 +1995,9 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
* changes.
*/
wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
+ /* If there's only one MR, no need to replace LKey in WQE. */
+ if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
+ wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
if (len > DATA_LEN(seg)) {
len -= DATA_LEN(seg);
++NB_SEGS(pkt);
@@ -1882,6 +2035,236 @@ skip:
return i;
}
+void
+mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
+{
+ struct mlx5_mprq_buf *buf = opaque;
+
+ if (rte_atomic16_read(&buf->refcnt) == 1) {
+ rte_mempool_put(buf->mp, buf);
+ } else if (rte_atomic16_add_return(&buf->refcnt, -1) == 0) {
+ rte_atomic16_set(&buf->refcnt, 1);
+ rte_mempool_put(buf->mp, buf);
+ }
+}
+
+void
+mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
+{
+ mlx5_mprq_buf_free_cb(NULL, buf);
+}
+
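A sketch of the reference counting these two helpers implement, for orientation only (the helper that fetches the in-flight buffer is hypothetical): the Rx queue owns one base reference, every stride handed out as an external mbuf buffer adds one, and whichever side releases last resets refcnt to 1 and returns the chunk to its mempool.

/* Illustrative only. */
struct mlx5_mprq_buf *buf = current_wqe_buf(rxq);	/* hypothetical helper */

rte_atomic16_add_return(&buf->refcnt, 1);	/* a stride is attached to an mbuf */
/* ... the Rx queue later replaces the WQE and drops its base reference ... */
mlx5_mprq_buf_free(buf);
/* ... rte_pktmbuf_free() on the attached mbuf invokes mlx5_mprq_buf_free_cb();
 * the last of the two releases puts buf back into buf->mp with refcnt == 1. */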
+static inline void
+mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx)
+{
+ struct mlx5_mprq_buf *rep = rxq->mprq_repl;
+ volatile struct mlx5_wqe_data_seg *wqe =
+ &((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
+ void *addr;
+
+ assert(rep != NULL);
+ /* Replace MPRQ buf. */
+ (*rxq->mprq_bufs)[rq_idx] = rep;
+ /* Replace WQE. */
+ addr = mlx5_mprq_buf_addr(rep);
+ wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
+ /* If there's only one MR, no need to replace LKey in WQE. */
+ if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
+ wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);
+ /* Stash a mbuf for next replacement. */
+ if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep)))
+ rxq->mprq_repl = rep;
+ else
+ rxq->mprq_repl = NULL;
+}
+
+/**
+ * DPDK callback for RX with Multi-Packet RQ support.
+ *
+ * @param dpdk_rxq
+ * Generic pointer to RX queue structure.
+ * @param[out] pkts
+ * Array to store received packets.
+ * @param pkts_n
+ * Maximum number of packets in array.
+ *
+ * @return
+ * Number of packets successfully received (<= pkts_n).
+ */
+uint16_t
+mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ struct mlx5_rxq_data *rxq = dpdk_rxq;
+ const unsigned int strd_n = 1 << rxq->strd_num_n;
+ const unsigned int strd_sz = 1 << rxq->strd_sz_n;
+ const unsigned int strd_shift =
+ MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
+ const unsigned int cq_mask = (1 << rxq->cqe_n) - 1;
+ const unsigned int wq_mask = (1 << rxq->elts_n) - 1;
+ volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
+ unsigned int i = 0;
+ uint16_t rq_ci = rxq->rq_ci;
+ uint16_t strd_idx = rxq->strd_ci;
+ struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
+
+ while (i < pkts_n) {
+ struct rte_mbuf *pkt;
+ void *addr;
+ int ret;
+ unsigned int len;
+ uint16_t consumed_strd;
+ uint32_t offset;
+ uint32_t byte_cnt;
+ uint32_t rss_hash_res = 0;
+
+ if (strd_idx == strd_n) {
+ /* Replace WQE only if the buffer is still in use. */
+ if (rte_atomic16_read(&buf->refcnt) > 1) {
+ mprq_buf_replace(rxq, rq_ci & wq_mask);
+ /* Release the old buffer. */
+ mlx5_mprq_buf_free(buf);
+ } else if (unlikely(rxq->mprq_repl == NULL)) {
+ struct mlx5_mprq_buf *rep;
+
+ /*
+ * Currently, the MPRQ mempool is out of buffers and memcpy
+ * is done regardless of the Rx packet size. Retry allocation
+ * to get back to normal.
+ */
+ if (!rte_mempool_get(rxq->mprq_mp,
+ (void **)&rep))
+ rxq->mprq_repl = rep;
+ }
+ /* Advance to the next WQE. */
+ strd_idx = 0;
+ ++rq_ci;
+ buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
+ }
+ cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
+ ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &rss_hash_res);
+ if (!ret)
+ break;
+ if (unlikely(ret == -1)) {
+ /* RX error, packet is likely too large. */
+ ++rxq->stats.idropped;
+ continue;
+ }
+ byte_cnt = ret;
+ consumed_strd = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
+ MLX5_MPRQ_STRIDE_NUM_SHIFT;
+ assert(consumed_strd);
+ /* Calculate offset before adding up stride index. */
+ offset = strd_idx * strd_sz + strd_shift;
+ strd_idx += consumed_strd;
+ if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
+ continue;
+ /*
+ * Currently configured to receive a packet per stride. But if
+ * MTU is adjusted through the kernel interface, the device could
+ * consume multiple strides without raising an error. In this
+ * case, the packet should be dropped because it is bigger than
+ * max_rx_pkt_len.
+ */
+ if (unlikely(consumed_strd > 1)) {
+ ++rxq->stats.idropped;
+ continue;
+ }
+ pkt = rte_pktmbuf_alloc(rxq->mp);
+ if (unlikely(pkt == NULL)) {
+ ++rxq->stats.rx_nombuf;
+ break;
+ }
+ len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
+ assert((int)len >= (rxq->crc_present << 2));
+ if (rxq->crc_present)
+ len -= ETHER_CRC_LEN;
+ addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf), offset);
+ /* Initialize the offload flag. */
+ pkt->ol_flags = 0;
+ /*
+ * Memcpy packets to the target mbuf if:
+ * - The packet size is smaller than mprq_max_memcpy_len.
+ * - The mempool for Multi-Packet RQ is out of buffers.
+ */
+ if (len <= rxq->mprq_max_memcpy_len || rxq->mprq_repl == NULL) {
+ /*
+ * When memcpy'ing packet due to out-of-buffer, the
+ * packet must be smaller than the target mbuf.
+ */
+ if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
+ rte_pktmbuf_free_seg(pkt);
+ ++rxq->stats.idropped;
+ continue;
+ }
+ rte_memcpy(rte_pktmbuf_mtod(pkt, void *), addr, len);
+ } else {
+ rte_iova_t buf_iova;
+ struct rte_mbuf_ext_shared_info *shinfo;
+ uint16_t buf_len = consumed_strd * strd_sz;
+
+ /* Increment the refcnt of the whole chunk. */
+ rte_atomic16_add_return(&buf->refcnt, 1);
+ assert((uint16_t)rte_atomic16_read(&buf->refcnt) <=
+ strd_n + 1);
+ addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
+ /*
+ * The MLX5 device doesn't use the iova itself, but it is
+ * needed in case the Rx packet is transmitted via a
+ * different PMD.
+ */
+ buf_iova = rte_mempool_virt2iova(buf) +
+ RTE_PTR_DIFF(addr, buf);
+ shinfo = rte_pktmbuf_ext_shinfo_init_helper(addr,
+ &buf_len, mlx5_mprq_buf_free_cb, buf);
+ /*
+ * EXT_ATTACHED_MBUF will be set in pkt->ol_flags when
+ * attaching the stride to the mbuf; more offload flags
+ * will be added below by calling rxq_cq_to_mbuf().
+ * Other fields will be overwritten.
+ */
+ rte_pktmbuf_attach_extbuf(pkt, addr, buf_iova, buf_len,
+ shinfo);
+ rte_pktmbuf_reset_headroom(pkt);
+ assert(pkt->ol_flags == EXT_ATTACHED_MBUF);
+ /*
+ * Prevent potential overflow due to MTU change through
+ * kernel interface.
+ */
+ if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
+ rte_pktmbuf_free_seg(pkt);
+ ++rxq->stats.idropped;
+ continue;
+ }
+ }
+ rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
+ PKT_LEN(pkt) = len;
+ DATA_LEN(pkt) = len;
+ PORT(pkt) = rxq->port_id;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment bytes counter. */
+ rxq->stats.ibytes += PKT_LEN(pkt);
+#endif
+ /* Return packet. */
+ *(pkts++) = pkt;
+ ++i;
+ }
+ /* Update the consumer indexes. */
+ rxq->strd_ci = strd_idx;
+ rte_cio_wmb();
+ *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
+ if (rq_ci != rxq->rq_ci) {
+ rxq->rq_ci = rq_ci;
+ rte_cio_wmb();
+ *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
+ }
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment packets counter. */
+ rxq->stats.ipackets += i;
+#endif
+ return i;
+}
+
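To make the stride addressing in the loop above concrete, a sketch of where the packet data is found (all names taken from the function; strd_sz is 1 << rxq->strd_sz_n and strd_shift is either 0 or MLX5_MPRQ_STRIDE_SHIFT_BYTE):

/* Illustrative only: address of the packet consumed at stride strd_idx. */
uintptr_t addr = (uintptr_t)mlx5_mprq_buf_addr(buf) +
		 strd_idx * strd_sz + strd_shift;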
/**
* Dummy DPDK callback for TX.
*
@@ -1899,11 +2282,10 @@ skip:
* Number of packets successfully transmitted (<= pkts_n).
*/
uint16_t
-removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+removed_tx_burst(void *dpdk_txq __rte_unused,
+ struct rte_mbuf **pkts __rte_unused,
+ uint16_t pkts_n __rte_unused)
{
- (void)dpdk_txq;
- (void)pkts;
- (void)pkts_n;
return 0;
}
@@ -1924,11 +2306,10 @@ removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
* Number of packets successfully received (<= pkts_n).
*/
uint16_t
-removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+removed_rx_burst(void *dpdk_rxq __rte_unused,
+ struct rte_mbuf **pkts __rte_unused,
+ uint16_t pkts_n __rte_unused)
{
- (void)dpdk_rxq;
- (void)pkts;
- (void)pkts_n;
return 0;
}
@@ -1940,58 +2321,49 @@ removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
*/
uint16_t __attribute__((weak))
-mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+mlx5_tx_burst_raw_vec(void *dpdk_txq __rte_unused,
+ struct rte_mbuf **pkts __rte_unused,
+ uint16_t pkts_n __rte_unused)
{
- (void)dpdk_txq;
- (void)pkts;
- (void)pkts_n;
return 0;
}
uint16_t __attribute__((weak))
-mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+mlx5_tx_burst_vec(void *dpdk_txq __rte_unused,
+ struct rte_mbuf **pkts __rte_unused,
+ uint16_t pkts_n __rte_unused)
{
- (void)dpdk_txq;
- (void)pkts;
- (void)pkts_n;
return 0;
}
uint16_t __attribute__((weak))
-mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+mlx5_rx_burst_vec(void *dpdk_rxq __rte_unused,
+ struct rte_mbuf **pkts __rte_unused,
+ uint16_t pkts_n __rte_unused)
{
- (void)dpdk_rxq;
- (void)pkts;
- (void)pkts_n;
return 0;
}
int __attribute__((weak))
-priv_check_raw_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
+mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev __rte_unused)
{
- (void)priv;
- (void)dev;
return -ENOTSUP;
}
int __attribute__((weak))
-priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
+mlx5_check_vec_tx_support(struct rte_eth_dev *dev __rte_unused)
{
- (void)priv;
- (void)dev;
return -ENOTSUP;
}
int __attribute__((weak))
-rxq_check_vec_support(struct mlx5_rxq_data *rxq)
+mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
{
- (void)rxq;
return -ENOTSUP;
}
int __attribute__((weak))
-priv_check_vec_rx_support(struct priv *priv)
+mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
{
- (void)priv;
return -ENOTSUP;
}
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index d7e89055..f53bb43c 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#ifndef RTE_PMD_MLX5_RXTX_H_
@@ -29,6 +29,7 @@
#include "mlx5_utils.h"
#include "mlx5.h"
+#include "mlx5_mr.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"
@@ -54,17 +55,6 @@ struct mlx5_txq_stats {
struct priv;
-/* Memory region queue object. */
-struct mlx5_mr {
- LIST_ENTRY(mlx5_mr) next; /**< Pointer to the next element. */
- rte_atomic32_t refcnt; /*<< Reference counter. */
- uint32_t lkey; /*<< rte_cpu_to_be_32(mr->lkey) */
- uintptr_t start; /* Start address of MR */
- uintptr_t end; /* End address of MR */
- struct ibv_mr *mr; /*<< Memory Region. */
- struct rte_mempool *mp; /*<< Memory Pool. */
-};
-
/* Compressed CQE context. */
struct rxq_zip {
uint16_t ai; /* Array index. */
@@ -74,10 +64,19 @@ struct rxq_zip {
uint32_t cqe_cnt; /* Number of CQEs. */
};
+/* Multi-Packet RQ buffer header. */
+struct mlx5_mprq_buf {
+ struct rte_mempool *mp;
+ rte_atomic16_t refcnt; /* Atomically accessed refcnt. */
+ uint8_t pad[RTE_PKTMBUF_HEADROOM]; /* Headroom for the first packet. */
+} __rte_cache_aligned;
+
+/* Get pointer to the first stride. */
+#define mlx5_mprq_buf_addr(ptr) ((ptr) + 1)
+
/* RX queue descriptor. */
struct mlx5_rxq_data {
unsigned int csum:1; /* Enable checksum offloading. */
- unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
unsigned int hw_timestamp:1; /* Enable HW timestamp. */
unsigned int vlan_strip:1; /* Enable VLAN stripping. */
unsigned int crc_present:1; /* CRC must be subtracted. */
@@ -86,24 +85,37 @@ struct mlx5_rxq_data {
unsigned int elts_n:4; /* Log 2 of Mbufs. */
unsigned int rss_hash:1; /* RSS hash result is enabled. */
unsigned int mark:1; /* Marked flow available on the queue. */
- unsigned int :15; /* Remaining bits. */
+ unsigned int strd_num_n:5; /* Log 2 of the number of stride. */
+ unsigned int strd_sz_n:4; /* Log 2 of stride size. */
+ unsigned int strd_shift_en:1; /* Enable 2bytes shift on a stride. */
+ unsigned int :6; /* Remaining bits. */
volatile uint32_t *rq_db;
volatile uint32_t *cq_db;
uint16_t port_id;
uint16_t rq_ci;
+ uint16_t strd_ci; /* Stride index in a WQE for Multi-Packet RQ. */
uint16_t rq_pi;
uint16_t cq_ci;
- volatile struct mlx5_wqe_data_seg(*wqes)[];
+ struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
+ uint16_t mprq_max_memcpy_len; /* Maximum size of packet to memcpy. */
+ volatile void *wqes;
volatile struct mlx5_cqe(*cqes)[];
struct rxq_zip zip; /* Compressed context. */
- struct rte_mbuf *(*elts)[];
+ RTE_STD_C11
+ union {
+ struct rte_mbuf *(*elts)[];
+ struct mlx5_mprq_buf *(*mprq_bufs)[];
+ };
struct rte_mempool *mp;
+ struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
+ struct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */
struct mlx5_rxq_stats stats;
uint64_t mbuf_initializer; /* Default rearm_data for vectorized Rx. */
struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
void *cq_uar; /* CQ user access region. */
uint32_t cqn; /* CQ number. */
uint8_t cq_arm_sn; /* CQ arm seq number. */
+ uint32_t tunnel; /* Tunnel information. */
} __rte_cache_aligned;
/* Verbs Rx queue elements. */
@@ -114,18 +126,19 @@ struct mlx5_rxq_ibv {
struct ibv_cq *cq; /* Completion Queue. */
struct ibv_wq *wq; /* Work Queue. */
struct ibv_comp_channel *channel;
- struct mlx5_mr *mr; /* Memory Region (for mp). */
};
/* RX queue control descriptor. */
struct mlx5_rxq_ctrl {
LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
rte_atomic32_t refcnt; /* Reference counter. */
- struct priv *priv; /* Back pointer to private data. */
struct mlx5_rxq_ibv *ibv; /* Verbs elements. */
+ struct priv *priv; /* Back pointer to private data. */
struct mlx5_rxq_data rxq; /* Data path structure. */
unsigned int socket; /* CPU socket ID for allocations. */
+ uint32_t tunnel_types[16]; /* Tunnel type counter. */
unsigned int irq:1; /* Whether IRQ is enabled. */
+ uint16_t idx; /* Queue index. */
};
/* Indirection table. */
@@ -133,7 +146,7 @@ struct mlx5_ind_table_ibv {
LIST_ENTRY(mlx5_ind_table_ibv) next; /* Pointer to the next element. */
rte_atomic32_t refcnt; /* Reference counter. */
struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
- uint16_t queues_n; /**< Number of queues in the list. */
+ uint32_t queues_n; /**< Number of queues in the list. */
uint16_t queues[]; /**< Queue list. */
};
@@ -144,7 +157,9 @@ struct mlx5_hrxq {
struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
struct ibv_qp *qp; /* Verbs queue pair. */
uint64_t hash_fields; /* Verbs Hash fields. */
- uint8_t rss_key_len; /* Hash key length in bytes. */
+ uint32_t tunnel; /* Tunnel type. */
+ uint32_t rss_level; /* RSS on tunnel level. */
+ uint32_t rss_key_len; /* Hash key length in bytes. */
uint8_t rss_key[]; /* Hash key. */
};
@@ -167,18 +182,18 @@ struct mlx5_txq_data {
uint16_t tso_en:1; /* When set hardware TSO is enabled. */
uint16_t tunnel_en:1;
/* When set TX offload for tunneled packets are supported. */
+ uint16_t swp_en:1; /* Whether SW parser is enabled. */
uint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
- uint16_t mr_cache_idx; /* Index of last hit entry. */
uint32_t qp_num_8s; /* QP number shifted by 8. */
uint64_t offloads; /* Offloads for Tx Queue. */
+ struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
volatile void *wqes; /* Work queue (use volatile to write into). */
volatile uint32_t *qp_db; /* Work queue doorbell. */
volatile uint32_t *cq_db; /* Completion queue doorbell. */
volatile void *bf_reg; /* Blueflame register remapped. */
- struct mlx5_mr *mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MR translation table. */
struct rte_mbuf *(*elts)[]; /* TX elements. */
struct mlx5_txq_stats stats; /* TX queue counters. */
} __rte_cache_aligned;
@@ -187,6 +202,7 @@ struct mlx5_txq_data {
struct mlx5_txq_ibv {
LIST_ENTRY(mlx5_txq_ibv) next; /* Pointer to the next element. */
rte_atomic32_t refcnt; /* Reference counter. */
+ struct mlx5_txq_ctrl *txq_ctrl; /* Pointer to the control queue. */
struct ibv_cq *cq; /* Completion Queue. */
struct ibv_qp *qp; /* Queue Pair. */
};
@@ -195,14 +211,15 @@ struct mlx5_txq_ibv {
struct mlx5_txq_ctrl {
LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
rte_atomic32_t refcnt; /* Reference counter. */
- struct priv *priv; /* Back pointer to private data. */
unsigned int socket; /* CPU socket ID for allocations. */
unsigned int max_inline_data; /* Max inline data. */
unsigned int max_tso_header; /* Max TSO header size. */
struct mlx5_txq_ibv *ibv; /* Verbs queue object. */
+ struct priv *priv; /* Back pointer to private data. */
struct mlx5_txq_data txq; /* Data path structure. */
off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
volatile void *bf_reg_orig; /* Blueflame register from verbs. */
+ uint16_t idx; /* Queue index. */
};
/* mlx5_rxq.c */
@@ -210,97 +227,126 @@ struct mlx5_txq_ctrl {
extern uint8_t rss_hash_default_key[];
extern const size_t rss_hash_default_key_len;
-void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *);
-int mlx5_rx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
- const struct rte_eth_rxconf *, struct rte_mempool *);
-void mlx5_rx_queue_release(void *);
-int priv_rx_intr_vec_enable(struct priv *priv);
-void priv_rx_intr_vec_disable(struct priv *priv);
+int mlx5_check_mprq_support(struct rte_eth_dev *dev);
+int mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq);
+int mlx5_mprq_enabled(struct rte_eth_dev *dev);
+int mlx5_mprq_free_mp(struct rte_eth_dev *dev);
+int mlx5_mprq_alloc_mp(struct rte_eth_dev *dev);
+void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl);
+int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp);
+void mlx5_rx_queue_release(void *dpdk_rxq);
+int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev);
+void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
-struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_new(struct priv *, uint16_t);
-struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_get(struct priv *, uint16_t);
-int mlx5_priv_rxq_ibv_release(struct priv *, struct mlx5_rxq_ibv *);
-int mlx5_priv_rxq_ibv_releasable(struct priv *, struct mlx5_rxq_ibv *);
-int mlx5_priv_rxq_ibv_verify(struct priv *);
-struct mlx5_rxq_ctrl *mlx5_priv_rxq_new(struct priv *, uint16_t,
- uint16_t, unsigned int,
- const struct rte_eth_rxconf *,
- struct rte_mempool *);
-struct mlx5_rxq_ctrl *mlx5_priv_rxq_get(struct priv *, uint16_t);
-int mlx5_priv_rxq_release(struct priv *, uint16_t);
-int mlx5_priv_rxq_releasable(struct priv *, uint16_t);
-int mlx5_priv_rxq_verify(struct priv *);
-int rxq_alloc_elts(struct mlx5_rxq_ctrl *);
-struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_new(struct priv *,
- uint16_t [],
- uint16_t);
-struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_get(struct priv *,
- uint16_t [],
- uint16_t);
-int mlx5_priv_ind_table_ibv_release(struct priv *, struct mlx5_ind_table_ibv *);
-int mlx5_priv_ind_table_ibv_verify(struct priv *);
-struct mlx5_hrxq *mlx5_priv_hrxq_new(struct priv *, uint8_t *, uint8_t,
- uint64_t, uint16_t [], uint16_t);
-struct mlx5_hrxq *mlx5_priv_hrxq_get(struct priv *, uint8_t *, uint8_t,
- uint64_t, uint16_t [], uint16_t);
-int mlx5_priv_hrxq_release(struct priv *, struct mlx5_hrxq *);
-int mlx5_priv_hrxq_ibv_verify(struct priv *);
-uint64_t mlx5_priv_get_rx_port_offloads(struct priv *);
-uint64_t mlx5_priv_get_rx_queue_offloads(struct priv *);
+struct mlx5_rxq_ibv *mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_rxq_ibv *mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv);
+int mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv);
+int mlx5_rxq_ibv_verify(struct rte_eth_dev *dev);
+struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
+ uint16_t desc, unsigned int socket,
+ const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp);
+struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_rxq_verify(struct rte_eth_dev *dev);
+int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
+int rxq_alloc_mprq_buf(struct mlx5_rxq_ctrl *rxq_ctrl);
+struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_new(struct rte_eth_dev *dev,
+ const uint16_t *queues,
+ uint32_t queues_n);
+struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_get(struct rte_eth_dev *dev,
+ const uint16_t *queues,
+ uint32_t queues_n);
+int mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_ibv *ind_tbl);
+int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev);
+struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
+ const uint8_t *rss_key, uint32_t rss_key_len,
+ uint64_t hash_fields,
+ const uint16_t *queues, uint32_t queues_n,
+ uint32_t tunnel, uint32_t rss_level);
+struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
+ const uint8_t *rss_key, uint32_t rss_key_len,
+ uint64_t hash_fields,
+ const uint16_t *queues, uint32_t queues_n,
+ uint32_t tunnel, uint32_t rss_level);
+int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq);
+int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev);
+uint64_t mlx5_get_rx_port_offloads(void);
+uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);
/* mlx5_txq.c */
-int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
- const struct rte_eth_txconf *);
-void mlx5_tx_queue_release(void *);
-int priv_tx_uar_remap(struct priv *priv, int fd);
-struct mlx5_txq_ibv *mlx5_priv_txq_ibv_new(struct priv *, uint16_t);
-struct mlx5_txq_ibv *mlx5_priv_txq_ibv_get(struct priv *, uint16_t);
-int mlx5_priv_txq_ibv_release(struct priv *, struct mlx5_txq_ibv *);
-int mlx5_priv_txq_ibv_releasable(struct priv *, struct mlx5_txq_ibv *);
-int mlx5_priv_txq_ibv_verify(struct priv *);
-struct mlx5_txq_ctrl *mlx5_priv_txq_new(struct priv *, uint16_t,
- uint16_t, unsigned int,
- const struct rte_eth_txconf *);
-struct mlx5_txq_ctrl *mlx5_priv_txq_get(struct priv *, uint16_t);
-int mlx5_priv_txq_release(struct priv *, uint16_t);
-int mlx5_priv_txq_releasable(struct priv *, uint16_t);
-int mlx5_priv_txq_verify(struct priv *);
-void txq_alloc_elts(struct mlx5_txq_ctrl *);
-uint64_t mlx5_priv_get_tx_port_offloads(struct priv *);
+int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_txconf *conf);
+void mlx5_tx_queue_release(void *dpdk_txq);
+int mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd);
+struct mlx5_txq_ibv *mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_txq_ibv *mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv);
+int mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv);
+int mlx5_txq_ibv_verify(struct rte_eth_dev *dev);
+struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx,
+ uint16_t desc, unsigned int socket,
+ const struct rte_eth_txconf *conf);
+struct mlx5_txq_ctrl *mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_txq_verify(struct rte_eth_dev *dev);
+void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl);
+uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev);
/* mlx5_rxtx.c */
extern uint32_t mlx5_ptype_table[];
+extern uint8_t mlx5_cksum_table[];
+extern uint8_t mlx5_swp_types_table[];
void mlx5_set_ptype_table(void);
-uint16_t mlx5_tx_burst(void *, struct rte_mbuf **, uint16_t);
-uint16_t mlx5_tx_burst_mpw(void *, struct rte_mbuf **, uint16_t);
-uint16_t mlx5_tx_burst_mpw_inline(void *, struct rte_mbuf **, uint16_t);
-uint16_t mlx5_tx_burst_empw(void *, struct rte_mbuf **, uint16_t);
-uint16_t mlx5_rx_burst(void *, struct rte_mbuf **, uint16_t);
-uint16_t removed_tx_burst(void *, struct rte_mbuf **, uint16_t);
-uint16_t removed_rx_burst(void *, struct rte_mbuf **, uint16_t);
-int mlx5_rx_descriptor_status(void *, uint16_t);
-int mlx5_tx_descriptor_status(void *, uint16_t);
+void mlx5_set_cksum_table(void);
+void mlx5_set_swp_types_table(void);
+uint16_t mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
+void mlx5_mprq_buf_free_cb(void *addr, void *opaque);
+void mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf);
+uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);
/* Vectorized version of mlx5_rxtx.c */
-int priv_check_raw_vec_tx_support(struct priv *, struct rte_eth_dev *);
-int priv_check_vec_tx_support(struct priv *, struct rte_eth_dev *);
-int rxq_check_vec_support(struct mlx5_rxq_data *);
-int priv_check_vec_rx_support(struct priv *);
-uint16_t mlx5_tx_burst_raw_vec(void *, struct rte_mbuf **, uint16_t);
-uint16_t mlx5_tx_burst_vec(void *, struct rte_mbuf **, uint16_t);
-uint16_t mlx5_rx_burst_vec(void *, struct rte_mbuf **, uint16_t);
+int mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev);
+int mlx5_check_vec_tx_support(struct rte_eth_dev *dev);
+int mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq_data);
+int mlx5_check_vec_rx_support(struct rte_eth_dev *dev);
+uint16_t mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
/* mlx5_mr.c */
-void mlx5_mp2mr_iter(struct rte_mempool *, void *);
-struct mlx5_mr *priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *,
- struct rte_mempool *, unsigned int);
-struct mlx5_mr *mlx5_txq_mp2mr_reg(struct mlx5_txq_data *, struct rte_mempool *,
- unsigned int);
+void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
+uint32_t mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr);
+uint32_t mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr);
#ifndef NDEBUG
/**
@@ -363,9 +409,10 @@ check_cqe(volatile struct mlx5_cqe *cqe,
(syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR))
return 0;
if (!check_cqe_seen(cqe)) {
- ERROR("unexpected CQE error %u (0x%02x)"
- " syndrome 0x%02x",
- op_code, op_code, syndrome);
+ DRV_LOG(ERR,
+ "unexpected CQE error %u (0x%02x) syndrome"
+ " 0x%02x",
+ op_code, op_code, syndrome);
rte_hexdump(stderr, "MLX5 Error CQE:",
(const void *)((uintptr_t)err_cqe),
sizeof(*err_cqe));
@@ -374,8 +421,8 @@ check_cqe(volatile struct mlx5_cqe *cqe,
} else if ((op_code != MLX5_CQE_RESP_SEND) &&
(op_code != MLX5_CQE_REQ)) {
if (!check_cqe_seen(cqe)) {
- ERROR("unexpected CQE opcode %u (0x%02x)",
- op_code, op_code);
+ DRV_LOG(ERR, "unexpected CQE opcode %u (0x%02x)",
+ op_code, op_code);
rte_hexdump(stderr, "MLX5 CQE:",
(const void *)((uintptr_t)cqe),
sizeof(*cqe));
@@ -435,7 +482,7 @@ mlx5_tx_complete(struct mlx5_txq_data *txq)
if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) ||
(MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) {
if (!check_cqe_seen(cqe)) {
- ERROR("unexpected error CQE, TX stopped");
+ DRV_LOG(ERR, "unexpected error CQE, Tx stopped");
rte_hexdump(stderr, "MLX5 TXQ:",
(const void *)((uintptr_t)txq->wqes),
((1 << txq->wqe_n) *
@@ -487,77 +534,65 @@ mlx5_tx_complete(struct mlx5_txq_data *txq)
}
/**
- * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which
- * the cloned mbuf is allocated is returned instead.
+ * Query LKey from a packet buffer for Rx. No need to flush local caches for Rx
+ * as the mempool is pre-configured and static.
*
- * @param buf
- * Pointer to mbuf.
+ * @param rxq
+ * Pointer to Rx queue structure.
+ * @param addr
+ * Address to search.
*
* @return
- * Memory pool where data is located for given mbuf.
+ * Searched LKey on success, UINT32_MAX on no match.
*/
-static struct rte_mempool *
-mlx5_tx_mb2mp(struct rte_mbuf *buf)
+static __rte_always_inline uint32_t
+mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr)
{
- if (unlikely(RTE_MBUF_INDIRECT(buf)))
- return rte_mbuf_from_indirect(buf)->pool;
- return buf->pool;
+ struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
+ uint32_t lkey;
+
+ /* Linear search on MR cache array. */
+ lkey = mlx5_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
+ MLX5_MR_CACHE_N, addr);
+ if (likely(lkey != UINT32_MAX))
+ return lkey;
+ /* Take slower bottom-half (Binary Search) on miss. */
+ return mlx5_rx_addr2mr_bh(rxq, addr);
}
+#define mlx5_rx_mb2mr(rxq, mb) mlx5_rx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
+
/**
- * Get Memory Region (MR) <-> rte_mbuf association from txq->mp2mr[].
- * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
- * remove an entry first.
+ * Query LKey from a packet buffer for Tx. If not found, add the mempool.
*
* @param txq
- * Pointer to TX queue structure.
- * @param[in] mp
- * Memory Pool for which a Memory Region lkey must be returned.
+ * Pointer to Tx queue structure.
+ * @param addr
+ * Address to search.
*
* @return
- * mr->lkey on success, (uint32_t)-1 on failure.
+ * Searched LKey on success, UINT32_MAX on no match.
*/
static __rte_always_inline uint32_t
-mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
+mlx5_tx_addr2mr(struct mlx5_txq_data *txq, uintptr_t addr)
{
- uint16_t i = txq->mr_cache_idx;
- uintptr_t addr = rte_pktmbuf_mtod(mb, uintptr_t);
- struct mlx5_mr *mr;
-
- assert(i < RTE_DIM(txq->mp2mr));
- if (likely(txq->mp2mr[i]->start <= addr && txq->mp2mr[i]->end > addr))
- return txq->mp2mr[i]->lkey;
- for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
- if (unlikely(txq->mp2mr[i] == NULL ||
- txq->mp2mr[i]->mr == NULL)) {
- /* Unknown MP, add a new MR for it. */
- break;
- }
- if (txq->mp2mr[i]->start <= addr &&
- txq->mp2mr[i]->end > addr) {
- assert(txq->mp2mr[i]->lkey != (uint32_t)-1);
- txq->mr_cache_idx = i;
- return txq->mp2mr[i]->lkey;
- }
- }
- mr = mlx5_txq_mp2mr_reg(txq, mlx5_tx_mb2mp(mb), i);
- /*
- * Request the reference to use in this queue, the original one is
- * kept by the control plane.
- */
- if (mr) {
- rte_atomic32_inc(&mr->refcnt);
- txq->mr_cache_idx = i >= RTE_DIM(txq->mp2mr) ? i - 1 : i;
- return mr->lkey;
- } else {
- struct rte_mempool *mp = mlx5_tx_mb2mp(mb);
-
- WARN("Failed to register mempool 0x%p(%s)",
- (void *)mp, mp->name);
- }
- return (uint32_t)-1;
+ struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+ uint32_t lkey;
+
+ /* Check generation bit to see if there's any change on existing MRs. */
+ if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
+ mlx5_mr_flush_local_cache(mr_ctrl);
+ /* Linear search on MR cache array. */
+ lkey = mlx5_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
+ MLX5_MR_CACHE_N, addr);
+ if (likely(lkey != UINT32_MAX))
+ return lkey;
+ /* Take slower bottom-half (binary search) on miss. */
+ return mlx5_tx_addr2mr_bh(txq, addr);
}
+#define mlx5_tx_mb2mr(rxq, mb) mlx5_tx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
+
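A minimal usage sketch of the Tx macro above, mirroring how the burst routines in this patch consume it when filling a data segment; the LKey is returned ready to be written into the WQE, as in the existing callers, and the dseg-fetching helper is hypothetical:

/* Illustrative only: resolve the LKey of an mbuf and build one data segment. */
volatile struct mlx5_wqe_data_seg *dseg = get_next_dseg(txq);	/* hypothetical */

dseg->byte_count = rte_cpu_to_be_32(DATA_LEN(buf));
dseg->lkey = mlx5_tx_mb2mr(txq, buf);
dseg->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t));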
/**
* Ring TX queue doorbell and flush the update if requested.
*
@@ -599,38 +634,100 @@ mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
}
/**
- * Convert the Checksum offloads to Verbs.
+ * Convert mbuf to Verbs SWP.
*
* @param txq_data
* Pointer to the Tx queue.
* @param buf
* Pointer to the mbuf.
+ * @param tso
+ * TSO offloads enabled.
+ * @param vlan
+ * VLAN offloads enabled.
+ * @param offsets
+ * Pointer to the SWP header offsets.
+ * @param swp_types
+ * Pointer to the SWP header types.
+ */
+static __rte_always_inline void
+txq_mbuf_to_swp(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
+ uint8_t *offsets, uint8_t *swp_types)
+{
+ const uint64_t vlan = buf->ol_flags & PKT_TX_VLAN_PKT;
+ const uint64_t tunnel = buf->ol_flags & PKT_TX_TUNNEL_MASK;
+ const uint64_t tso = buf->ol_flags & PKT_TX_TCP_SEG;
+ const uint64_t csum_flags = buf->ol_flags & PKT_TX_L4_MASK;
+ const uint64_t inner_ip =
+ buf->ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6);
+ const uint64_t ol_flags_mask = PKT_TX_L4_MASK | PKT_TX_IPV6 |
+ PKT_TX_OUTER_IPV6;
+ uint16_t idx;
+ uint16_t off;
+
+ if (likely(!txq->swp_en || (tunnel != PKT_TX_TUNNEL_UDP &&
+ tunnel != PKT_TX_TUNNEL_IP)))
+ return;
+ /*
+ * The index should have:
+ * bit[0:1] = PKT_TX_L4_MASK
+ * bit[4] = PKT_TX_IPV6
+ * bit[8] = PKT_TX_OUTER_IPV6
+ * bit[9] = PKT_TX_OUTER_UDP
+ */
+ idx = (buf->ol_flags & ol_flags_mask) >> 52;
+ if (tunnel == PKT_TX_TUNNEL_UDP)
+ idx |= 1 << 9;
+ *swp_types = mlx5_swp_types_table[idx];
+ /*
+ * Set offsets for SW parser. Since ConnectX-5, SW parser just
+ * complements HW parser. SW parser starts to engage only if HW parser
+ * can't reach a header. For the older devices, HW parser will not kick
+ * in if any of SWP offsets is set. Therefore, all of the L3 offsets
+ * should be set regardless of HW offload.
+ */
+ off = buf->outer_l2_len + (vlan ? sizeof(struct vlan_hdr) : 0);
+ offsets[1] = off >> 1; /* Outer L3 offset. */
+ off += buf->outer_l3_len;
+ if (tunnel == PKT_TX_TUNNEL_UDP)
+ offsets[0] = off >> 1; /* Outer L4 offset. */
+ if (inner_ip) {
+ off += buf->l2_len;
+ offsets[3] = off >> 1; /* Inner L3 offset. */
+ if (csum_flags == PKT_TX_TCP_CKSUM || tso ||
+ csum_flags == PKT_TX_UDP_CKSUM) {
+ off += buf->l3_len;
+ offsets[2] = off >> 1; /* Inner L4 offset. */
+ }
+ }
+}
+
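As a worked example of the index and offsets above (a sketch; the flag bit positions follow the comment block): an outer-IPv6, UDP-tunneled packet carrying an inner IPv4/UDP datagram with PKT_TX_UDP_CKSUM requested yields idx = 3 | (1 << 8) | (1 << 9) = 0x303, so mlx5_swp_types_table[idx] combines MLX5_ETH_WQE_L3_OUTER_IPV6, MLX5_ETH_WQE_L4_OUTER_UDP and MLX5_ETH_WQE_L4_INNER_UDP, while offsets[0..3] end up holding the outer L4, outer L3, inner L4 and inner L3 header offsets respectively, each expressed in 2-byte units from the start of the frame.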
+/**
+ * Convert the Checksum offloads to Verbs.
+ *
+ * @param buf
+ * Pointer to the mbuf.
*
* @return
- * the converted cs_flags.
+ * Converted checksum flags.
*/
static __rte_always_inline uint8_t
-txq_ol_cksum_to_cs(struct mlx5_txq_data *txq_data, struct rte_mbuf *buf)
+txq_ol_cksum_to_cs(struct rte_mbuf *buf)
{
- uint8_t cs_flags = 0;
-
- /* Should we enable HW CKSUM offload */
- if (buf->ol_flags &
- (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM |
- PKT_TX_OUTER_IP_CKSUM)) {
- if (txq_data->tunnel_en &&
- (buf->ol_flags &
- (PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN))) {
- cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
- MLX5_ETH_WQE_L4_INNER_CSUM;
- if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
- cs_flags |= MLX5_ETH_WQE_L3_CSUM;
- } else {
- cs_flags = MLX5_ETH_WQE_L3_CSUM |
- MLX5_ETH_WQE_L4_CSUM;
- }
- }
- return cs_flags;
+ uint32_t idx;
+ uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
+ const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
+ PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
+
+ /*
+ * The index should have:
+ * bit[0] = PKT_TX_TCP_SEG
+ * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
+ * bit[4] = PKT_TX_IP_CKSUM
+ * bit[8] = PKT_TX_OUTER_IP_CKSUM
+ * bit[9] = tunnel
+ */
+ idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
+ return mlx5_cksum_table[idx];
}
/**
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index b66c2916..0a4aed8f 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <assert.h>
@@ -42,8 +42,6 @@
/**
* Count the number of packets having same ol_flags and calculate cs_flags.
*
- * @param txq
- * Pointer to TX queue structure.
* @param pkts
* Pointer to array of packets.
* @param pkts_n
@@ -55,8 +53,7 @@
* Number of packets having same ol_flags.
*/
static inline unsigned int
-txq_calc_offload(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
- uint16_t pkts_n, uint8_t *cs_flags)
+txq_calc_offload(struct rte_mbuf **pkts, uint16_t pkts_n, uint8_t *cs_flags)
{
unsigned int pos;
const uint64_t ol_mask =
@@ -70,7 +67,7 @@ txq_calc_offload(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
for (pos = 1; pos < pkts_n; ++pos)
if ((pkts[pos]->ol_flags ^ pkts[0]->ol_flags) & ol_mask)
break;
- *cs_flags = txq_ol_cksum_to_cs(txq, pkts[0]);
+ *cs_flags = txq_ol_cksum_to_cs(pkts[0]);
return pos;
}
@@ -141,7 +138,7 @@ mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
n = txq_count_contig_single_seg(&pkts[nb_tx], n);
if (txq->offloads & MLX5_VEC_TX_CKSUM_OFFLOAD_CAP)
- n = txq_calc_offload(txq, &pkts[nb_tx], n, &cs_flags);
+ n = txq_calc_offload(&pkts[nb_tx], n, &cs_flags);
ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags);
nb_tx += ret;
if (!ret)
@@ -223,17 +220,14 @@ mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
/**
* Check Tx queue flags are set for raw vectorized Tx.
*
- * @param priv
- * Pointer to private structure.
* @param dev
- * Pointer to rte_eth_dev structure.
+ * Pointer to Ethernet device.
*
* @return
* 1 if supported, negative errno value if not.
*/
int __attribute__((cold))
-priv_check_raw_vec_tx_support(__rte_unused struct priv *priv,
- struct rte_eth_dev *dev)
+mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev)
{
uint64_t offloads = dev->data->dev_conf.txmode.offloads;
@@ -246,17 +240,16 @@ priv_check_raw_vec_tx_support(__rte_unused struct priv *priv,
/**
* Check a device can support vectorized TX.
*
- * @param priv
- * Pointer to private structure.
* @param dev
- * Pointer to rte_eth_dev structure.
+ * Pointer to Ethernet device.
*
* @return
* 1 if supported, negative errno value if not.
*/
int __attribute__((cold))
-priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
+mlx5_check_vec_tx_support(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
uint64_t offloads = dev->data->dev_conf.txmode.offloads;
if (!priv->config.tx_vec_en ||
@@ -277,11 +270,13 @@ priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
* 1 if supported, negative errno value if not.
*/
int __attribute__((cold))
-rxq_check_vec_support(struct mlx5_rxq_data *rxq)
+mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq)
{
struct mlx5_rxq_ctrl *ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ if (mlx5_mprq_enabled(ETH_DEV(ctrl->priv)))
+ return -ENOTSUP;
if (!ctrl->priv->config.rx_vec_en || rxq->sges_n != 0)
return -ENOTSUP;
return 1;
@@ -290,26 +285,29 @@ rxq_check_vec_support(struct mlx5_rxq_data *rxq)
/**
* Check a device can support vectorized RX.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return
* 1 if supported, negative errno value if not.
*/
int __attribute__((cold))
-priv_check_vec_rx_support(struct priv *priv)
+mlx5_check_vec_rx_support(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
uint16_t i;
if (!priv->config.rx_vec_en)
return -ENOTSUP;
+ if (mlx5_mprq_enabled(dev))
+ return -ENOTSUP;
/* All the configured queues should support. */
for (i = 0; i < priv->rxqs_n; ++i) {
struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
if (!rxq)
continue;
- if (rxq_check_vec_support(rxq) < 0)
+ if (mlx5_rxq_check_vec_support(rxq) < 0)
break;
}
if (i != priv->rxqs_n)
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h
index 44856bbf..598dc751 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef RTE_PMD_MLX5_RXTX_VEC_H_
@@ -87,7 +87,8 @@ mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
const uint16_t q_mask = q_n - 1;
uint16_t elts_idx = rxq->rq_ci & q_mask;
struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
- volatile struct mlx5_wqe_data_seg *wq = &(*rxq->wqes)[elts_idx];
+ volatile struct mlx5_wqe_data_seg *wq =
+ &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[elts_idx];
unsigned int i;
assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH);
@@ -99,9 +100,13 @@ mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
rxq->stats.rx_nombuf += n;
return;
}
- for (i = 0; i < n; ++i)
+ for (i = 0; i < n; ++i) {
wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr +
RTE_PKTMBUF_HEADROOM);
+ /* If there's only one MR, no need to replace LKey in WQE. */
+ if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
+ wq[i].lkey = mlx5_rx_mb2mr(rxq, elts[i]);
+ }
rxq->rq_ci += n;
/* Prevent overflowing into consumed mbufs. */
elts_idx = rxq->rq_ci & q_mask;
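
The replenish routine above addresses its rings with a power-of-two mask (rq_ci & q_mask) rather than a modulo. A standalone sketch of that indexing, with made-up sizes and values rather than the driver's own structures:

/* Standalone sketch: power-of-two ring indexing with a running index. */
#include <stdint.h>
#include <stdio.h>

#define Q_N 8                           /* ring size, must be a power of two */

int
main(void)
{
	uint64_t wq[Q_N] = { 0 };       /* stand-in for the WQE address field */
	const uint16_t q_mask = Q_N - 1;
	uint16_t rq_ci = 14;            /* index keeps counting past Q_N */
	uint16_t n = 4;                 /* entries refilled in one batch */
	uint16_t i;

	for (i = 0; i < n; ++i)
		wq[(rq_ci + i) & q_mask] = 0x1000u + i; /* fake buffer address */
	rq_ci += n;                     /* only the masked value touches the ring */
	printf("next free slot: %u\n", rq_ci & q_mask);
	return 0;
}
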
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
index bbe1818e..71a5eaf2 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef RTE_PMD_MLX5_RXTX_VEC_NEON_H_
@@ -142,7 +142,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
break;
wqe = &((volatile struct mlx5_wqe64 *)
txq->wqes)[wqe_ci & wq_mask].hdr;
- cs_flags = txq_ol_cksum_to_cs(txq, buf);
+ cs_flags = txq_ol_cksum_to_cs(buf);
/* Title WQEBB pointer. */
t_wqe = (uint8x16_t *)wqe;
dseg = (uint8_t *)(wqe + 1);
@@ -167,8 +167,8 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
vst1q_u8((void *)t_wqe, ctrl);
/* Fill ESEG in the header. */
vst1q_u16((void *)(t_wqe + 1),
- (uint16x8_t) { 0, 0, cs_flags, rte_cpu_to_be_16(len),
- 0, 0, 0, 0 });
+ ((uint16x8_t) { 0, 0, cs_flags, rte_cpu_to_be_16(len),
+ 0, 0, 0, 0 }));
txq->wqe_ci = wqe_ci;
}
if (!n)
@@ -300,10 +300,10 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
vst1q_u8((void *)t_wqe, ctrl);
/* Fill ESEG in the header. */
vst1q_u8((void *)(t_wqe + 1),
- (uint8x16_t) { 0, 0, 0, 0,
- cs_flags, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0 });
+ ((uint8x16_t) { 0, 0, 0, 0,
+ cs_flags, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0 }));
#ifdef MLX5_PMD_SOFT_COUNTERS
txq->stats.opackets += pkts_n;
#endif
@@ -551,6 +551,7 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
const uint64x1_t mbuf_init = vld1_u64(&rxq->mbuf_initializer);
const uint64x1_t r32_mask = vcreate_u64(0xffffffff);
uint64x2_t rearm0, rearm1, rearm2, rearm3;
+ uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3;
if (rxq->mark) {
const uint32x4_t ft_def = vdupq_n_u32(MLX5_FLOW_MARK_DEFAULT);
@@ -583,14 +584,18 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
ptype = vshrn_n_u32(ptype_info, 10);
/* Errored packets will have RTE_PTYPE_ALL_MASK. */
ptype = vorr_u16(ptype, op_err);
- pkts[0]->packet_type =
- mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 6)];
- pkts[1]->packet_type =
- mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 4)];
- pkts[2]->packet_type =
- mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 2)];
- pkts[3]->packet_type =
- mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 0)];
+ pt_idx0 = vget_lane_u8(vreinterpret_u8_u16(ptype), 6);
+ pt_idx1 = vget_lane_u8(vreinterpret_u8_u16(ptype), 4);
+ pt_idx2 = vget_lane_u8(vreinterpret_u8_u16(ptype), 2);
+ pt_idx3 = vget_lane_u8(vreinterpret_u8_u16(ptype), 0);
+ pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] |
+ !!(pt_idx0 & (1 << 6)) * rxq->tunnel;
+ pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] |
+ !!(pt_idx1 & (1 << 6)) * rxq->tunnel;
+ pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] |
+ !!(pt_idx2 & (1 << 6)) * rxq->tunnel;
+ pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] |
+ !!(pt_idx3 & (1 << 6)) * rxq->tunnel;
/* Fill flags for checksum and VLAN. */
pinfo = vandq_u32(ptype_info, ptype_ol_mask);
pinfo = vreinterpretq_u32_u8(
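
The rewritten NEON path ORs the tunnel ptype in without a branch: bit 6 of the ptype index is collapsed to 0 or 1 and multiplied by rxq->tunnel. A standalone sketch of that trick with made-up constants:

/* Standalone sketch: branchless conditional OR of the tunnel ptype. */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t tunnel = 0x40000;  /* stand-in for rxq->tunnel */
	uint32_t base = 0x211;      /* stand-in for mlx5_ptype_table[idx] */
	uint8_t pt_idx = 0x45;      /* bit 6 set -> tunneled packet */
	uint32_t packet_type = base | !!(pt_idx & (1 << 6)) * tunnel;

	printf("packet_type = 0x%x\n", packet_type);
	return 0;
}
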
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
index c088bcb5..3e985d61 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef RTE_PMD_MLX5_RXTX_VEC_SSE_H_
@@ -144,7 +144,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
}
wqe = &((volatile struct mlx5_wqe64 *)
txq->wqes)[wqe_ci & wq_mask].hdr;
- cs_flags = txq_ol_cksum_to_cs(txq, buf);
+ cs_flags = txq_ol_cksum_to_cs(buf);
/* Title WQEBB pointer. */
t_wqe = (__m128i *)wqe;
dseg = (__m128i *)(wqe + 1);
@@ -542,6 +542,7 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
const __m128i mbuf_init =
_mm_loadl_epi64((__m128i *)&rxq->mbuf_initializer);
__m128i rearm0, rearm1, rearm2, rearm3;
+ uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3;
/* Extract pkt_info field. */
pinfo0 = _mm_unpacklo_epi32(cqes[0], cqes[1]);
@@ -595,10 +596,18 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
/* Errored packets will have RTE_PTYPE_ALL_MASK. */
op_err = _mm_srli_epi16(op_err, 8);
ptype = _mm_or_si128(ptype, op_err);
- pkts[0]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 0)];
- pkts[1]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 2)];
- pkts[2]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 4)];
- pkts[3]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 6)];
+ pt_idx0 = _mm_extract_epi8(ptype, 0);
+ pt_idx1 = _mm_extract_epi8(ptype, 2);
+ pt_idx2 = _mm_extract_epi8(ptype, 4);
+ pt_idx3 = _mm_extract_epi8(ptype, 6);
+ pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] |
+ !!(pt_idx0 & (1 << 6)) * rxq->tunnel;
+ pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] |
+ !!(pt_idx1 & (1 << 6)) * rxq->tunnel;
+ pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] |
+ !!(pt_idx2 & (1 << 6)) * rxq->tunnel;
+ pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] |
+ !!(pt_idx3 & (1 << 6)) * rxq->tunnel;
/* Fill flags for checksum and VLAN. */
pinfo = _mm_and_si128(pinfo, ptype_ol_mask);
pinfo = _mm_shuffle_epi8(cv_flag_sel, pinfo);
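
The SSE path applies the same tunnel-bit trick, first pulling the four per-packet indexes out of the vector with _mm_extract_epi8(). A standalone sketch with arbitrary lane values (build with -msse4.1 on x86):

/* Standalone sketch: extracting per-packet ptype indexes from one vector. */
#include <smmintrin.h>
#include <stdio.h>

int
main(void)
{
	/* Lanes 0, 2, 4 and 6 carry the four indexes (arbitrary values here). */
	__m128i ptype = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
				     0, 0x44, 0, 0x12, 0, 0x08, 0, 0x01);
	int idx0 = _mm_extract_epi8(ptype, 0);
	int idx1 = _mm_extract_epi8(ptype, 2);
	int idx2 = _mm_extract_epi8(ptype, 4);
	int idx3 = _mm_extract_epi8(ptype, 6);

	printf("%d %d %d %d\n", idx0, idx1, idx2, idx3);
	return 0;
}
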
diff --git a/drivers/net/mlx5/mlx5_socket.c b/drivers/net/mlx5/mlx5_socket.c
index 61c1a4a5..99297d5c 100644
--- a/drivers/net/mlx5/mlx5_socket.c
+++ b/drivers/net/mlx5/mlx5_socket.c
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2016 6WIND S.A.
+ * Copyright 2016 Mellanox Technologies, Ltd
*/
#define _GNU_SOURCE
@@ -18,21 +19,21 @@
/**
* Initialise the socket to communicate with the secondary process
*
- * @param[in] priv
- * Pointer to private structure.
+ * @param[in] dev
+ * Pointer to Ethernet device.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-priv_socket_init(struct priv *priv)
+mlx5_socket_init(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct sockaddr_un sun = {
.sun_family = AF_UNIX,
};
int ret;
int flags;
- struct stat file_stat;
/*
* Initialise the socket to communicate with the secondary
@@ -40,70 +41,77 @@ priv_socket_init(struct priv *priv)
*/
ret = socket(AF_UNIX, SOCK_STREAM, 0);
if (ret < 0) {
- WARN("secondary process not supported: %s", strerror(errno));
- return ret;
+ rte_errno = errno;
+ DRV_LOG(WARNING, "port %u secondary process not supported: %s",
+ dev->data->port_id, strerror(errno));
+ goto error;
}
priv->primary_socket = ret;
flags = fcntl(priv->primary_socket, F_GETFL, 0);
- if (flags == -1)
- goto out;
+ if (flags == -1) {
+ rte_errno = errno;
+ goto error;
+ }
ret = fcntl(priv->primary_socket, F_SETFL, flags | O_NONBLOCK);
- if (ret < 0)
- goto out;
+ if (ret < 0) {
+ rte_errno = errno;
+ goto error;
+ }
snprintf(sun.sun_path, sizeof(sun.sun_path), "/var/tmp/%s_%d",
MLX5_DRIVER_NAME, priv->primary_socket);
- ret = stat(sun.sun_path, &file_stat);
- if (!ret)
- claim_zero(remove(sun.sun_path));
+ remove(sun.sun_path);
ret = bind(priv->primary_socket, (const struct sockaddr *)&sun,
sizeof(sun));
if (ret < 0) {
- WARN("cannot bind socket, secondary process not supported: %s",
- strerror(errno));
+ rte_errno = errno;
+ DRV_LOG(WARNING,
+ "port %u cannot bind socket, secondary process not"
+ " supported: %s",
+ dev->data->port_id, strerror(errno));
goto close;
}
ret = listen(priv->primary_socket, 0);
if (ret < 0) {
- WARN("Secondary process not supported: %s", strerror(errno));
+ rte_errno = errno;
+ DRV_LOG(WARNING, "port %u secondary process not supported: %s",
+ dev->data->port_id, strerror(errno));
goto close;
}
- return ret;
+ return 0;
close:
remove(sun.sun_path);
-out:
+error:
claim_zero(close(priv->primary_socket));
priv->primary_socket = 0;
- return -(ret);
+ return -rte_errno;
}
/**
* Uninitialise the socket to communicate with the secondary process
*
- * @param[in] priv
- * Pointer to private structure.
- *
- * @return
- * 0 on success, errno value on failure.
+ * @param[in] dev
*/
-int
-priv_socket_uninit(struct priv *priv)
+void
+mlx5_socket_uninit(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
+
MKSTR(path, "/var/tmp/%s_%d", MLX5_DRIVER_NAME, priv->primary_socket);
claim_zero(close(priv->primary_socket));
priv->primary_socket = 0;
claim_zero(remove(path));
- return 0;
}
/**
* Handle socket interrupts.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*/
void
-priv_socket_handle(struct priv *priv)
+mlx5_socket_handle(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
int conn_sock;
int ret = 0;
struct cmsghdr *cmsg = NULL;
@@ -125,25 +133,30 @@ priv_socket_handle(struct priv *priv)
/* Accept the connection from the client. */
conn_sock = accept(priv->primary_socket, NULL, NULL);
if (conn_sock < 0) {
- WARN("connection failed: %s", strerror(errno));
+ DRV_LOG(WARNING, "port %u connection failed: %s",
+ dev->data->port_id, strerror(errno));
return;
}
ret = setsockopt(conn_sock, SOL_SOCKET, SO_PASSCRED, &(int){1},
sizeof(int));
if (ret < 0) {
- WARN("cannot change socket options");
- goto out;
+ ret = errno;
+ DRV_LOG(WARNING, "port %u cannot change socket options: %s",
+ dev->data->port_id, strerror(rte_errno));
+ goto error;
}
ret = recvmsg(conn_sock, &msg, MSG_WAITALL);
if (ret < 0) {
- WARN("received an empty message: %s", strerror(errno));
- goto out;
+ ret = errno;
+ DRV_LOG(WARNING, "port %u received an empty message: %s",
+ dev->data->port_id, strerror(rte_errno));
+ goto error;
}
/* Expect to receive credentials only. */
cmsg = CMSG_FIRSTHDR(&msg);
if (cmsg == NULL) {
- WARN("no message");
- goto out;
+ DRV_LOG(WARNING, "port %u no message", dev->data->port_id);
+ goto error;
}
if ((cmsg->cmsg_type == SCM_CREDENTIALS) &&
(cmsg->cmsg_len >= sizeof(*cred))) {
@@ -152,14 +165,16 @@ priv_socket_handle(struct priv *priv)
}
cmsg = CMSG_NXTHDR(&msg, cmsg);
if (cmsg != NULL) {
- WARN("Message wrongly formatted");
- goto out;
+ DRV_LOG(WARNING, "port %u message wrongly formatted",
+ dev->data->port_id);
+ goto error;
}
/* Make sure all the ancillary data was received and valid. */
if ((cred == NULL) || (cred->uid != getuid()) ||
(cred->gid != getgid())) {
- WARN("wrong credentials");
- goto out;
+ DRV_LOG(WARNING, "port %u wrong credentials",
+ dev->data->port_id);
+ goto error;
}
/* Set-up the ancillary data. */
cmsg = CMSG_FIRSTHDR(&msg);
@@ -171,27 +186,29 @@ priv_socket_handle(struct priv *priv)
*fd = priv->ctx->cmd_fd;
ret = sendmsg(conn_sock, &msg, 0);
if (ret < 0)
- WARN("cannot send response");
-out:
+ DRV_LOG(WARNING, "port %u cannot send response",
+ dev->data->port_id);
+error:
close(conn_sock);
}
/**
* Connect to the primary process.
*
- * @param[in] priv
- * Pointer to private structure.
+ * @param[in] dev
+ * Pointer to Ethernet device structure.
*
* @return
- * fd on success, negative errno value on failure.
+ * fd on success, negative errno value otherwise and rte_errno is set.
*/
int
-priv_socket_connect(struct priv *priv)
+mlx5_socket_connect(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct sockaddr_un sun = {
.sun_family = AF_UNIX,
};
- int socket_fd;
+ int socket_fd = -1;
int *fd = NULL;
int ret;
struct ucred *cred;
@@ -211,57 +228,75 @@ priv_socket_connect(struct priv *priv)
ret = socket(AF_UNIX, SOCK_STREAM, 0);
if (ret < 0) {
- WARN("cannot connect to primary");
- return ret;
+ rte_errno = errno;
+ DRV_LOG(WARNING, "port %u cannot connect to primary",
+ dev->data->port_id);
+ goto error;
}
socket_fd = ret;
snprintf(sun.sun_path, sizeof(sun.sun_path), "/var/tmp/%s_%d",
MLX5_DRIVER_NAME, priv->primary_socket);
ret = connect(socket_fd, (const struct sockaddr *)&sun, sizeof(sun));
if (ret < 0) {
- WARN("cannot connect to primary");
- goto out;
+ rte_errno = errno;
+ DRV_LOG(WARNING, "port %u cannot connect to primary",
+ dev->data->port_id);
+ goto error;
}
cmsg = CMSG_FIRSTHDR(&msg);
if (cmsg == NULL) {
- DEBUG("cannot get first message");
- goto out;
+ rte_errno = EINVAL;
+ DRV_LOG(DEBUG, "port %u cannot get first message",
+ dev->data->port_id);
+ goto error;
}
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_CREDENTIALS;
cmsg->cmsg_len = CMSG_LEN(sizeof(*cred));
cred = (struct ucred *)CMSG_DATA(cmsg);
if (cred == NULL) {
- DEBUG("no credentials received");
- goto out;
+ rte_errno = EINVAL;
+ DRV_LOG(DEBUG, "port %u no credentials received",
+ dev->data->port_id);
+ goto error;
}
cred->pid = getpid();
cred->uid = getuid();
cred->gid = getgid();
ret = sendmsg(socket_fd, &msg, MSG_DONTWAIT);
if (ret < 0) {
- WARN("cannot send credentials to primary: %s",
- strerror(errno));
- goto out;
+ rte_errno = errno;
+ DRV_LOG(WARNING,
+ "port %u cannot send credentials to primary: %s",
+ dev->data->port_id, strerror(errno));
+ goto error;
}
ret = recvmsg(socket_fd, &msg, MSG_WAITALL);
if (ret <= 0) {
- WARN("no message from primary: %s", strerror(errno));
- goto out;
+ rte_errno = errno;
+ DRV_LOG(WARNING, "port %u no message from primary: %s",
+ dev->data->port_id, strerror(errno));
+ goto error;
}
cmsg = CMSG_FIRSTHDR(&msg);
if (cmsg == NULL) {
- WARN("No file descriptor received");
- goto out;
+ rte_errno = EINVAL;
+ DRV_LOG(WARNING, "port %u no file descriptor received",
+ dev->data->port_id);
+ goto error;
}
fd = (int *)CMSG_DATA(cmsg);
- if (*fd <= 0) {
- WARN("no file descriptor received: %s", strerror(errno));
- ret = *fd;
- goto out;
+ if (*fd < 0) {
+ DRV_LOG(WARNING, "port %u no file descriptor received: %s",
+ dev->data->port_id, strerror(errno));
+ rte_errno = *fd;
+ goto error;
}
ret = *fd;
-out:
close(socket_fd);
return ret;
+error:
+ if (socket_fd != -1)
+ close(socket_fd);
+ return -rte_errno;
}
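
Throughout mlx5_socket.c the rework replaces "return an errno value" with "record the error, clean up, return its negative". A standalone sketch of that convention using a plain UNIX socket and plain errno (no DPDK types, hypothetical socket path):

/* Standalone sketch: record errno before cleanup, return its negative. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

static int
open_listener(const char *path)
{
	struct sockaddr_un sun = { .sun_family = AF_UNIX };
	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
	int err;

	if (fd < 0)
		return -errno;
	snprintf(sun.sun_path, sizeof(sun.sun_path), "%s", path);
	remove(sun.sun_path);
	if (bind(fd, (const struct sockaddr *)&sun, sizeof(sun)) < 0 ||
	    listen(fd, 0) < 0) {
		err = errno;            /* close() below may clobber errno */
		close(fd);
		return -err;
	}
	return fd;
}

int
main(void)
{
	int fd = open_listener("/tmp/example_editor_sketch.sock");

	if (fd < 0)
		printf("listener failed: %s\n", strerror(-fd));
	else
		close(fd);
	return 0;
}
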
diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c
index 378472a7..875dd102 100644
--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c
@@ -1,10 +1,13 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
+#include <inttypes.h>
#include <linux/sockios.h>
#include <linux/ethtool.h>
+#include <stdint.h>
+#include <stdio.h>
#include <rte_ethdev_driver.h>
#include <rte_common.h>
@@ -19,6 +22,7 @@ struct mlx5_counter_ctrl {
char dpdk_name[RTE_ETH_XSTATS_NAME_SIZE];
/* Name of the counter on the device table. */
char ctr_name[RTE_ETH_XSTATS_NAME_SIZE];
+ uint32_t ib:1; /**< Nonzero for IB counters. */
};
static const struct mlx5_counter_ctrl mlx5_counters_init[] = {
@@ -93,6 +97,7 @@ static const struct mlx5_counter_ctrl mlx5_counters_init[] = {
{
.dpdk_name = "rx_out_of_buffer",
.ctr_name = "out_of_buffer",
+ .ib = 1,
},
{
.dpdk_name = "tx_packets_phy",
@@ -117,39 +122,56 @@ static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init);
/**
* Read device counters table.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param[out] stats
* Counters table output buffer.
*
* @return
- * 0 on success and stats is filled, negative on error.
+ * 0 on success and stats is filled, negative errno value otherwise and
+ * rte_errno is set.
*/
static int
-priv_read_dev_counters(struct priv *priv, uint64_t *stats)
+mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
unsigned int i;
struct ifreq ifr;
unsigned int stats_sz = xstats_ctrl->stats_n * sizeof(uint64_t);
unsigned char et_stat_buf[sizeof(struct ethtool_stats) + stats_sz];
struct ethtool_stats *et_stats = (struct ethtool_stats *)et_stat_buf;
+ int ret;
et_stats->cmd = ETHTOOL_GSTATS;
et_stats->n_stats = xstats_ctrl->stats_n;
ifr.ifr_data = (caddr_t)et_stats;
- if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) {
- WARN("unable to read statistic values from device");
- return -1;
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+ if (ret) {
+ DRV_LOG(WARNING,
+ "port %u unable to read statistic values from device",
+ dev->data->port_id);
+ return ret;
}
for (i = 0; i != xstats_n; ++i) {
- if (priv_is_ib_cntr(mlx5_counters_init[i].ctr_name))
- priv_get_cntr_sysfs(priv,
- mlx5_counters_init[i].ctr_name,
- &stats[i]);
- else
+ if (mlx5_counters_init[i].ib) {
+ FILE *file;
+ MKSTR(path, "%s/ports/1/hw_counters/%s",
+ priv->ibdev_path,
+ mlx5_counters_init[i].ctr_name);
+
+ file = fopen(path, "rb");
+ if (file) {
+ int n = fscanf(file, "%" SCNu64, &stats[i]);
+
+ fclose(file);
+ if (n != 1)
+ stats[i] = 0;
+ }
+ } else {
stats[i] = (uint64_t)
et_stats->data[xstats_ctrl->dev_table_idx[i]];
+ }
}
return 0;
}
@@ -157,22 +179,26 @@ priv_read_dev_counters(struct priv *priv, uint64_t *stats)
/**
* Query the number of statistics provided by ETHTOOL.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return
- * Number of statistics on success, -1 on error.
+ * Number of statistics on success, negative errno value otherwise and
+ * rte_errno is set.
*/
static int
-priv_ethtool_get_stats_n(struct priv *priv) {
+mlx5_ethtool_get_stats_n(struct rte_eth_dev *dev) {
struct ethtool_drvinfo drvinfo;
struct ifreq ifr;
+ int ret;
drvinfo.cmd = ETHTOOL_GDRVINFO;
ifr.ifr_data = (caddr_t)&drvinfo;
- if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) {
- WARN("unable to query number of statistics");
- return -1;
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+ if (ret) {
+ DRV_LOG(WARNING, "port %u unable to query number of statistics",
+ dev->data->port_id);
+ return ret;
}
return drvinfo.n_stats;
}
@@ -180,12 +206,13 @@ priv_ethtool_get_stats_n(struct priv *priv) {
/**
* Init the structures to read device counters.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*/
void
-priv_xstats_init(struct priv *priv)
+mlx5_xstats_init(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
unsigned int i;
unsigned int j;
@@ -193,12 +220,15 @@ priv_xstats_init(struct priv *priv)
struct ethtool_gstrings *strings = NULL;
unsigned int dev_stats_n;
unsigned int str_sz;
+ int ret;
- dev_stats_n = priv_ethtool_get_stats_n(priv);
- if (dev_stats_n < 1) {
- WARN("no extended statistics available");
+ ret = mlx5_ethtool_get_stats_n(dev);
+ if (ret < 0) {
+ DRV_LOG(WARNING, "port %u no extended statistics available",
+ dev->data->port_id);
return;
}
+ dev_stats_n = ret;
xstats_ctrl->stats_n = dev_stats_n;
/* Allocate memory to grab stat names and values. */
str_sz = dev_stats_n * ETH_GSTRING_LEN;
@@ -206,15 +236,18 @@ priv_xstats_init(struct priv *priv)
rte_malloc("xstats_strings",
str_sz + sizeof(struct ethtool_gstrings), 0);
if (!strings) {
- WARN("unable to allocate memory for xstats");
+ DRV_LOG(WARNING, "port %u unable to allocate memory for xstats",
+ dev->data->port_id);
return;
}
strings->cmd = ETHTOOL_GSTRINGS;
strings->string_set = ETH_SS_STATS;
strings->len = dev_stats_n;
ifr.ifr_data = (caddr_t)strings;
- if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) {
- WARN("unable to get statistic names");
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+ if (ret) {
+ DRV_LOG(WARNING, "port %u unable to get statistic names",
+ dev->data->port_id);
goto free;
}
for (j = 0; j != xstats_n; ++j)
@@ -232,68 +265,67 @@ priv_xstats_init(struct priv *priv)
}
}
for (j = 0; j != xstats_n; ++j) {
- if (priv_is_ib_cntr(mlx5_counters_init[j].ctr_name))
+ if (mlx5_counters_init[j].ib)
continue;
if (xstats_ctrl->dev_table_idx[j] >= dev_stats_n) {
- WARN("counter \"%s\" is not recognized",
- mlx5_counters_init[j].dpdk_name);
+ DRV_LOG(WARNING,
+ "port %u counter \"%s\" is not recognized",
+ dev->data->port_id,
+ mlx5_counters_init[j].dpdk_name);
goto free;
}
}
/* Copy to base at first time. */
assert(xstats_n <= MLX5_MAX_XSTATS);
- priv_read_dev_counters(priv, xstats_ctrl->base);
+ ret = mlx5_read_dev_counters(dev, xstats_ctrl->base);
+ if (ret)
+ DRV_LOG(ERR, "port %u cannot read device counters: %s",
+ dev->data->port_id, strerror(rte_errno));
free:
rte_free(strings);
}
/**
- * Get device extended statistics.
+ * DPDK callback to get extended device statistics.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param[out] stats
* Pointer to rte extended stats table.
+ * @param n
+ * The size of the stats table.
*
* @return
* Number of extended stats on success and stats is filled,
- * negative on error.
+ * negative on error and rte_errno is set.
*/
-static int
-priv_xstats_get(struct priv *priv, struct rte_eth_xstat *stats)
+int
+mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
+ unsigned int n)
{
- struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+ struct priv *priv = dev->data->dev_private;
unsigned int i;
- unsigned int n = xstats_n;
uint64_t counters[n];
- if (priv_read_dev_counters(priv, counters) < 0)
- return -1;
- for (i = 0; i != xstats_n; ++i) {
- stats[i].id = i;
- stats[i].value = (counters[i] - xstats_ctrl->base[i]);
- }
- return n;
-}
-
-/**
- * Reset device extended statistics.
- *
- * @param priv
- * Pointer to private structure.
- */
-static void
-priv_xstats_reset(struct priv *priv)
-{
- struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
- unsigned int i;
- unsigned int n = xstats_n;
- uint64_t counters[n];
+ if (n >= xstats_n && stats) {
+ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+ int stats_n;
+ int ret;
- if (priv_read_dev_counters(priv, counters) < 0)
- return;
- for (i = 0; i != n; ++i)
- xstats_ctrl->base[i] = counters[i];
+ stats_n = mlx5_ethtool_get_stats_n(dev);
+ if (stats_n < 0)
+ return stats_n;
+ if (xstats_ctrl->stats_n != stats_n)
+ mlx5_xstats_init(dev);
+ ret = mlx5_read_dev_counters(dev, counters);
+ if (ret)
+ return ret;
+ for (i = 0; i != xstats_n; ++i) {
+ stats[i].id = i;
+ stats[i].value = (counters[i] - xstats_ctrl->base[i]);
+ }
+ }
+ return xstats_n;
}
/**
@@ -303,6 +335,10 @@ priv_xstats_reset(struct priv *priv)
* Pointer to Ethernet device structure.
* @param[out] stats
* Stats structure output buffer.
+ *
+ * @return
+ * 0 on success and stats is filled, negative errno value otherwise and
+ * rte_errno is set.
*/
int
mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
@@ -312,7 +348,6 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
unsigned int i;
unsigned int idx;
- priv_lock(priv);
/* Add software counters. */
for (i = 0; (i != priv->rxqs_n); ++i) {
struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
@@ -358,7 +393,6 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
/* FIXME: retrieve and add hardware counters. */
#endif
*stats = tmp;
- priv_unlock(priv);
return 0;
}
@@ -375,7 +409,6 @@ mlx5_stats_reset(struct rte_eth_dev *dev)
unsigned int i;
unsigned int idx;
- priv_lock(priv);
for (i = 0; (i != priv->rxqs_n); ++i) {
if ((*priv->rxqs)[i] == NULL)
continue;
@@ -393,45 +426,6 @@ mlx5_stats_reset(struct rte_eth_dev *dev)
#ifndef MLX5_PMD_SOFT_COUNTERS
/* FIXME: reset hardware counters. */
#endif
- priv_unlock(priv);
-}
-
-/**
- * DPDK callback to get extended device statistics.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param[out] stats
- * Stats table output buffer.
- * @param n
- * The size of the stats table.
- *
- * @return
- * Number of xstats on success, negative on failure.
- */
-int
-mlx5_xstats_get(struct rte_eth_dev *dev,
- struct rte_eth_xstat *stats, unsigned int n)
-{
- struct priv *priv = dev->data->dev_private;
- int ret = xstats_n;
-
- if (n >= xstats_n && stats) {
- struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
- int stats_n;
-
- priv_lock(priv);
- stats_n = priv_ethtool_get_stats_n(priv);
- if (stats_n < 0) {
- priv_unlock(priv);
- return -1;
- }
- if (xstats_ctrl->stats_n != stats_n)
- priv_xstats_init(priv);
- ret = priv_xstats_get(priv, stats);
- priv_unlock(priv);
- }
- return ret;
}
/**
@@ -446,16 +440,27 @@ mlx5_xstats_reset(struct rte_eth_dev *dev)
struct priv *priv = dev->data->dev_private;
struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
int stats_n;
+ unsigned int i;
+ unsigned int n = xstats_n;
+ uint64_t counters[n];
+ int ret;
- priv_lock(priv);
- stats_n = priv_ethtool_get_stats_n(priv);
- if (stats_n < 0)
- goto unlock;
+ stats_n = mlx5_ethtool_get_stats_n(dev);
+ if (stats_n < 0) {
+ DRV_LOG(ERR, "port %u cannot get stats: %s", dev->data->port_id,
+ strerror(-stats_n));
+ return;
+ }
if (xstats_ctrl->stats_n != stats_n)
- priv_xstats_init(priv);
- priv_xstats_reset(priv);
-unlock:
- priv_unlock(priv);
+ mlx5_xstats_init(dev);
+ ret = mlx5_read_dev_counters(dev, counters);
+ if (ret) {
+ DRV_LOG(ERR, "port %u cannot read device counters: %s",
+ dev->data->port_id, strerror(rte_errno));
+ return;
+ }
+ for (i = 0; i != n; ++i)
+ xstats_ctrl->base[i] = counters[i];
}
/**
@@ -472,21 +477,18 @@ unlock:
* Number of xstats names.
*/
int
-mlx5_xstats_get_names(struct rte_eth_dev *dev,
- struct rte_eth_xstat_name *xstats_names, unsigned int n)
+mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_xstat_name *xstats_names, unsigned int n)
{
- struct priv *priv = dev->data->dev_private;
unsigned int i;
if (n >= xstats_n && xstats_names) {
- priv_lock(priv);
for (i = 0; i != xstats_n; ++i) {
strncpy(xstats_names[i].name,
mlx5_counters_init[i].dpdk_name,
RTE_ETH_XSTATS_NAME_SIZE);
xstats_names[i].name[RTE_ETH_XSTATS_NAME_SIZE - 1] = 0;
}
- priv_unlock(priv);
}
return xstats_n;
}
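
The new ib flag above routes InfiniBand-only counters to a sysfs read instead of ETHTOOL_GSTATS. A standalone sketch of that read with a made-up path, not the driver code:

/* Standalone sketch: read one 64-bit counter from a sysfs-style text file. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t
read_counter(const char *path)
{
	uint64_t value = 0;
	FILE *file = fopen(path, "rb");

	if (file) {
		if (fscanf(file, "%" SCNu64, &value) != 1)
			value = 0;          /* unreadable counters read as 0 */
		fclose(file);
	}
	return value;
}

int
main(void)
{
	/* Hypothetical location; a real one would look like
	 * /sys/class/infiniband/<device>/ports/1/hw_counters/out_of_buffer. */
	printf("%" PRIu64 "\n", read_counter("/tmp/example_counter"));
	return 0;
}
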
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index f5711a99..3e7c0a90 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#include <unistd.h>
@@ -14,83 +14,125 @@
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
+/**
+ * Stop traffic on Tx queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
static void
-priv_txq_stop(struct priv *priv)
+mlx5_txq_stop(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
unsigned int i;
for (i = 0; i != priv->txqs_n; ++i)
- mlx5_priv_txq_release(priv, i);
+ mlx5_txq_release(dev, i);
}
+/**
+ * Start traffic on Tx queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
static int
-priv_txq_start(struct priv *priv)
+mlx5_txq_start(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
unsigned int i;
- int ret = 0;
+ int ret;
/* Add memory regions to Tx queues. */
for (i = 0; i != priv->txqs_n; ++i) {
- unsigned int idx = 0;
- struct mlx5_mr *mr;
- struct mlx5_txq_ctrl *txq_ctrl = mlx5_priv_txq_get(priv, i);
+ struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);
if (!txq_ctrl)
continue;
- LIST_FOREACH(mr, &priv->mr, next) {
- priv_txq_mp2mr_reg(priv, &txq_ctrl->txq, mr->mp, idx++);
- if (idx == MLX5_PMD_TX_MP_CACHE)
- break;
- }
txq_alloc_elts(txq_ctrl);
- txq_ctrl->ibv = mlx5_priv_txq_ibv_new(priv, i);
+ txq_ctrl->ibv = mlx5_txq_ibv_new(dev, i);
if (!txq_ctrl->ibv) {
- ret = ENOMEM;
+ rte_errno = ENOMEM;
goto error;
}
}
- ret = priv_tx_uar_remap(priv, priv->ctx->cmd_fd);
+ ret = mlx5_tx_uar_remap(dev, priv->ctx->cmd_fd);
if (ret)
goto error;
- return ret;
+ return 0;
error:
- priv_txq_stop(priv);
- return ret;
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ mlx5_txq_stop(dev);
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
}
+/**
+ * Stop traffic on Rx queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
static void
-priv_rxq_stop(struct priv *priv)
+mlx5_rxq_stop(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
unsigned int i;
for (i = 0; i != priv->rxqs_n; ++i)
- mlx5_priv_rxq_release(priv, i);
+ mlx5_rxq_release(dev, i);
}
+/**
+ * Start traffic on Rx queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
static int
-priv_rxq_start(struct priv *priv)
+mlx5_rxq_start(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
unsigned int i;
int ret = 0;
+ /* Allocate/reuse/resize mempool for Multi-Packet RQ. */
+ if (mlx5_mprq_alloc_mp(dev))
+ goto error;
for (i = 0; i != priv->rxqs_n; ++i) {
- struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_priv_rxq_get(priv, i);
+ struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
+ struct rte_mempool *mp;
if (!rxq_ctrl)
continue;
+ /* Pre-register Rx mempool. */
+ mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
+ rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
+ DRV_LOG(DEBUG,
+ "port %u Rx queue %u registering"
+ " mp %s having %u chunks",
+ dev->data->port_id, rxq_ctrl->idx,
+ mp->name, mp->nb_mem_chunks);
+ mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
ret = rxq_alloc_elts(rxq_ctrl);
if (ret)
goto error;
- rxq_ctrl->ibv = mlx5_priv_rxq_ibv_new(priv, i);
- if (!rxq_ctrl->ibv) {
- ret = ENOMEM;
+ rxq_ctrl->ibv = mlx5_rxq_ibv_new(dev, i);
+ if (!rxq_ctrl->ibv)
goto error;
- }
}
- return -ret;
+ return 0;
error:
- priv_rxq_stop(priv);
- return -ret;
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ mlx5_rxq_stop(dev);
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
}
/**
@@ -102,68 +144,64 @@ error:
* Pointer to Ethernet device structure.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
- struct mlx5_mr *mr = NULL;
- int err;
+ int ret;
dev->data->dev_started = 1;
- priv_lock(priv);
- err = priv_flow_create_drop_queue(priv);
- if (err) {
- ERROR("%p: Drop queue allocation failed: %s",
- (void *)dev, strerror(err));
+ DRV_LOG(DEBUG, "port %u allocating and configuring hash Rx queues",
+ dev->data->port_id);
+ ret = mlx5_txq_start(dev);
+ if (ret) {
+ DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
+ dev->data->port_id, strerror(rte_errno));
goto error;
}
- DEBUG("%p: allocating and configuring hash RX queues", (void *)dev);
- rte_mempool_walk(mlx5_mp2mr_iter, priv);
- err = priv_txq_start(priv);
- if (err) {
- ERROR("%p: TXQ allocation failed: %s",
- (void *)dev, strerror(err));
+ ret = mlx5_rxq_start(dev);
+ if (ret) {
+ DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
+ dev->data->port_id, strerror(rte_errno));
goto error;
}
- err = priv_rxq_start(priv);
- if (err) {
- ERROR("%p: RXQ allocation failed: %s",
- (void *)dev, strerror(err));
+ if (rte_log_get_level(mlx5_logtype) == RTE_LOG_DEBUG)
+ mlx5_mr_dump_dev(dev);
+ ret = mlx5_rx_intr_vec_enable(dev);
+ if (ret) {
+ DRV_LOG(ERR, "port %u Rx interrupt vector creation failed",
+ dev->data->port_id);
goto error;
}
- err = priv_rx_intr_vec_enable(priv);
- if (err) {
- ERROR("%p: RX interrupt vector creation failed",
- (void *)priv);
+ mlx5_xstats_init(dev);
+ ret = mlx5_traffic_enable(dev);
+ if (ret) {
+ DRV_LOG(DEBUG, "port %u failed to set defaults flows",
+ dev->data->port_id);
goto error;
}
- priv_xstats_init(priv);
- /* Update link status and Tx/Rx callbacks for the first time. */
- memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
- INFO("Forcing port %u link to be up", dev->data->port_id);
- err = priv_force_link_status_change(priv, ETH_LINK_UP);
- if (err) {
- DEBUG("Failed to set port %u link to be up",
- dev->data->port_id);
+ ret = mlx5_flow_start(dev, &priv->flows);
+ if (ret) {
+ DRV_LOG(DEBUG, "port %u failed to set flows",
+ dev->data->port_id);
goto error;
}
- priv_dev_interrupt_handler_install(priv, dev);
- priv_unlock(priv);
+ dev->tx_pkt_burst = mlx5_select_tx_function(dev);
+ dev->rx_pkt_burst = mlx5_select_rx_function(dev);
+ mlx5_dev_interrupt_handler_install(dev);
return 0;
error:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
/* Rollback. */
dev->data->dev_started = 0;
- for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
- priv_mr_release(priv, mr);
- priv_flow_stop(priv, &priv->flows);
- priv_dev_traffic_disable(priv, dev);
- priv_txq_stop(priv);
- priv_rxq_stop(priv);
- priv_flow_delete_drop_queue(priv);
- priv_unlock(priv);
- return err;
+ mlx5_flow_stop(dev, &priv->flows);
+ mlx5_traffic_disable(dev);
+ mlx5_txq_stop(dev);
+ mlx5_rxq_stop(dev);
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
}
/**
@@ -178,42 +216,38 @@ void
mlx5_dev_stop(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
- struct mlx5_mr *mr;
- priv_lock(priv);
dev->data->dev_started = 0;
/* Prevent crashes when queues are still in use. */
dev->rx_pkt_burst = removed_rx_burst;
dev->tx_pkt_burst = removed_tx_burst;
rte_wmb();
usleep(1000 * priv->rxqs_n);
- DEBUG("%p: cleaning up and destroying hash RX queues", (void *)dev);
- priv_flow_stop(priv, &priv->flows);
- priv_dev_traffic_disable(priv, dev);
- priv_rx_intr_vec_disable(priv);
- priv_dev_interrupt_handler_uninstall(priv, dev);
- priv_txq_stop(priv);
- priv_rxq_stop(priv);
- for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
- priv_mr_release(priv, mr);
- priv_flow_delete_drop_queue(priv);
- priv_unlock(priv);
+ DRV_LOG(DEBUG, "port %u cleaning up and destroying hash Rx queues",
+ dev->data->port_id);
+ mlx5_flow_stop(dev, &priv->flows);
+ mlx5_traffic_disable(dev);
+ mlx5_rx_intr_vec_disable(dev);
+ mlx5_dev_interrupt_handler_uninstall(dev);
+ mlx5_txq_stop(dev);
+ mlx5_rxq_stop(dev);
}
/**
* Enable traffic flows configured by control plane
*
- * @param priv
+ * @param dev
* Pointer to Ethernet device private data.
* @param dev
* Pointer to Ethernet device structure.
*
* @return
- * 0 on success.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev)
+mlx5_traffic_enable(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct rte_flow_item_eth bcast = {
.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
};
@@ -246,8 +280,9 @@ priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev)
.type = 0,
};
- claim_zero(mlx5_ctrl_flow(dev, &promisc, &promisc));
- return 0;
+ ret = mlx5_ctrl_flow(dev, &promisc, &promisc);
+ if (ret)
+ goto error;
}
if (dev->data->all_multicast) {
struct rte_flow_item_eth multicast = {
@@ -256,7 +291,9 @@ priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev)
.type = 0,
};
- claim_zero(mlx5_ctrl_flow(dev, &multicast, &multicast));
+ ret = mlx5_ctrl_flow(dev, &multicast, &multicast);
+ if (ret)
+ goto error;
} else {
/* Add broadcast/multicast flows. */
for (i = 0; i != vlan_filter_n; ++i) {
@@ -316,74 +353,49 @@ priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev)
goto error;
}
if (!vlan_filter_n) {
- ret = mlx5_ctrl_flow(dev, &unicast,
- &unicast_mask);
+ ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask);
if (ret)
goto error;
}
}
return 0;
error:
- return rte_errno;
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ mlx5_flow_list_flush(dev, &priv->ctrl_flows);
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
}
/**
* Disable traffic flows configured by control plane
*
- * @param priv
- * Pointer to Ethernet device private data.
* @param dev
- * Pointer to Ethernet device structure.
- *
- * @return
- * 0 on success.
- */
-int
-priv_dev_traffic_disable(struct priv *priv, struct rte_eth_dev *dev)
-{
- (void)dev;
- priv_flow_flush(priv, &priv->ctrl_flows);
- return 0;
-}
-
-/**
- * Restart traffic flows configured by control plane
- *
- * @param priv
* Pointer to Ethernet device private data.
- * @param dev
- * Pointer to Ethernet device structure.
- *
- * @return
- * 0 on success.
*/
-int
-priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev)
+void
+mlx5_traffic_disable(struct rte_eth_dev *dev)
{
- if (dev->data->dev_started) {
- priv_dev_traffic_disable(priv, dev);
- priv_dev_traffic_enable(priv, dev);
- }
- return 0;
+ struct priv *priv = dev->data->dev_private;
+
+ mlx5_flow_list_flush(dev, &priv->ctrl_flows);
}
/**
* Restart traffic flows configured by control plane
*
* @param dev
- * Pointer to Ethernet device structure.
+ * Pointer to Ethernet device private data.
*
* @return
- * 0 on success.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_traffic_restart(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
-
- priv_lock(priv);
- priv_dev_traffic_restart(priv, dev);
- priv_unlock(priv);
+ if (dev->data->dev_started) {
+ mlx5_traffic_disable(dev);
+ return mlx5_traffic_enable(dev);
+ }
return 0;
}
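
mlx5_dev_start() above follows the same rollback discipline as the rest of the series: on failure it saves rte_errno, tears down whatever was already started, then restores the saved value so the caller sees the original cause. A standalone sketch with hypothetical steps:

/* Standalone sketch: ordered start-up with reverse-order rollback on error. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static int g_err;                               /* stand-in for rte_errno */

static int start_txq(void) { return 0; }
static int start_rxq(void) { g_err = ENOMEM; return -1; } /* simulated failure */
static void stop_rxq(void) { puts("rxq stopped"); }
static void stop_txq(void) { puts("txq stopped"); }

static int
dev_start(void)
{
	int saved;

	if (start_txq())
		goto error;
	if (start_rxq())
		goto error;
	return 0;
error:
	saved = g_err;          /* the teardown below may overwrite it */
	stop_rxq();
	stop_txq();
	g_err = saved;
	return -g_err;
}

int
main(void)
{
	int ret = dev_start();

	if (ret < 0)
		printf("start failed: %s\n", strerror(-ret));
	return 0;
}
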
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index ed1c713e..691ea071 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#include <stddef.h>
@@ -47,7 +47,8 @@ txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
for (i = 0; (i != elts_n); ++i)
(*txq_ctrl->txq.elts)[i] = NULL;
- DEBUG("%p: allocated and configured %u WRs", (void *)txq_ctrl, elts_n);
+ DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
+ PORT_ID(txq_ctrl->priv), txq_ctrl->idx, elts_n);
txq_ctrl->txq.elts_head = 0;
txq_ctrl->txq.elts_tail = 0;
txq_ctrl->txq.elts_comp = 0;
@@ -68,7 +69,8 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
uint16_t elts_tail = txq_ctrl->txq.elts_tail;
struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;
- DEBUG("%p: freeing WRs", (void *)txq_ctrl);
+ DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
+ PORT_ID(txq_ctrl->priv), txq_ctrl->idx);
txq_ctrl->txq.elts_head = 0;
txq_ctrl->txq.elts_tail = 0;
txq_ctrl->txq.elts_comp = 0;
@@ -91,15 +93,16 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
/**
* Returns the per-port supported offloads.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return
* Supported Tx offloads.
*/
uint64_t
-mlx5_priv_get_tx_port_offloads(struct priv *priv)
+mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
DEV_TX_OFFLOAD_VLAN_INSERT);
struct mlx5_dev_config *config = &priv->config;
@@ -116,36 +119,14 @@ mlx5_priv_get_tx_port_offloads(struct priv *priv)
if (config->tso)
offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GRE_TNL_TSO);
+ if (config->swp)
+ offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
+ DEV_TX_OFFLOAD_UDP_TNL_TSO);
}
return offloads;
}
/**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param priv
- * Pointer to private structure.
- * @param offloads
- * Per-queue offloads configuration.
- *
- * @return
- * 1 if the configuration is valid, 0 otherwise.
- */
-static int
-priv_is_tx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
-{
- uint64_t port_offloads = priv->dev->data->dev_conf.txmode.offloads;
- uint64_t port_supp_offloads = mlx5_priv_get_tx_port_offloads(priv);
-
- /* There are no Tx offloads which are per queue. */
- if ((offloads & port_supp_offloads) != offloads)
- return 0;
- if ((port_offloads ^ offloads) & port_supp_offloads)
- return 0;
- return 1;
-}
-
-/**
* DPDK callback to configure a TX queue.
*
* @param dev
@@ -160,7 +141,7 @@ priv_is_tx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
* Thresholds parameters.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
@@ -170,64 +151,47 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
struct mlx5_txq_data *txq = (*priv->txqs)[idx];
struct mlx5_txq_ctrl *txq_ctrl =
container_of(txq, struct mlx5_txq_ctrl, txq);
- int ret = 0;
- priv_lock(priv);
- /*
- * Don't verify port offloads for application which
- * use the old API.
- */
- if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
- !priv_is_tx_queue_offloads_allowed(priv, conf->offloads)) {
- ret = ENOTSUP;
- ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port "
- "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
- (void *)dev, conf->offloads,
- dev->data->dev_conf.txmode.offloads,
- mlx5_priv_get_tx_port_offloads(priv));
- goto out;
- }
if (desc <= MLX5_TX_COMP_THRESH) {
- WARN("%p: number of descriptors requested for TX queue %u"
- " must be higher than MLX5_TX_COMP_THRESH, using"
- " %u instead of %u",
- (void *)dev, idx, MLX5_TX_COMP_THRESH + 1, desc);
+ DRV_LOG(WARNING,
+ "port %u number of descriptors requested for Tx queue"
+ " %u must be higher than MLX5_TX_COMP_THRESH, using %u"
+ " instead of %u",
+ dev->data->port_id, idx, MLX5_TX_COMP_THRESH + 1, desc);
desc = MLX5_TX_COMP_THRESH + 1;
}
if (!rte_is_power_of_2(desc)) {
desc = 1 << log2above(desc);
- WARN("%p: increased number of descriptors in TX queue %u"
- " to the next power of two (%d)",
- (void *)dev, idx, desc);
+ DRV_LOG(WARNING,
+ "port %u increased number of descriptors in Tx queue"
+ " %u to the next power of two (%d)",
+ dev->data->port_id, idx, desc);
}
- DEBUG("%p: configuring queue %u for %u descriptors",
- (void *)dev, idx, desc);
+ DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
+ dev->data->port_id, idx, desc);
if (idx >= priv->txqs_n) {
- ERROR("%p: queue index out of range (%u >= %u)",
- (void *)dev, idx, priv->txqs_n);
- priv_unlock(priv);
- return -EOVERFLOW;
+ DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
+ dev->data->port_id, idx, priv->txqs_n);
+ rte_errno = EOVERFLOW;
+ return -rte_errno;
}
- if (!mlx5_priv_txq_releasable(priv, idx)) {
- ret = EBUSY;
- ERROR("%p: unable to release queue index %u",
- (void *)dev, idx);
- goto out;
+ if (!mlx5_txq_releasable(dev, idx)) {
+ rte_errno = EBUSY;
+ DRV_LOG(ERR, "port %u unable to release queue index %u",
+ dev->data->port_id, idx);
+ return -rte_errno;
}
- mlx5_priv_txq_release(priv, idx);
- txq_ctrl = mlx5_priv_txq_new(priv, idx, desc, socket, conf);
+ mlx5_txq_release(dev, idx);
+ txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
if (!txq_ctrl) {
- ERROR("%p: unable to allocate queue index %u",
- (void *)dev, idx);
- ret = ENOMEM;
- goto out;
+ DRV_LOG(ERR, "port %u unable to allocate queue index %u",
+ dev->data->port_id, idx);
+ return -rte_errno;
}
- DEBUG("%p: adding TX queue %p to list",
- (void *)dev, (void *)txq_ctrl);
+ DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
+ dev->data->port_id, idx);
(*priv->txqs)[idx] = &txq_ctrl->txq;
-out:
- priv_unlock(priv);
- return -ret;
+ return 0;
}
/**
@@ -248,15 +212,13 @@ mlx5_tx_queue_release(void *dpdk_txq)
return;
txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
priv = txq_ctrl->priv;
- priv_lock(priv);
for (i = 0; (i != priv->txqs_n); ++i)
if ((*priv->txqs)[i] == txq) {
- DEBUG("%p: removing TX queue %p from list",
- (void *)priv->dev, (void *)txq_ctrl);
- mlx5_priv_txq_release(priv, i);
+ mlx5_txq_release(ETH_DEV(priv), i);
+ DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
+ PORT_ID(priv), txq_ctrl->idx);
break;
}
- priv_unlock(priv);
}
@@ -265,17 +227,18 @@ mlx5_tx_queue_release(void *dpdk_txq)
* Both primary and secondary process do mmap to make UAR address
* aligned.
*
- * @param[in] priv
- * Pointer to private structure.
+ * @param[in] dev
+ * Pointer to Ethernet device.
* @param fd
* Verbs file descriptor to map UAR pages.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-priv_tx_uar_remap(struct priv *priv, int fd)
+mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
{
+ struct priv *priv = dev->data->dev_private;
unsigned int i, j;
uintptr_t pages[priv->txqs_n];
unsigned int pages_n = 0;
@@ -287,7 +250,6 @@ priv_tx_uar_remap(struct priv *priv, int fd)
struct mlx5_txq_ctrl *txq_ctrl;
int already_mapped;
size_t page_size = sysconf(_SC_PAGESIZE);
- int r;
memset(pages, 0, priv->txqs_n * sizeof(uintptr_t));
/*
@@ -300,6 +262,7 @@ priv_tx_uar_remap(struct priv *priv, int fd)
continue;
txq = (*priv->txqs)[i];
txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
+ assert(txq_ctrl->idx == (uint16_t)i);
/* UAR addr form verbs used to find dup and offset in page. */
uar_va = (uintptr_t)txq_ctrl->bf_reg_orig;
off = uar_va & (page_size - 1); /* offset in page. */
@@ -324,10 +287,12 @@ priv_tx_uar_remap(struct priv *priv, int fd)
txq_ctrl->uar_mmap_offset);
if (ret != addr) {
/* fixed mmap have to return same address */
- ERROR("call to mmap failed on UAR for txq %d\n",
- i);
- r = ENXIO;
- return r;
+ DRV_LOG(ERR,
+ "port %u call to mmap failed on UAR"
+ " for txq %u",
+ dev->data->port_id, txq_ctrl->idx);
+ rte_errno = ENXIO;
+ return -rte_errno;
}
}
if (rte_eal_process_type() == RTE_PROC_PRIMARY) /* save once */
@@ -361,17 +326,18 @@ is_empw_burst_func(eth_tx_burst_t tx_pkt_burst)
/**
* Create the Tx queue Verbs object.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* Queue index in DPDK Rx queue array
*
* @return
- * The Verbs object initialised if it can be created.
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
*/
-struct mlx5_txq_ibv*
-mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
+struct mlx5_txq_ibv *
+mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
struct mlx5_txq_ctrl *txq_ctrl =
container_of(txq_data, struct mlx5_txq_ctrl, txq);
@@ -388,18 +354,20 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
struct mlx5dv_cq cq_info;
struct mlx5dv_obj obj;
const int desc = 1 << txq_data->elts_n;
- eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, priv->dev);
+ eth_tx_burst_t tx_pkt_burst = mlx5_select_tx_function(dev);
int ret = 0;
assert(txq_data);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
priv->verbs_alloc_ctx.obj = txq_ctrl;
if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
- ERROR("MLX5_ENABLE_CQE_COMPRESSION must never be set");
- goto error;
+ DRV_LOG(ERR,
+ "port %u MLX5_ENABLE_CQE_COMPRESSION must never be set",
+ dev->data->port_id);
+ rte_errno = EINVAL;
+ return NULL;
}
memset(&tmpl, 0, sizeof(struct mlx5_txq_ibv));
- /* MRs will be registered in mp2mr[] later. */
attr.cq = (struct ibv_cq_init_attr_ex){
.comp_mask = 0,
};
@@ -409,7 +377,9 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
tmpl.cq = mlx5_glue->create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
if (tmpl.cq == NULL) {
- ERROR("%p: CQ creation failure", (void *)txq_ctrl);
+ DRV_LOG(ERR, "port %u Tx queue %u CQ creation failure",
+ dev->data->port_id, idx);
+ rte_errno = errno;
goto error;
}
attr.init = (struct ibv_qp_init_attr_ex){
@@ -450,7 +420,9 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
}
tmpl.qp = mlx5_glue->create_qp_ex(priv->ctx, &attr.init);
if (tmpl.qp == NULL) {
- ERROR("%p: QP creation failure", (void *)txq_ctrl);
+ DRV_LOG(ERR, "port %u Tx queue %u QP creation failure",
+ dev->data->port_id, idx);
+ rte_errno = errno;
goto error;
}
attr.mod = (struct ibv_qp_attr){
@@ -462,7 +434,10 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod,
(IBV_QP_STATE | IBV_QP_PORT));
if (ret) {
- ERROR("%p: QP state to IBV_QPS_INIT failed", (void *)txq_ctrl);
+ DRV_LOG(ERR,
+ "port %u Tx queue %u QP state to IBV_QPS_INIT failed",
+ dev->data->port_id, idx);
+ rte_errno = errno;
goto error;
}
attr.mod = (struct ibv_qp_attr){
@@ -470,19 +445,27 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
};
ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
if (ret) {
- ERROR("%p: QP state to IBV_QPS_RTR failed", (void *)txq_ctrl);
+ DRV_LOG(ERR,
+ "port %u Tx queue %u QP state to IBV_QPS_RTR failed",
+ dev->data->port_id, idx);
+ rte_errno = errno;
goto error;
}
attr.mod.qp_state = IBV_QPS_RTS;
ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
if (ret) {
- ERROR("%p: QP state to IBV_QPS_RTS failed", (void *)txq_ctrl);
+ DRV_LOG(ERR,
+ "port %u Tx queue %u QP state to IBV_QPS_RTS failed",
+ dev->data->port_id, idx);
+ rte_errno = errno;
goto error;
}
txq_ibv = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_ibv), 0,
txq_ctrl->socket);
if (!txq_ibv) {
- ERROR("%p: cannot allocate memory", (void *)txq_ctrl);
+ DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
goto error;
}
obj.cq.in = tmpl.cq;
@@ -490,11 +473,16 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
obj.qp.in = tmpl.qp;
obj.qp.out = &qp;
ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
- if (ret != 0)
+ if (ret != 0) {
+ rte_errno = errno;
goto error;
+ }
if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
- ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
- "it should be set to %u", RTE_CACHE_LINE_SIZE);
+ DRV_LOG(ERR,
+ "port %u wrong MLX5_CQE_SIZE environment variable"
+ " value: it should be set to %u",
+ dev->data->port_id, RTE_CACHE_LINE_SIZE);
+ rte_errno = EINVAL;
goto error;
}
txq_data->cqe_n = log2above(cq_info.cqe_cnt);
@@ -519,37 +507,45 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
} else {
- ERROR("Failed to retrieve UAR info, invalid libmlx5.so version");
+ DRV_LOG(ERR,
+ "port %u failed to retrieve UAR info, invalid"
+ " libmlx5.so",
+ dev->data->port_id);
+ rte_errno = EINVAL;
goto error;
}
- DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
- (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt));
+ DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
+ dev->data->port_id, idx, rte_atomic32_read(&txq_ibv->refcnt));
LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next);
+ txq_ibv->txq_ctrl = txq_ctrl;
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
return txq_ibv;
error:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
if (tmpl.cq)
claim_zero(mlx5_glue->destroy_cq(tmpl.cq));
if (tmpl.qp)
claim_zero(mlx5_glue->destroy_qp(tmpl.qp));
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
+ rte_errno = ret; /* Restore rte_errno. */
return NULL;
}
/**
* Get a Tx queue Verbs object.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* Queue index in DPDK Rx queue array
*
* @return
* The Verbs object if it exists.
*/
-struct mlx5_txq_ibv*
-mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx)
+struct mlx5_txq_ibv *
+mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *txq_ctrl;
if (idx >= priv->txqs_n)
@@ -559,8 +555,8 @@ mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx)
txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
if (txq_ctrl->ibv) {
rte_atomic32_inc(&txq_ctrl->ibv->refcnt);
- DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
- (void *)txq_ctrl->ibv,
+ DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
+ dev->data->port_id, txq_ctrl->idx,
rte_atomic32_read(&txq_ctrl->ibv->refcnt));
}
return txq_ctrl->ibv;
@@ -569,21 +565,19 @@ mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx)
/**
* Release an Tx verbs queue object.
*
- * @param priv
- * Pointer to private structure.
* @param txq_ibv
* Verbs Tx queue object.
*
* @return
- * 0 on success, errno on failure.
+ * 1 while a reference on it exists, 0 when freed.
*/
int
-mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv)
+mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv)
{
- (void)priv;
assert(txq_ibv);
- DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
- (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt));
+ DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
+ PORT_ID(txq_ibv->txq_ctrl->priv),
+ txq_ibv->txq_ctrl->idx, rte_atomic32_read(&txq_ibv->refcnt));
if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
claim_zero(mlx5_glue->destroy_qp(txq_ibv->qp));
claim_zero(mlx5_glue->destroy_cq(txq_ibv->cq));
@@ -591,21 +585,18 @@ mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv)
rte_free(txq_ibv);
return 0;
}
- return EBUSY;
+ return 1;
}
/**
* Return true if a single reference exists on the object.
*
- * @param priv
- * Pointer to private structure.
* @param txq_ibv
* Verbs Tx queue object.
*/
int
-mlx5_priv_txq_ibv_releasable(struct priv *priv, struct mlx5_txq_ibv *txq_ibv)
+mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv)
{
- (void)priv;
assert(txq_ibv);
return (rte_atomic32_read(&txq_ibv->refcnt) == 1);
}
@@ -613,20 +604,22 @@ mlx5_priv_txq_ibv_releasable(struct priv *priv, struct mlx5_txq_ibv *txq_ibv)
/**
* Verify the Verbs Tx queue list is empty
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
- * @return the number of object not released.
+ * @return
+ * The number of objects not released.
*/
int
-mlx5_priv_txq_ibv_verify(struct priv *priv)
+mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
int ret = 0;
struct mlx5_txq_ibv *txq_ibv;
LIST_FOREACH(txq_ibv, &priv->txqsibv, next) {
- DEBUG("%p: Verbs Tx queue %p still referenced", (void *)priv,
- (void *)txq_ibv);
+ DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
+ dev->data->port_id, txq_ibv->txq_ctrl->idx);
++ret;
}
return ret;
@@ -649,9 +642,14 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
unsigned int txq_inline;
unsigned int txqs_inline;
unsigned int inline_max_packet_sz;
- eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, priv->dev);
+ eth_tx_burst_t tx_pkt_burst =
+ mlx5_select_tx_function(ETH_DEV(priv));
int is_empw_func = is_empw_burst_func(tx_pkt_burst);
- int tso = !!(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_TCP_TSO);
+ int tso = !!(txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_IP_TNL_TSO |
+ DEV_TX_OFFLOAD_UDP_TNL_TSO));
txq_inline = (config->txq_inline == MLX5_ARG_UNSET) ?
0 : config->txq_inline;
@@ -685,18 +683,6 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
inline_max_packet_sz) +
(RTE_CACHE_LINE_SIZE - 1)) /
RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
- } else if (tso) {
- int inline_diff = txq_ctrl->txq.max_inline -
- max_tso_inline;
-
- /*
- * Adjust inline value as Verbs aggregates
- * tso_inline and txq_inline fields.
- */
- txq_ctrl->max_inline_data = inline_diff > 0 ?
- inline_diff *
- RTE_CACHE_LINE_SIZE :
- 0;
} else {
txq_ctrl->max_inline_data =
txq_ctrl->txq.max_inline * RTE_CACHE_LINE_SIZE;
@@ -716,9 +702,10 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
max_inline = max_inline - (max_inline %
RTE_CACHE_LINE_SIZE);
- WARN("txq inline is too large (%d) setting it to "
- "the maximum possible: %d\n",
- txq_inline, max_inline);
+ DRV_LOG(WARNING,
+ "port %u txq inline is too large (%d) setting"
+ " it to the maximum possible: %d\n",
+ PORT_ID(priv), txq_inline, max_inline);
txq_ctrl->txq.max_inline = max_inline /
RTE_CACHE_LINE_SIZE;
}
@@ -730,13 +717,17 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
txq_ctrl->txq.tso_en = 1;
}
txq_ctrl->txq.tunnel_en = config->tunnel_en;
+ txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
+ DEV_TX_OFFLOAD_UDP_TNL_TSO |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
+ txq_ctrl->txq.offloads) && config->swp;
}
/**
* Create a DPDK Tx queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* TX queue index.
* @param desc
@@ -747,76 +738,80 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
* Thresholds parameters.
*
* @return
- * A DPDK queue object on success.
+ * A DPDK queue object on success, NULL otherwise and rte_errno is set.
*/
-struct mlx5_txq_ctrl*
-mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
- unsigned int socket,
- const struct rte_eth_txconf *conf)
+struct mlx5_txq_ctrl *
+mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_txconf *conf)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *tmpl;
tmpl = rte_calloc_socket("TXQ", 1,
sizeof(*tmpl) +
desc * sizeof(struct rte_mbuf *),
0, socket);
- if (!tmpl)
+ if (!tmpl) {
+ rte_errno = ENOMEM;
return NULL;
+ }
+ if (mlx5_mr_btree_init(&tmpl->txq.mr_ctrl.cache_bh,
+ MLX5_MR_BTREE_CACHE_N, socket)) {
+ /* rte_errno is already set. */
+ goto error;
+ }
+ /* Save pointer of global generation number to check memory event. */
+ tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->mr.dev_gen;
assert(desc > MLX5_TX_COMP_THRESH);
- tmpl->txq.offloads = conf->offloads;
+ tmpl->txq.offloads = conf->offloads |
+ dev->data->dev_conf.txmode.offloads;
tmpl->priv = priv;
tmpl->socket = socket;
tmpl->txq.elts_n = log2above(desc);
+ tmpl->idx = idx;
txq_set_params(tmpl);
- /* MRs will be registered in mp2mr[] later. */
- DEBUG("priv->device_attr.max_qp_wr is %d",
- priv->device_attr.orig_attr.max_qp_wr);
- DEBUG("priv->device_attr.max_sge is %d",
- priv->device_attr.orig_attr.max_sge);
+ DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
+ dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
+ DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
+ dev->data->port_id, priv->device_attr.orig_attr.max_sge);
tmpl->txq.elts =
(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
tmpl->txq.stats.idx = idx;
rte_atomic32_inc(&tmpl->refcnt);
- DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,
- (void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
+ DRV_LOG(DEBUG, "port %u Tx queue %u: refcnt %d", dev->data->port_id,
+ idx, rte_atomic32_read(&tmpl->refcnt));
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
+error:
+ rte_free(tmpl);
+ return NULL;
}
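Since mlx5_txq_new() now returns NULL and sets rte_errno instead of encoding the error in the pointer, callers are expected to propagate it; a hypothetical caller sketch following the same conventions as the patch:

struct mlx5_txq_ctrl *ctrl;

ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
if (ctrl == NULL) {
	/* rte_errno was set by mlx5_txq_new(), e.g. ENOMEM. */
	DRV_LOG(ERR, "port %u unable to allocate Tx queue %u: %s",
		dev->data->port_id, idx, strerror(rte_errno));
	return -rte_errno;
}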
/**
* Get a Tx queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* TX queue index.
*
* @return
* A pointer to the queue if it exists.
*/
-struct mlx5_txq_ctrl*
-mlx5_priv_txq_get(struct priv *priv, uint16_t idx)
+struct mlx5_txq_ctrl *
+mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *ctrl = NULL;
if ((*priv->txqs)[idx]) {
ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl,
txq);
- unsigned int i;
-
- mlx5_priv_txq_ibv_get(priv, idx);
- for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
- struct mlx5_mr *mr = NULL;
-
- (void)mr;
- if (ctrl->txq.mp2mr[i]) {
- mr = priv_mr_get(priv, ctrl->txq.mp2mr[i]->mp);
- assert(mr);
- }
- }
+ mlx5_txq_ibv_get(dev, idx);
rte_atomic32_inc(&ctrl->refcnt);
- DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,
- (void *)ctrl, rte_atomic32_read(&ctrl->refcnt));
+ DRV_LOG(DEBUG, "port %u Tx queue %u refcnt %d",
+ dev->data->port_id,
+ ctrl->idx, rte_atomic32_read(&ctrl->refcnt));
}
return ctrl;
}
@@ -824,57 +819,47 @@ mlx5_priv_txq_get(struct priv *priv, uint16_t idx)
/**
* Release a Tx queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* TX queue index.
*
* @return
- * 0 on success, errno on failure.
+ * 1 while a reference on it exists, 0 when freed.
*/
int
-mlx5_priv_txq_release(struct priv *priv, uint16_t idx)
+mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
{
- unsigned int i;
+ struct priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *txq;
size_t page_size = sysconf(_SC_PAGESIZE);
if (!(*priv->txqs)[idx])
return 0;
txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,
- (void *)txq, rte_atomic32_read(&txq->refcnt));
- if (txq->ibv) {
- int ret;
-
- ret = mlx5_priv_txq_ibv_release(priv, txq->ibv);
- if (!ret)
- txq->ibv = NULL;
- }
- for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
- if (txq->txq.mp2mr[i]) {
- priv_mr_release(priv, txq->txq.mp2mr[i]);
- txq->txq.mp2mr[i] = NULL;
- }
- }
+ DRV_LOG(DEBUG, "port %u Tx queue %u: refcnt %d", dev->data->port_id,
+ txq->idx, rte_atomic32_read(&txq->refcnt));
+ if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv))
+ txq->ibv = NULL;
if (priv->uar_base)
munmap((void *)RTE_ALIGN_FLOOR((uintptr_t)txq->txq.bf_reg,
page_size), page_size);
if (rte_atomic32_dec_and_test(&txq->refcnt)) {
txq_free_elts(txq);
+ mlx5_mr_btree_free(&txq->txq.mr_ctrl.cache_bh);
LIST_REMOVE(txq, next);
rte_free(txq);
(*priv->txqs)[idx] = NULL;
return 0;
}
- return EBUSY;
+ return 1;
}
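The return value of mlx5_txq_release() changes here from an errno to a plain reference indicator (1 while referenced, 0 once freed); a hedged sketch of how a caller that paired it with mlx5_txq_get() might check it:

/* Hypothetical caller: drop the reference taken earlier by mlx5_txq_get(). */
if (mlx5_txq_release(dev, idx) == 0)
	DRV_LOG(DEBUG, "port %u Tx queue %u freed", dev->data->port_id, idx);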
/**
* Verify if the queue can be released.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* TX queue index.
*
@@ -882,8 +867,9 @@ mlx5_priv_txq_release(struct priv *priv, uint16_t idx)
* 1 if the queue can be released.
*/
int
-mlx5_priv_txq_releasable(struct priv *priv, uint16_t idx)
+mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *txq;
if (!(*priv->txqs)[idx])
@@ -895,20 +881,22 @@ mlx5_priv_txq_releasable(struct priv *priv, uint16_t idx)
/**
* Verify the Tx Queue list is empty
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
- * @return the number of object not released.
+ * @return
+ * The number of objects not released.
*/
int
-mlx5_priv_txq_verify(struct priv *priv)
+mlx5_txq_verify(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *txq;
int ret = 0;
LIST_FOREACH(txq, &priv->txqsctrl, next) {
- DEBUG("%p: Tx Queue %p still referenced", (void *)priv,
- (void *)txq);
+ DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
+ dev->data->port_id, txq->idx);
++ret;
}
return ret;
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index e1bfb9cd..886f60e6 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#ifndef RTE_PMD_MLX5_UTILS_H_
@@ -61,14 +61,21 @@ pmd_drv_log_basename(const char *s)
return s;
}
+extern int mlx5_logtype;
+
+#define PMD_DRV_LOG___(level, ...) \
+ rte_log(RTE_LOG_ ## level, \
+ mlx5_logtype, \
+ RTE_FMT(MLX5_DRIVER_NAME ": " \
+ RTE_FMT_HEAD(__VA_ARGS__,), \
+ RTE_FMT_TAIL(__VA_ARGS__,)))
+
/*
* When debugging is enabled (NDEBUG not defined), file, line and function
* information replace the driver name (MLX5_DRIVER_NAME) in log messages.
*/
#ifndef NDEBUG
-#define PMD_DRV_LOG___(level, ...) \
- ERRNO_SAFE(RTE_LOG(level, PMD, __VA_ARGS__))
#define PMD_DRV_LOG__(level, ...) \
PMD_DRV_LOG___(level, "%s:%u: %s(): " __VA_ARGS__)
#define PMD_DRV_LOG_(level, s, ...) \
@@ -80,9 +87,6 @@ pmd_drv_log_basename(const char *s)
__VA_ARGS__)
#else /* NDEBUG */
-
-#define PMD_DRV_LOG___(level, ...) \
- ERRNO_SAFE(RTE_LOG(level, PMD, MLX5_DRIVER_NAME ": " __VA_ARGS__))
#define PMD_DRV_LOG__(level, ...) \
PMD_DRV_LOG___(level, __VA_ARGS__)
#define PMD_DRV_LOG_(level, s, ...) \
@@ -91,18 +95,15 @@ pmd_drv_log_basename(const char *s)
#endif /* NDEBUG */
/* Generic printf()-like logging macro with automatic line feed. */
-#define PMD_DRV_LOG(level, ...) \
+#define DRV_LOG(level, ...) \
PMD_DRV_LOG_(level, \
__VA_ARGS__ PMD_DRV_LOG_STRIP PMD_DRV_LOG_OPAREN, \
PMD_DRV_LOG_CPAREN)
-/*
- * Like assert(), DEBUG() becomes a no-op and claim_zero() does not perform
- * any check when debugging is disabled.
- */
+/* claim_zero() does not perform any check when debugging is disabled. */
#ifndef NDEBUG
-#define DEBUG(...) PMD_DRV_LOG(DEBUG, __VA_ARGS__)
+#define DEBUG(...) DRV_LOG(DEBUG, __VA_ARGS__)
#define claim_zero(...) assert((__VA_ARGS__) == 0)
#define claim_nonzero(...) assert((__VA_ARGS__) != 0)
@@ -114,9 +115,9 @@ pmd_drv_log_basename(const char *s)
#endif /* NDEBUG */
-#define INFO(...) PMD_DRV_LOG(INFO, __VA_ARGS__)
-#define WARN(...) PMD_DRV_LOG(WARNING, __VA_ARGS__)
-#define ERROR(...) PMD_DRV_LOG(ERR, __VA_ARGS__)
+#define INFO(...) DRV_LOG(INFO, __VA_ARGS__)
+#define WARN(...) DRV_LOG(WARNING, __VA_ARGS__)
+#define ERROR(...) DRV_LOG(ERR, __VA_ARGS__)
/* Convenience macros for accessing mbuf fields. */
#define NEXT(m) ((m)->next)
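DRV_LOG() now feeds rte_log() with a dedicated mlx5_logtype instead of the generic PMD type. The logtype itself is registered elsewhere (presumably in mlx5.c); a hedged sketch of what such a registration typically looks like for a DPDK PMD, with the logtype name assumed:

int mlx5_logtype;

RTE_INIT(mlx5_log_init)
{
	mlx5_logtype = rte_log_register("pmd.net.mlx5");
	if (mlx5_logtype >= 0)
		rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE);
}

/* Usage, anywhere in the driver: */
DRV_LOG(INFO, "port %u started", dev->data->port_id);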
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index 75c34562..c91d08be 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#include <stddef.h>
@@ -8,6 +8,12 @@
#include <assert.h>
#include <stdint.h>
+/*
+ * Not needed by this file; included to work around the lack of off_t
+ * definition for mlx5dv.h with unpatched rdma-core versions.
+ */
+#include <sys/types.h>
+
/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
@@ -37,26 +43,24 @@
* Toggle filter.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
struct priv *priv = dev->data->dev_private;
unsigned int i;
- int ret = 0;
- priv_lock(priv);
- DEBUG("%p: %s VLAN filter ID %" PRIu16,
- (void *)dev, (on ? "enable" : "disable"), vlan_id);
+ DRV_LOG(DEBUG, "port %u %s VLAN filter ID %" PRIu16,
+ dev->data->port_id, (on ? "enable" : "disable"), vlan_id);
assert(priv->vlan_filter_n <= RTE_DIM(priv->vlan_filter));
for (i = 0; (i != priv->vlan_filter_n); ++i)
if (priv->vlan_filter[i] == vlan_id)
break;
/* Check if there's room for another VLAN filter. */
if (i == RTE_DIM(priv->vlan_filter)) {
- ret = -ENOMEM;
- goto out;
+ rte_errno = ENOMEM;
+ return -rte_errno;
}
if (i < priv->vlan_filter_n) {
assert(priv->vlan_filter_n != 0);
@@ -79,37 +83,49 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
priv->vlan_filter[priv->vlan_filter_n] = vlan_id;
++priv->vlan_filter_n;
}
- if (dev->data->dev_started)
- priv_dev_traffic_restart(priv, dev);
out:
- priv_unlock(priv);
- return ret;
+ if (dev->data->dev_started)
+ return mlx5_traffic_restart(dev);
+ return 0;
}
/**
- * Set/reset VLAN stripping for a specific queue.
+ * Callback to set/reset VLAN stripping for a specific queue.
*
- * @param priv
- * Pointer to private structure.
- * @param idx
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param queue
* RX queue index.
* @param on
* Enable/disable VLAN stripping.
*/
-static void
-priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on)
+void
+mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[queue];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
struct ibv_wq_attr mod;
uint16_t vlan_offloads =
(on ? IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) |
0;
- int err;
+ int ret;
- DEBUG("set VLAN offloads 0x%x for port %d queue %d",
- vlan_offloads, rxq->port_id, idx);
+ /* Validate hw support */
+ if (!priv->config.hw_vlan_strip) {
+ DRV_LOG(ERR, "port %u VLAN stripping is not supported",
+ dev->data->port_id);
+ return;
+ }
+ /* Validate queue number */
+ if (queue >= priv->rxqs_n) {
+ DRV_LOG(ERR, "port %u VLAN stripping, invalid queue number %d",
+ dev->data->port_id, queue);
+ return;
+ }
+ DRV_LOG(DEBUG, "port %u set VLAN offloads 0x%x for port %u queue %d",
+ dev->data->port_id, vlan_offloads, rxq->port_id, queue);
if (!rxq_ctrl->ibv) {
/* Update related bits in RX queue. */
rxq->vlan_strip = !!on;
@@ -120,57 +136,26 @@ priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on)
.flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING,
.flags = vlan_offloads,
};
-
- err = mlx5_glue->modify_wq(rxq_ctrl->ibv->wq, &mod);
- if (err) {
- ERROR("%p: failed to modified stripping mode: %s",
- (void *)priv, strerror(err));
+ ret = mlx5_glue->modify_wq(rxq_ctrl->ibv->wq, &mod);
+ if (ret) {
+ DRV_LOG(ERR, "port %u failed to modify stripping mode: %s",
+ dev->data->port_id, strerror(rte_errno));
return;
}
-
/* Update related bits in RX queue. */
rxq->vlan_strip = !!on;
}
/**
- * Callback to set/reset VLAN stripping for a specific queue.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param queue
- * RX queue index.
- * @param on
- * Enable/disable VLAN stripping.
- */
-void
-mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
-{
- struct priv *priv = dev->data->dev_private;
-
- /* Validate hw support */
- if (!priv->config.hw_vlan_strip) {
- ERROR("VLAN stripping is not supported");
- return;
- }
-
- /* Validate queue number */
- if (queue >= priv->rxqs_n) {
- ERROR("VLAN stripping, invalid queue number %d", queue);
- return;
- }
-
- priv_lock(priv);
- priv_vlan_strip_queue_set(priv, queue, on);
- priv_unlock(priv);
-}
-
-/**
* Callback to set/reset VLAN offloads for a port.
*
* @param dev
* Pointer to Ethernet device structure.
* @param mask
* VLAN offload bit mask.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
@@ -183,16 +168,13 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
DEV_RX_OFFLOAD_VLAN_STRIP);
if (!priv->config.hw_vlan_strip) {
- ERROR("VLAN stripping is not supported");
+ DRV_LOG(ERR, "port %u VLAN stripping is not supported",
+ dev->data->port_id);
return 0;
}
-
/* Run on every RX queue and set/reset VLAN stripping. */
- priv_lock(priv);
for (i = 0; (i != priv->rxqs_n); i++)
- priv_vlan_strip_queue_set(priv, i, hw_vlan_strip);
- priv_unlock(priv);
+ mlx5_vlan_strip_queue_set(dev, i, hw_vlan_strip);
}
-
return 0;
}
diff --git a/drivers/net/mrvl/Makefile b/drivers/net/mrvl/Makefile
deleted file mode 100644
index f75e53c6..00000000
--- a/drivers/net/mrvl/Makefile
+++ /dev/null
@@ -1,68 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2017 Marvell International Ltd.
-# Copyright(c) 2017 Semihalf.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of the copyright holder nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-ifneq ($(MAKECMDGOALS),clean)
-ifneq ($(MAKECMDGOALS),config)
-ifeq ($(LIBMUSDK_PATH),)
-$(error "Please define LIBMUSDK_PATH environment variable")
-endif
-endif
-endif
-
-# library name
-LIB = librte_pmd_mrvl.a
-
-# library version
-LIBABIVER := 1
-
-# versioning export map
-EXPORT_MAP := rte_pmd_mrvl_version.map
-
-# external library dependencies
-CFLAGS += -I$(LIBMUSDK_PATH)/include
-CFLAGS += -DMVCONF_TYPES_PUBLIC
-CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC
-CFLAGS += $(WERROR_FLAGS)
-CFLAGS += -O3
-LDLIBS += -L$(LIBMUSDK_PATH)/lib
-LDLIBS += -lmusdk
-LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
-LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_cfgfile
-LDLIBS += -lrte_bus_vdev
-
-# library source files
-SRCS-$(CONFIG_RTE_LIBRTE_MRVL_PMD) += mrvl_ethdev.c
-SRCS-$(CONFIG_RTE_LIBRTE_MRVL_PMD) += mrvl_qos.c
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/mvpp2/Makefile b/drivers/net/mvpp2/Makefile
new file mode 100644
index 00000000..492aef97
--- /dev/null
+++ b/drivers/net/mvpp2/Makefile
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Marvell International Ltd.
+# Copyright(c) 2017 Semihalf.
+# All rights reserved.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(MAKECMDGOALS),config)
+ifeq ($(LIBMUSDK_PATH),)
+$(error "Please define LIBMUSDK_PATH environment variable")
+endif
+endif
+endif
+
+# library name
+LIB = librte_pmd_mvpp2.a
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_mvpp2_version.map
+
+# external library dependencies
+CFLAGS += -I$(LIBMUSDK_PATH)/include
+CFLAGS += -DMVCONF_TYPES_PUBLIC
+CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -O3
+LDLIBS += -L$(LIBMUSDK_PATH)/lib
+LDLIBS += -lmusdk
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_cfgfile
+LDLIBS += -lrte_bus_vdev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mrvl_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mrvl_qos.c
+SRCS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mrvl_flow.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/mvpp2/meson.build b/drivers/net/mvpp2/meson.build
new file mode 100644
index 00000000..e1398895
--- /dev/null
+++ b/drivers/net/mvpp2/meson.build
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Marvell International Ltd.
+# Copyright(c) 2018 Semihalf.
+# All rights reserved.
+
+path = get_option('lib_musdk_dir')
+lib_dir = path + '/lib'
+inc_dir = path + '/include'
+
+lib = cc.find_library('libmusdk', dirs : [lib_dir], required: false)
+if not lib.found()
+ build = false
+else
+ ext_deps += lib
+ includes += include_directories(inc_dir)
+ cflags += ['-DMVCONF_TYPES_PUBLIC', '-DMVCONF_DMA_PHYS_ADDR_T_PUBLIC']
+endif
+
+sources = files(
+ 'mrvl_ethdev.c',
+ 'mrvl_flow.c',
+ 'mrvl_qos.c'
+)
+
+deps += ['cfgfile']
diff --git a/drivers/net/mrvl/mrvl_ethdev.c b/drivers/net/mvpp2/mrvl_ethdev.c
index 705c4bd8..ea6a7864 100644
--- a/drivers/net/mrvl/mrvl_ethdev.c
+++ b/drivers/net/mvpp2/mrvl_ethdev.c
@@ -1,35 +1,7 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Marvell International Ltd.
- * Copyright(c) 2017 Semihalf.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Semihalf nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
*/
#include <rte_ethdev_driver.h>
@@ -162,6 +134,7 @@ struct mrvl_txq {
int port_id;
uint64_t bytes_sent;
struct mrvl_shadow_txq shadow_txqs[RTE_MAX_LCORE];
+ int tx_deferred_start;
};
static int mrvl_lcore_first;
@@ -173,6 +146,32 @@ static inline void mrvl_free_sent_buffers(struct pp2_ppio *ppio,
struct pp2_hif *hif, unsigned int core_id,
struct mrvl_shadow_txq *sq, int qid, int force);
+#define MRVL_XSTATS_TBL_ENTRY(name) { \
+ #name, offsetof(struct pp2_ppio_statistics, name), \
+ sizeof(((struct pp2_ppio_statistics *)0)->name) \
+}
+
+/* Table with xstats data */
+static struct {
+ const char *name;
+ unsigned int offset;
+ unsigned int size;
+} mrvl_xstats_tbl[] = {
+ MRVL_XSTATS_TBL_ENTRY(rx_bytes),
+ MRVL_XSTATS_TBL_ENTRY(rx_packets),
+ MRVL_XSTATS_TBL_ENTRY(rx_unicast_packets),
+ MRVL_XSTATS_TBL_ENTRY(rx_errors),
+ MRVL_XSTATS_TBL_ENTRY(rx_fullq_dropped),
+ MRVL_XSTATS_TBL_ENTRY(rx_bm_dropped),
+ MRVL_XSTATS_TBL_ENTRY(rx_early_dropped),
+ MRVL_XSTATS_TBL_ENTRY(rx_fifo_dropped),
+ MRVL_XSTATS_TBL_ENTRY(rx_cls_dropped),
+ MRVL_XSTATS_TBL_ENTRY(tx_bytes),
+ MRVL_XSTATS_TBL_ENTRY(tx_packets),
+ MRVL_XSTATS_TBL_ENTRY(tx_unicast_packets),
+ MRVL_XSTATS_TBL_ENTRY(tx_errors)
+};
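MRVL_XSTATS_TBL_ENTRY() stores, per counter, its name plus the offset and size of the matching pp2_ppio_statistics member, which is what lets mrvl_xstats_get() further down read any counter generically. A self-contained sketch of the same offsetof/sizeof technique, using a hypothetical demo_stats struct rather than the MUSDK one:

#include <stddef.h>
#include <stdint.h>

struct demo_stats {
	uint64_t rx_bytes;
	uint32_t rx_errors;
};

#define DEMO_ENTRY(name) { #name, offsetof(struct demo_stats, name), \
			   sizeof(((struct demo_stats *)0)->name) }

static const struct {
	const char *name;
	size_t offset;
	size_t size;
} demo_tbl[] = { DEMO_ENTRY(rx_bytes), DEMO_ENTRY(rx_errors) };

static uint64_t
demo_read(const struct demo_stats *s, size_t i)
{
	const uint8_t *base = (const uint8_t *)s + demo_tbl[i].offset;

	/* Widen 32-bit counters to 64 bits, as mrvl_xstats_get() does. */
	return demo_tbl[i].size == sizeof(uint32_t) ?
	       *(const uint32_t *)base : *(const uint64_t *)base;
}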
+
static inline int
mrvl_get_bpool_size(int pp2_id, int pool_id)
{
@@ -319,26 +318,11 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
- RTE_LOG(INFO, PMD, "VLAN stripping not supported\n");
- return -EINVAL;
- }
-
if (dev->data->dev_conf.rxmode.split_hdr_size) {
RTE_LOG(INFO, PMD, "Split headers not supported\n");
return -EINVAL;
}
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
- RTE_LOG(INFO, PMD, "RX Scatter/Gather not supported\n");
- return -EINVAL;
- }
-
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
- RTE_LOG(INFO, PMD, "LRO not supported\n");
- return -EINVAL;
- }
-
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
ETHER_HDR_LEN - ETHER_CRC_LEN;
@@ -348,6 +332,11 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
if (ret < 0)
return ret;
+ ret = mrvl_configure_txqs(priv, dev->data->port_id,
+ dev->data->nb_tx_queues);
+ if (ret < 0)
+ return ret;
+
priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues;
priv->ppio_params.maintain_stats = 1;
priv->nb_rx_queues = dev->data->nb_rx_queues;
@@ -456,6 +445,70 @@ mrvl_dev_set_link_down(struct rte_eth_dev *dev)
}
/**
+ * DPDK callback to start tx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param queue_id
+ * Transmit queue index.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ int ret;
+
+ if (!priv)
+ return -EPERM;
+
+ /* passing 1 enables given tx queue */
+ ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 1);
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Failed to start txq %d\n", queue_id);
+ return ret;
+ }
+
+ dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+/**
+ * DPDK callback to stop tx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param queue_id
+ * Transmit queue index.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ int ret;
+
+ if (!priv->ppio)
+ return -EPERM;
+
+ /* passing 0 disables given tx queue */
+ ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 0);
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Failed to stop txq %d\n", queue_id);
+ return ret;
+ }
+
+ dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
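mrvl_tx_queue_start()/mrvl_tx_queue_stop(), together with the tx_deferred_start flag wired up further down, let an application keep a Tx queue stopped until it is ready; a hedged application-side sketch where port_id, queue_id and nb_desc are placeholders:

struct rte_eth_txconf txconf = {
	.tx_deferred_start = 1, /* do not start this queue in dev_start() */
};

rte_eth_tx_queue_setup(port_id, queue_id, nb_desc, rte_socket_id(), &txconf);
rte_eth_dev_start(port_id);
/* ... later, once the application wants the queue running: */
rte_eth_dev_tx_queue_start(port_id, queue_id);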
+
+/**
* DPDK callback to start the device.
*
* @param dev
@@ -469,7 +522,7 @@ mrvl_dev_start(struct rte_eth_dev *dev)
{
struct mrvl_priv *priv = dev->data->dev_private;
char match[MRVL_MATCH_LEN];
- int ret = 0, def_init_size;
+ int ret = 0, i, def_init_size;
snprintf(match, sizeof(match), "ppio-%d:%d",
priv->pp_id, priv->ppio_id);
@@ -556,6 +609,24 @@ mrvl_dev_start(struct rte_eth_dev *dev)
goto out;
}
+ /* start tx queues */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct mrvl_txq *txq = dev->data->tx_queues[i];
+
+ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ if (!txq->tx_deferred_start)
+ continue;
+
+ /*
+ * All txqs are started by default. Stop them
+ * so that tx_deferred_start works as expected.
+ */
+ ret = mrvl_tx_queue_stop(dev, i);
+ if (ret)
+ goto out;
+ }
+
return 0;
out:
RTE_LOG(ERR, PMD, "Failed to start device\n");
@@ -682,12 +753,23 @@ mrvl_dev_stop(struct rte_eth_dev *dev)
mrvl_dev_set_link_down(dev);
mrvl_flush_rx_queues(dev);
mrvl_flush_tx_shadow_queues(dev);
+ if (priv->cls_tbl) {
+ pp2_cls_tbl_deinit(priv->cls_tbl);
+ priv->cls_tbl = NULL;
+ }
if (priv->qos_tbl) {
pp2_cls_qos_tbl_deinit(priv->qos_tbl);
priv->qos_tbl = NULL;
}
- pp2_ppio_deinit(priv->ppio);
+ if (priv->ppio)
+ pp2_ppio_deinit(priv->ppio);
priv->ppio = NULL;
+
+ /* policer must be released after ppio deinitialization */
+ if (priv->policer) {
+ pp2_cls_plcr_deinit(priv->policer);
+ priv->policer = NULL;
+ }
}
/**
@@ -800,6 +882,9 @@ mrvl_promiscuous_enable(struct rte_eth_dev *dev)
if (!priv->ppio)
return;
+ if (priv->isolated)
+ return;
+
ret = pp2_ppio_set_promisc(priv->ppio, 1);
if (ret)
RTE_LOG(ERR, PMD, "Failed to enable promiscuous mode\n");
@@ -820,6 +905,9 @@ mrvl_allmulticast_enable(struct rte_eth_dev *dev)
if (!priv->ppio)
return;
+ if (priv->isolated)
+ return;
+
ret = pp2_ppio_set_mc_promisc(priv->ppio, 1);
if (ret)
RTE_LOG(ERR, PMD, "Failed enable all-multicast mode\n");
@@ -883,6 +971,9 @@ mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
if (!priv->ppio)
return;
+ if (priv->isolated)
+ return;
+
ret = pp2_ppio_remove_mac_addr(priv->ppio,
dev->data->mac_addrs[index].addr_bytes);
if (ret) {
@@ -915,6 +1006,9 @@ mrvl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
char buf[ETHER_ADDR_FMT_SIZE];
int ret;
+ if (priv->isolated)
+ return -ENOTSUP;
+
if (index == 0)
/* For setting index 0, mrvl_mac_addr_set() should be used.*/
return -1;
@@ -952,15 +1046,21 @@ mrvl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
* Pointer to Ethernet device structure.
* @param mac_addr
* MAC address to register.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
*/
-static void
+static int
mrvl_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
struct mrvl_priv *priv = dev->data->dev_private;
int ret;
if (!priv->ppio)
- return;
+ return 0;
+
+ if (priv->isolated)
+ return -ENOTSUP;
ret = pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
if (ret) {
@@ -968,6 +1068,8 @@ mrvl_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
ether_format_addr(buf, sizeof(buf), mac_addr);
RTE_LOG(ERR, PMD, "Failed to set mac to %s\n", buf);
}
+
+ return ret;
}
/**
@@ -1107,6 +1209,90 @@ mrvl_stats_reset(struct rte_eth_dev *dev)
}
/**
+ * DPDK callback to get extended statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param stats
+ * Pointer to xstats table.
+ * @param n
+ * Number of entries in xstats table.
+ * @return
+ * Negative value on error, number of read xstats otherwise.
+ */
+static int
+mrvl_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *stats, unsigned int n)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct pp2_ppio_statistics ppio_stats;
+ unsigned int i;
+
+ if (!stats)
+ return 0;
+
+ pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
+ for (i = 0; i < n && i < RTE_DIM(mrvl_xstats_tbl); i++) {
+ uint64_t val;
+
+ if (mrvl_xstats_tbl[i].size == sizeof(uint32_t))
+ val = *(uint32_t *)((uint8_t *)&ppio_stats +
+ mrvl_xstats_tbl[i].offset);
+ else if (mrvl_xstats_tbl[i].size == sizeof(uint64_t))
+ val = *(uint64_t *)((uint8_t *)&ppio_stats +
+ mrvl_xstats_tbl[i].offset);
+ else
+ return -EINVAL;
+
+ stats[i].id = i;
+ stats[i].value = val;
+ }
+
+ return n;
+}
+
+/**
+ * DPDK callback to reset extended statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_xstats_reset(struct rte_eth_dev *dev)
+{
+ mrvl_stats_reset(dev);
+}
+
+/**
+ * DPDK callback to get extended statistics names.
+ *
+ * @param dev (unused)
+ * Pointer to Ethernet device structure.
+ * @param xstats_names
+ * Pointer to xstats names table.
+ * @param size
+ * Size of the xstats names table.
+ * @return
+ * Number of read names.
+ */
+static int
+mrvl_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int size)
+{
+ unsigned int i;
+
+ if (!xstats_names)
+ return RTE_DIM(mrvl_xstats_tbl);
+
+ for (i = 0; i < size && i < RTE_DIM(mrvl_xstats_tbl); i++)
+ snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s",
+ mrvl_xstats_tbl[i].name);
+
+ return size;
+}
+
+/**
* DPDK callback to get information about the device.
*
* @param dev
@@ -1217,9 +1403,11 @@ static void mrvl_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
struct rte_eth_txq_info *qinfo)
{
struct mrvl_priv *priv = dev->data->dev_private;
+ struct mrvl_txq *txq = dev->data->tx_queues[tx_queue_id];
qinfo->nb_desc =
priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size;
+ qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
/**
@@ -1243,6 +1431,9 @@ mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
if (!priv->ppio)
return -EPERM;
+ if (priv->isolated)
+ return -ENOTSUP;
+
return on ? pp2_ppio_add_vlan(priv->ppio, vlan_id) :
pp2_ppio_remove_vlan(priv->ppio, vlan_id);
}
@@ -1261,8 +1452,8 @@ mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
static int
mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
{
- struct buff_release_entry entries[MRVL_PP2_TXD_MAX];
- struct rte_mbuf *mbufs[MRVL_PP2_TXD_MAX];
+ struct buff_release_entry entries[MRVL_PP2_RXD_MAX];
+ struct rte_mbuf *mbufs[MRVL_PP2_RXD_MAX];
int i, ret;
unsigned int core_id;
struct pp2_hif *hif;
@@ -1316,42 +1507,6 @@ out:
}
/**
- * Check whether requested rx queue offloads match port offloads.
- *
- * @param
- * dev Pointer to the device.
- * @param
- * requested Bitmap of the requested offloads.
- *
- * @return
- * 1 if requested offloads are okay, 0 otherwise.
- */
-static int
-mrvl_rx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested)
-{
- uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
- uint64_t supported = MRVL_RX_OFFLOADS;
- uint64_t unsupported = requested & ~supported;
- uint64_t missing = mandatory & ~requested;
-
- if (unsupported) {
- RTE_LOG(ERR, PMD, "Some Rx offloads are not supported. "
- "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
- requested, supported);
- return 0;
- }
-
- if (missing) {
- RTE_LOG(ERR, PMD, "Some Rx offloads are missing. "
- "Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n",
- requested, missing);
- return 0;
- }
-
- return 1;
-}
-
-/**
* DPDK callback to configure the receive queue.
*
* @param dev
@@ -1381,9 +1536,9 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
uint32_t min_size,
max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
int ret, tc, inq;
+ uint64_t offloads;
- if (!mrvl_rx_queue_offloads_okay(dev, conf->offloads))
- return -ENOTSUP;
+ offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
/*
@@ -1416,8 +1571,7 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
rxq->priv = priv;
rxq->mp = mp;
- rxq->cksum_enabled =
- dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
+ rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
rxq->queue_id = idx;
rxq->port_id = dev->data->port_id;
mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
@@ -1480,42 +1634,6 @@ mrvl_rx_queue_release(void *rxq)
}
/**
- * Check whether requested tx queue offloads match port offloads.
- *
- * @param
- * dev Pointer to the device.
- * @param
- * requested Bitmap of the requested offloads.
- *
- * @return
- * 1 if requested offloads are okay, 0 otherwise.
- */
-static int
-mrvl_tx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested)
-{
- uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
- uint64_t supported = MRVL_TX_OFFLOADS;
- uint64_t unsupported = requested & ~supported;
- uint64_t missing = mandatory & ~requested;
-
- if (unsupported) {
- RTE_LOG(ERR, PMD, "Some Rx offloads are not supported. "
- "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
- requested, supported);
- return 0;
- }
-
- if (missing) {
- RTE_LOG(ERR, PMD, "Some Rx offloads are missing. "
- "Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n",
- requested, missing);
- return 0;
- }
-
- return 1;
-}
-
-/**
* DPDK callback to configure the transmit queue.
*
* @param dev
@@ -1527,7 +1645,7 @@ mrvl_tx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested)
* @param socket
* NUMA socket on which memory must be allocated.
* @param conf
- * Thresholds parameters.
+ * Tx queue configuration parameters.
*
* @return
* 0 on success, negative error value otherwise.
@@ -1540,9 +1658,6 @@ mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
struct mrvl_priv *priv = dev->data->dev_private;
struct mrvl_txq *txq;
- if (!mrvl_tx_queue_offloads_okay(dev, conf->offloads))
- return -ENOTSUP;
-
if (dev->data->tx_queues[idx]) {
rte_free(dev->data->tx_queues[idx]);
dev->data->tx_queues[idx] = NULL;
@@ -1555,10 +1670,10 @@ mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
txq->priv = priv;
txq->queue_id = idx;
txq->port_id = dev->data->port_id;
+ txq->tx_deferred_start = conf->tx_deferred_start;
dev->data->tx_queues[idx] = txq;
priv->ppio_params.outqs_params.outqs_params[idx].size = desc;
- priv->ppio_params.outqs_params.outqs_params[idx].weight = 1;
return 0;
}
@@ -1581,6 +1696,82 @@ mrvl_tx_queue_release(void *txq)
}
/**
+ * DPDK callback to get flow control configuration.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param fc_conf
+ * Pointer to the flow control configuration.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ int ret, en;
+
+ if (!priv)
+ return -EPERM;
+
+ ret = pp2_ppio_get_rx_pause(priv->ppio, &en);
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Failed to read rx pause state\n");
+ return ret;
+ }
+
+ fc_conf->mode = en ? RTE_FC_RX_PAUSE : RTE_FC_NONE;
+
+ return 0;
+}
+
+/**
+ * DPDK callback to set flow control configuration.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param fc_conf
+ * Pointer to the flow control configuration.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+
+ if (!priv)
+ return -EPERM;
+
+ if (fc_conf->high_water ||
+ fc_conf->low_water ||
+ fc_conf->pause_time ||
+ fc_conf->mac_ctrl_frame_fwd ||
+ fc_conf->autoneg) {
+ RTE_LOG(ERR, PMD, "Flowctrl parameter is not supported\n");
+
+ return -EINVAL;
+ }
+
+ if (fc_conf->mode == RTE_FC_NONE ||
+ fc_conf->mode == RTE_FC_RX_PAUSE) {
+ int ret, en;
+
+ en = fc_conf->mode == RTE_FC_NONE ? 0 : 1;
+ ret = pp2_ppio_set_rx_pause(priv->ppio, en);
+ if (ret)
+ RTE_LOG(ERR, PMD,
+ "Failed to change flowctrl on RX side\n");
+
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
* Update RSS hash configuration
*
* @param dev
@@ -1597,6 +1788,9 @@ mrvl_rss_hash_update(struct rte_eth_dev *dev,
{
struct mrvl_priv *priv = dev->data->dev_private;
+ if (priv->isolated)
+ return -ENOTSUP;
+
return mrvl_configure_rss(priv, rss_conf);
}
@@ -1633,6 +1827,39 @@ mrvl_rss_hash_conf_get(struct rte_eth_dev *dev,
return 0;
}
+/**
+ * DPDK callback to get rte_flow callbacks.
+ *
+ * @param dev
+ * Pointer to the device structure.
+ * @param filter_type
+ * Flow filter type.
+ * @param filter_op
+ * Flow filter operation.
+ * @param arg
+ * Pointer to pass the flow ops.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_eth_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op, void *arg)
+{
+ switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &mrvl_flow_ops;
+ return 0;
+ default:
+ RTE_LOG(WARNING, PMD, "Filter type (%d) not supported\n",
+ filter_type);
+ return -EINVAL;
+ }
+}
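mrvl_eth_filter_ctrl() only answers the RTE_ETH_FILTER_GENERIC/RTE_ETH_FILTER_GET query that the ethdev rte_flow layer issues to locate the PMD's flow ops; a hedged sketch of that lookup (roughly what librte_ethdev does internally, not code from this patch):

const struct rte_flow_ops *ops = NULL;

if (dev->dev_ops->filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
			      RTE_ETH_FILTER_GET, &ops) == 0) {
	/* ops now points at mrvl_flow_ops; rte_flow_create(),
	 * rte_flow_destroy() etc. dispatch through it. */
}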
+
static const struct eth_dev_ops mrvl_ops = {
.dev_configure = mrvl_dev_configure,
.dev_start = mrvl_dev_start,
@@ -1651,17 +1878,25 @@ static const struct eth_dev_ops mrvl_ops = {
.mtu_set = mrvl_mtu_set,
.stats_get = mrvl_stats_get,
.stats_reset = mrvl_stats_reset,
+ .xstats_get = mrvl_xstats_get,
+ .xstats_reset = mrvl_xstats_reset,
+ .xstats_get_names = mrvl_xstats_get_names,
.dev_infos_get = mrvl_dev_infos_get,
.dev_supported_ptypes_get = mrvl_dev_supported_ptypes_get,
.rxq_info_get = mrvl_rxq_info_get,
.txq_info_get = mrvl_txq_info_get,
.vlan_filter_set = mrvl_vlan_filter_set,
+ .tx_queue_start = mrvl_tx_queue_start,
+ .tx_queue_stop = mrvl_tx_queue_stop,
.rx_queue_setup = mrvl_rx_queue_setup,
.rx_queue_release = mrvl_rx_queue_release,
.tx_queue_setup = mrvl_tx_queue_setup,
.tx_queue_release = mrvl_tx_queue_release,
+ .flow_ctrl_get = mrvl_flow_ctrl_get,
+ .flow_ctrl_set = mrvl_flow_ctrl_set,
.rss_hash_update = mrvl_rss_hash_update,
.rss_hash_conf_get = mrvl_rss_hash_conf_get,
+ .filter_ctrl = mrvl_eth_filter_ctrl,
};
/**
@@ -2280,6 +2515,7 @@ mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
eth_dev->device = &vdev->device;
eth_dev->dev_ops = &mrvl_ops;
+ rte_eth_dev_probing_finish(eth_dev);
return 0;
out_free_mac:
rte_free(eth_dev->data->mac_addrs);
@@ -2484,7 +2720,7 @@ rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
RTE_LOG(INFO, PMD, "Removing %s\n", name);
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) { /* FIXME: removing all devices! */
char ifname[RTE_ETH_NAME_MAX_LEN];
rte_eth_dev_get_name_by_port(i, ifname);
@@ -2507,5 +2743,5 @@ static struct rte_vdev_driver pmd_mrvl_drv = {
.remove = rte_pmd_mrvl_remove,
};
-RTE_PMD_REGISTER_VDEV(net_mrvl, pmd_mrvl_drv);
-RTE_PMD_REGISTER_ALIAS(net_mrvl, eth_mrvl);
+RTE_PMD_REGISTER_VDEV(net_mvpp2, pmd_mrvl_drv);
+RTE_PMD_REGISTER_ALIAS(net_mvpp2, eth_mvpp2);
diff --git a/drivers/net/mrvl/mrvl_ethdev.h b/drivers/net/mvpp2/mrvl_ethdev.h
index f7afae5c..3a428092 100644
--- a/drivers/net/mrvl/mrvl_ethdev.h
+++ b/drivers/net/mvpp2/mrvl_ethdev.h
@@ -1,41 +1,14 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Marvell International Ltd.
- * Copyright(c) 2017 Semihalf.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
*/
#ifndef _MRVL_ETHDEV_H_
#define _MRVL_ETHDEV_H_
#include <rte_spinlock.h>
+#include <rte_flow_driver.h>
#include <env/mv_autogen_comp_flags.h>
#include <drivers/mv_pp2.h>
@@ -108,11 +81,21 @@ struct mrvl_priv {
uint8_t rss_hf_tcp;
uint8_t uc_mc_flushed;
uint8_t vlan_flushed;
+ uint8_t isolated;
struct pp2_ppio_params ppio_params;
struct pp2_cls_qos_tbl_params qos_tbl_params;
struct pp2_cls_tbl *qos_tbl;
uint16_t nb_rx_queues;
+
+ struct pp2_cls_tbl_params cls_tbl_params;
+ struct pp2_cls_tbl *cls_tbl;
+ uint32_t cls_tbl_pattern;
+ LIST_HEAD(mrvl_flows, rte_flow) flows;
+
+ struct pp2_cls_plcr *policer;
};
+/** Flow operations forward declaration. */
+extern const struct rte_flow_ops mrvl_flow_ops;
#endif /* _MRVL_ETHDEV_H_ */
diff --git a/drivers/net/mvpp2/mrvl_flow.c b/drivers/net/mvpp2/mrvl_flow.c
new file mode 100644
index 00000000..437c987c
--- /dev/null
+++ b/drivers/net/mvpp2/mrvl_flow.c
@@ -0,0 +1,2779 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Marvell International Ltd.
+ * Copyright(c) 2018 Semihalf.
+ * All rights reserved.
+ */
+
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_malloc.h>
+#include <rte_log.h>
+
+#include <arpa/inet.h>
+
+#ifdef container_of
+#undef container_of
+#endif
+
+#include "mrvl_ethdev.h"
+#include "mrvl_qos.h"
+#include "env/mv_common.h" /* for BIT() */
+
+/** Number of rules in the classifier table. */
+#define MRVL_CLS_MAX_NUM_RULES 20
+
+/** Size of the classifier key and mask strings. */
+#define MRVL_CLS_STR_SIZE_MAX 40
+
+/** Parsed fields in processed rte_flow_item. */
+enum mrvl_parsed_fields {
+ /* eth flags */
+ F_DMAC = BIT(0),
+ F_SMAC = BIT(1),
+ F_TYPE = BIT(2),
+ /* vlan flags */
+ F_VLAN_ID = BIT(3),
+ F_VLAN_PRI = BIT(4),
+ F_VLAN_TCI = BIT(5), /* not supported by MUSDK yet */
+ /* ip4 flags */
+ F_IP4_TOS = BIT(6),
+ F_IP4_SIP = BIT(7),
+ F_IP4_DIP = BIT(8),
+ F_IP4_PROTO = BIT(9),
+ /* ip6 flags */
+ F_IP6_TC = BIT(10), /* not supported by MUSDK yet */
+ F_IP6_SIP = BIT(11),
+ F_IP6_DIP = BIT(12),
+ F_IP6_FLOW = BIT(13),
+ F_IP6_NEXT_HDR = BIT(14),
+ /* tcp flags */
+ F_TCP_SPORT = BIT(15),
+ F_TCP_DPORT = BIT(16),
+ /* udp flags */
+ F_UDP_SPORT = BIT(17),
+ F_UDP_DPORT = BIT(18),
+};
+
+/** PMD-specific definition of a flow rule handle. */
+struct rte_flow {
+ LIST_ENTRY(rte_flow) next;
+
+ enum mrvl_parsed_fields pattern;
+
+ struct pp2_cls_tbl_rule rule;
+ struct pp2_cls_cos_desc cos;
+ struct pp2_cls_tbl_action action;
+};
+
+static const enum rte_flow_item_type pattern_eth[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_eth_vlan[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_eth_vlan_ip[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_eth_vlan_ip6[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_eth_ip4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_eth_ip4_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_eth_ip4_udp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_eth_ip6[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_eth_ip6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_eth_ip6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_vlan[] = {
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_vlan_ip[] = {
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_vlan_ip_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_vlan_ip_udp[] = {
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_vlan_ip6[] = {
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_vlan_ip6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_vlan_ip6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_ip[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_ip6[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_ip_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_ip6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_ip_udp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_ip6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_udp[] = {
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+#define MRVL_VLAN_ID_MASK 0x0fff
+#define MRVL_VLAN_PRI_MASK 0x7000
+#define MRVL_IPV4_DSCP_MASK 0xfc
+#define MRVL_IPV4_ADDR_MASK 0xffffffff
+#define MRVL_IPV6_FLOW_MASK 0x0fffff
+
+/**
+ * Given a flow item, return the next non-void one.
+ *
+ * @param items Pointer to the item in the table.
+ * @returns Next non-void item, NULL otherwise.
+ */
+static const struct rte_flow_item *
+mrvl_next_item(const struct rte_flow_item *items)
+{
+ const struct rte_flow_item *item = items;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
+ return item;
+ }
+
+ return NULL;
+}
+
+/**
+ * Allocate memory for classifier rule key and mask fields.
+ *
+ * @param field Pointer to the classifier rule.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_alloc_key_mask(struct pp2_cls_rule_key_field *field)
+{
+ unsigned int id = rte_socket_id();
+
+ field->key = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
+ if (!field->key)
+ goto out;
+
+ field->mask = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
+ if (!field->mask)
+ goto out_mask;
+
+ return 0;
+out_mask:
+ rte_free(field->key);
+out:
+ field->key = NULL;
+ field->mask = NULL;
+ return -1;
+}
+
+/**
+ * Free memory allocated for classifier rule key and mask fields.
+ *
+ * @param field Pointer to the classifier rule.
+ */
+static void
+mrvl_free_key_mask(struct pp2_cls_rule_key_field *field)
+{
+ rte_free(field->key);
+ rte_free(field->mask);
+ field->key = NULL;
+ field->mask = NULL;
+}
+
+/**
+ * Free memory allocated for all classifier rule key and mask fields.
+ *
+ * @param rule Pointer to the classifier table rule.
+ */
+static void
+mrvl_free_all_key_mask(struct pp2_cls_tbl_rule *rule)
+{
+ int i;
+
+ for (i = 0; i < rule->num_fields; i++)
+ mrvl_free_key_mask(&rule->fields[i]);
+ rule->num_fields = 0;
+}
+
+/**
+ * Initialize rte flow item parsing.
+ *
+ * @param item Pointer to the flow item.
+ * @param spec_ptr Pointer to the specific item pointer.
+ * @param mask_ptr Pointer to the specific item's mask pointer.
+ * @param def_mask Pointer to the default mask.
+ * @param size Size of the flow item.
+ * @param error Pointer to the rte flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_init(const struct rte_flow_item *item,
+ const void **spec_ptr,
+ const void **mask_ptr,
+ const void *def_mask,
+ unsigned int size,
+ struct rte_flow_error *error)
+{
+ const uint8_t *spec;
+ const uint8_t *mask;
+ const uint8_t *last;
+ uint8_t zeros[size];
+
+ memset(zeros, 0, size);
+
+ if (item == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "NULL item\n");
+ return -rte_errno;
+ }
+
+ if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Mask or last is set without spec\n");
+ return -rte_errno;
+ }
+
+ /*
+ * If "mask" is not set, default mask is used,
+ * but if default mask is NULL, "mask" should be set.
+ */
+ if (item->mask == NULL) {
+ if (def_mask == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Mask should be specified\n");
+ return -rte_errno;
+ }
+
+ mask = (const uint8_t *)def_mask;
+ } else {
+ mask = (const uint8_t *)item->mask;
+ }
+
+ spec = (const uint8_t *)item->spec;
+ last = (const uint8_t *)item->last;
+
+ if (spec == NULL) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Spec should be specified\n");
+ return -rte_errno;
+ }
+
+ /*
+ * If field values in "last" are either 0 or equal to the corresponding
+ * values in "spec" then they are ignored.
+ */
+ if (last != NULL &&
+ !memcmp(last, zeros, size) &&
+ memcmp(last, spec, size) != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Ranging is not supported\n");
+ return -rte_errno;
+ }
+
+ *spec_ptr = spec;
+ *mask_ptr = mask;
+
+ return 0;
+}
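mrvl_parse_init() enforces the usual rte_flow item conventions: spec is mandatory whenever mask or last is given, a missing mask falls back to the supplied default, and ranges are rejected. A hedged example of an eth item that passes these checks (the addresses are arbitrary):

struct rte_flow_item_eth eth_spec = {
	.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
};
struct rte_flow_item_eth eth_mask = {
	.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
};
struct rte_flow_item item = {
	.type = RTE_FLOW_ITEM_TYPE_ETH,
	.spec = &eth_spec,
	.mask = &eth_mask,
	/* .last left NULL: ranging is not supported by this PMD. */
};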
+
+/**
+ * Parse the eth flow item.
+ *
+ * This will create a classifier rule that matches either the destination or
+ * the source mac.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param parse_dst Parse either destination (1) or source (0) mac address.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_mac(const struct rte_flow_item_eth *spec,
+ const struct rte_flow_item_eth *mask,
+ int parse_dst, struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ const uint8_t *k, *m;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ if (parse_dst) {
+ k = spec->dst.addr_bytes;
+ m = mask->dst.addr_bytes;
+
+ flow->pattern |= F_DMAC;
+ } else {
+ k = spec->src.addr_bytes;
+ m = mask->src.addr_bytes;
+
+ flow->pattern |= F_SMAC;
+ }
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 6;
+
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX,
+ "%02x:%02x:%02x:%02x:%02x:%02x",
+ k[0], k[1], k[2], k[3], k[4], k[5]);
+
+ snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX,
+ "%02x:%02x:%02x:%02x:%02x:%02x",
+ m[0], m[1], m[2], m[3], m[4], m[5]);
+
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Helper for parsing the eth flow item destination mac address.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_dmac(const struct rte_flow_item_eth *spec,
+ const struct rte_flow_item_eth *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_mac(spec, mask, 1, flow);
+}
+
+/**
+ * Helper for parsing the eth flow item source mac address.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_smac(const struct rte_flow_item_eth *spec,
+ const struct rte_flow_item_eth *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_mac(spec, mask, 0, flow);
+}
+
+/**
+ * Parse the ether type field of the eth flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_type(const struct rte_flow_item_eth *spec,
+ const struct rte_flow_item_eth *mask __rte_unused,
+ struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ uint16_t k;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 2;
+
+ k = rte_be_to_cpu_16(spec->type);
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
+
+ flow->pattern |= F_TYPE;
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Parse the vid field of the vlan rte flow item.
+ *
+ * This will create a classifier rule that matches vid.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_vlan_id(const struct rte_flow_item_vlan *spec,
+ const struct rte_flow_item_vlan *mask __rte_unused,
+ struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ uint16_t k;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 2;
+
+ k = rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_ID_MASK;
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
+
+ flow->pattern |= F_VLAN_ID;
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Parse the pri field of the vlan rte flow item.
+ *
+ * This will create a classifier rule that matches pri.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_vlan_pri(const struct rte_flow_item_vlan *spec,
+ const struct rte_flow_item_vlan *mask __rte_unused,
+ struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ uint16_t k;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 1;
+
+ k = (rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_PRI_MASK) >> 13;
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
+
+ flow->pattern |= F_VLAN_PRI;
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Parse the dscp field of the ipv4 rte flow item.
+ *
+ * This will create a classifier rule that matches the dscp field.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_ip4_dscp(const struct rte_flow_item_ipv4 *spec,
+ const struct rte_flow_item_ipv4 *mask,
+ struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ uint8_t k, m;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 1;
+
+ k = (spec->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2;
+ m = (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2;
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
+ snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);
+
+ flow->pattern |= F_IP4_TOS;
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
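+
+/*
+ * Illustrative note (not used by the driver): dscp occupies the upper six
+ * bits of type_of_service, hence the >> 2 above. Assuming MRVL_IPV4_DSCP_MASK
+ * covers those bits, a ToS of 0xb8 yields a dscp key of 46 (the EF codepoint).
+ */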
+
+/**
+ * Parse either the source or the destination ip address of the ipv4 flow item.
+ *
+ * This will create a classifier rule that matches either the destination
+ * or the source ip field.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_ip4_addr(const struct rte_flow_item_ipv4 *spec,
+ const struct rte_flow_item_ipv4 *mask,
+ int parse_dst, struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ struct in_addr k;
+ uint32_t m;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ memset(&k, 0, sizeof(k));
+ if (parse_dst) {
+ k.s_addr = spec->hdr.dst_addr;
+ m = rte_be_to_cpu_32(mask->hdr.dst_addr);
+
+ flow->pattern |= F_IP4_DIP;
+ } else {
+ k.s_addr = spec->hdr.src_addr;
+ m = rte_be_to_cpu_32(mask->hdr.src_addr);
+
+ flow->pattern |= F_IP4_SIP;
+ }
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 4;
+
+ inet_ntop(AF_INET, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
+ snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "0x%x", m);
+
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
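+
+/*
+ * Illustrative example (hypothetical values): for spec->hdr.dst_addr set to
+ * 10.0.0.1 and mask->hdr.dst_addr set to 255.255.255.0, the code above stores
+ * the key as the string "10.0.0.1" and the mask as the string "0xffffff00".
+ */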
+
+/**
+ * Helper for parsing destination ip of the ipv4 flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_ip4_dip(const struct rte_flow_item_ipv4 *spec,
+ const struct rte_flow_item_ipv4 *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_ip4_addr(spec, mask, 1, flow);
+}
+
+/**
+ * Helper for parsing source ip of the ipv4 flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_ip4_sip(const struct rte_flow_item_ipv4 *spec,
+ const struct rte_flow_item_ipv4 *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_ip4_addr(spec, mask, 0, flow);
+}
+
+/**
+ * Parse the proto field of the ipv4 rte flow item.
+ *
+ * This will create a classifier rule that matches the proto field.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_ip4_proto(const struct rte_flow_item_ipv4 *spec,
+ const struct rte_flow_item_ipv4 *mask __rte_unused,
+ struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ uint8_t k = spec->hdr.next_proto_id;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 1;
+
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
+
+ flow->pattern |= F_IP4_PROTO;
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Parse either the source or the destination ip address of the ipv6 rte flow item.
+ *
+ * This will create a classifier rule that matches either the destination
+ * or the source ip field.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_ip6_addr(const struct rte_flow_item_ipv6 *spec,
+ const struct rte_flow_item_ipv6 *mask,
+ int parse_dst, struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ int size = sizeof(spec->hdr.dst_addr);
+ struct in6_addr k, m;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ memset(&k, 0, sizeof(k));
+ if (parse_dst) {
+ memcpy(k.s6_addr, spec->hdr.dst_addr, size);
+ memcpy(m.s6_addr, mask->hdr.dst_addr, size);
+
+ flow->pattern |= F_IP6_DIP;
+ } else {
+ memcpy(k.s6_addr, spec->hdr.src_addr, size);
+ memcpy(m.s6_addr, mask->hdr.src_addr, size);
+
+ flow->pattern |= F_IP6_SIP;
+ }
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 16;
+
+ inet_ntop(AF_INET6, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
+ inet_ntop(AF_INET6, &m, (char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX);
+
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
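+
+/*
+ * Illustrative example (hypothetical values): for a source address spec of
+ * 2001:db8::1 with mask ffff:ffff:ffff:ffff::, the code above stores the key
+ * as the string "2001:db8::1" and the mask as "ffff:ffff:ffff:ffff::".
+ */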
+
+/**
+ * Helper for parsing destination ip of the ipv6 flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_ip6_dip(const struct rte_flow_item_ipv6 *spec,
+ const struct rte_flow_item_ipv6 *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_ip6_addr(spec, mask, 1, flow);
+}
+
+/**
+ * Helper for parsing source ip of the ipv6 flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_ip6_sip(const struct rte_flow_item_ipv6 *spec,
+ const struct rte_flow_item_ipv6 *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_ip6_addr(spec, mask, 0, flow);
+}
+
+/**
+ * Parse the flow label of the ipv6 flow item.
+ *
+ * This will create a classifier rule that matches the flow label field.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_ip6_flow(const struct rte_flow_item_ipv6 *spec,
+ const struct rte_flow_item_ipv6 *mask,
+ struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ uint32_t k = rte_be_to_cpu_32(spec->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK,
+ m = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 3;
+
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
+ snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);
+
+ flow->pattern |= F_IP6_FLOW;
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
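+
+/*
+ * Illustrative note (not used by the driver): vtc_flow packs version (4 bits),
+ * traffic class (8 bits) and flow label (20 bits). Assuming MRVL_IPV6_FLOW_MASK
+ * selects the low 20 bits, vtc_flow == 0x60012345 in host order yields a flow
+ * label key of 0x12345, stored above as a 3 byte field.
+ */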
+
+/**
+ * Parse the next header of the ipv6 flow item.
+ *
+ * This will create a classifier rule that matches the next header field.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_ip6_next_hdr(const struct rte_flow_item_ipv6 *spec,
+ const struct rte_flow_item_ipv6 *mask __rte_unused,
+ struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ uint8_t k = spec->hdr.proto;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 1;
+
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
+
+ flow->pattern |= F_IP6_NEXT_HDR;
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Parse the destination or source port of the tcp flow item.
+ *
+ * This will create a classifier rule that matches either the destination or
+ * the source tcp port.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_tcp_port(const struct rte_flow_item_tcp *spec,
+ const struct rte_flow_item_tcp *mask __rte_unused,
+ int parse_dst, struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ uint16_t k;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 2;
+
+ if (parse_dst) {
+ k = rte_be_to_cpu_16(spec->hdr.dst_port);
+
+ flow->pattern |= F_TCP_DPORT;
+ } else {
+ k = rte_be_to_cpu_16(spec->hdr.src_port);
+
+ flow->pattern |= F_TCP_SPORT;
+ }
+
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
+
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Helper for parsing the tcp source port of the tcp flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_tcp_sport(const struct rte_flow_item_tcp *spec,
+ const struct rte_flow_item_tcp *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_tcp_port(spec, mask, 0, flow);
+}
+
+/**
+ * Helper for parsing the tcp destination port of the tcp flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_tcp_dport(const struct rte_flow_item_tcp *spec,
+ const struct rte_flow_item_tcp *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_tcp_port(spec, mask, 1, flow);
+}
+
+/**
+ * Parse the destination or source port of the udp flow item.
+ *
+ * This will create a classifier rule that matches either the destination or
+ * the source udp port.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_udp_port(const struct rte_flow_item_udp *spec,
+ const struct rte_flow_item_udp *mask __rte_unused,
+ int parse_dst, struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ uint16_t k;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 2;
+
+ if (parse_dst) {
+ k = rte_be_to_cpu_16(spec->hdr.dst_port);
+
+ flow->pattern |= F_UDP_DPORT;
+ } else {
+ k = rte_be_to_cpu_16(spec->hdr.src_port);
+
+ flow->pattern |= F_UDP_SPORT;
+ }
+
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
+
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Helper for parsing the udp source port of the udp flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_udp_sport(const struct rte_flow_item_udp *spec,
+ const struct rte_flow_item_udp *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_udp_port(spec, mask, 0, flow);
+}
+
+/**
+ * Helper for parsing the udp destination port of the udp flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_udp_dport(const struct rte_flow_item_udp *spec,
+ const struct rte_flow_item_udp *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_udp_port(spec, mask, 1, flow);
+}
+
+/**
+ * Parse eth flow item.
+ *
+ * @param item Pointer to the flow item.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_parse_eth(const struct rte_flow_item *item, struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_eth *spec = NULL, *mask = NULL;
+ struct ether_addr zero;
+ int ret;
+
+ ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
+ &rte_flow_item_eth_mask,
+ sizeof(struct rte_flow_item_eth), error);
+ if (ret)
+ return ret;
+
+ memset(&zero, 0, sizeof(zero));
+
+ if (memcmp(&mask->dst, &zero, sizeof(mask->dst))) {
+ ret = mrvl_parse_dmac(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (memcmp(&mask->src, &zero, sizeof(mask->src))) {
+ ret = mrvl_parse_smac(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (mask->type) {
+ RTE_LOG(WARNING, PMD, "eth type mask is ignored\n");
+ ret = mrvl_parse_type(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+out:
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Reached maximum number of fields in cls tbl key\n");
+ return -rte_errno;
+}
+
+/**
+ * Parse vlan flow item.
+ *
+ * @param item Pointer to the flow item.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_parse_vlan(const struct rte_flow_item *item,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_vlan *spec = NULL, *mask = NULL;
+ uint16_t m;
+ int ret;
+
+ ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
+ &rte_flow_item_vlan_mask,
+ sizeof(struct rte_flow_item_vlan), error);
+ if (ret)
+ return ret;
+
+ m = rte_be_to_cpu_16(mask->tci);
+ if (m & MRVL_VLAN_ID_MASK) {
+ RTE_LOG(WARNING, PMD, "vlan id mask is ignored\n");
+ ret = mrvl_parse_vlan_id(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (m & MRVL_VLAN_PRI_MASK) {
+ RTE_LOG(WARNING, PMD, "vlan pri mask is ignored\n");
+ ret = mrvl_parse_vlan_pri(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (flow->pattern & F_TYPE) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "VLAN TPID matching is not supported\n");
+ return -rte_errno;
+ }
+ if (mask->inner_type) {
+ struct rte_flow_item_eth spec_eth = {
+ .type = spec->inner_type,
+ };
+ struct rte_flow_item_eth mask_eth = {
+ .type = mask->inner_type,
+ };
+
+ RTE_LOG(WARNING, PMD, "inner eth type mask is ignored\n");
+ ret = mrvl_parse_type(&spec_eth, &mask_eth, flow);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+out:
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Reached maximum number of fields in cls tbl key\n");
+ return -rte_errno;
+}
+
+/**
+ * Parse ipv4 flow item.
+ *
+ * @param item Pointer to the flow item.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_parse_ip4(const struct rte_flow_item *item,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_ipv4 *spec = NULL, *mask = NULL;
+ int ret;
+
+ ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
+ &rte_flow_item_ipv4_mask,
+ sizeof(struct rte_flow_item_ipv4), error);
+ if (ret)
+ return ret;
+
+ if (mask->hdr.version_ihl ||
+ mask->hdr.total_length ||
+ mask->hdr.packet_id ||
+ mask->hdr.fragment_offset ||
+ mask->hdr.time_to_live ||
+ mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by classifier\n");
+ return -rte_errno;
+ }
+
+ if (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) {
+ ret = mrvl_parse_ip4_dscp(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (mask->hdr.src_addr) {
+ ret = mrvl_parse_ip4_sip(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (mask->hdr.dst_addr) {
+ ret = mrvl_parse_ip4_dip(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (mask->hdr.next_proto_id) {
+ RTE_LOG(WARNING, PMD, "next proto id mask is ignored\n");
+ ret = mrvl_parse_ip4_proto(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+out:
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Reached maximum number of fields in cls tbl key\n");
+ return -rte_errno;
+}
+
+/**
+ * Parse ipv6 flow item.
+ *
+ * @param item Pointer to the flow item.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_parse_ip6(const struct rte_flow_item *item,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_ipv6 *spec = NULL, *mask = NULL;
+ struct ipv6_hdr zero;
+ uint32_t flow_mask;
+ int ret;
+
+ ret = mrvl_parse_init(item, (const void **)&spec,
+ (const void **)&mask,
+ &rte_flow_item_ipv6_mask,
+ sizeof(struct rte_flow_item_ipv6),
+ error);
+ if (ret)
+ return ret;
+
+ memset(&zero, 0, sizeof(zero));
+
+ if (mask->hdr.payload_len ||
+ mask->hdr.hop_limits) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by classifier\n");
+ return -rte_errno;
+ }
+
+ if (memcmp(mask->hdr.src_addr,
+ zero.src_addr, sizeof(mask->hdr.src_addr))) {
+ ret = mrvl_parse_ip6_sip(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (memcmp(mask->hdr.dst_addr,
+ zero.dst_addr, sizeof(mask->hdr.dst_addr))) {
+ ret = mrvl_parse_ip6_dip(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK;
+ if (flow_mask) {
+ ret = mrvl_parse_ip6_flow(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (mask->hdr.proto) {
+ RTE_LOG(WARNING, PMD, "next header mask is ignored\n");
+ ret = mrvl_parse_ip6_next_hdr(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+out:
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Reached maximum number of fields in cls tbl key\n");
+ return -rte_errno;
+}
+
+/**
+ * Parse tcp flow item.
+ *
+ * @param item Pointer to the flow item.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_parse_tcp(const struct rte_flow_item *item,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_tcp *spec = NULL, *mask = NULL;
+ int ret;
+
+ ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
+ &rte_flow_item_tcp_mask,
+ sizeof(struct rte_flow_item_tcp), error);
+ if (ret)
+ return ret;
+
+ if (mask->hdr.sent_seq ||
+ mask->hdr.recv_ack ||
+ mask->hdr.data_off ||
+ mask->hdr.tcp_flags ||
+ mask->hdr.rx_win ||
+ mask->hdr.cksum ||
+ mask->hdr.tcp_urp) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by classifier\n");
+ return -rte_errno;
+ }
+
+ if (mask->hdr.src_port) {
+ RTE_LOG(WARNING, PMD, "tcp sport mask is ignored\n");
+ ret = mrvl_parse_tcp_sport(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (mask->hdr.dst_port) {
+ RTE_LOG(WARNING, PMD, "tcp dport mask is ignored\n");
+ ret = mrvl_parse_tcp_dport(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+out:
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Reached maximum number of fields in cls tbl key\n");
+ return -rte_errno;
+}
+
+/**
+ * Parse udp flow item.
+ *
+ * @param item Pointer to the flow item.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_parse_udp(const struct rte_flow_item *item,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_udp *spec = NULL, *mask = NULL;
+ int ret;
+
+ ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
+ &rte_flow_item_udp_mask,
+ sizeof(struct rte_flow_item_udp), error);
+ if (ret)
+ return ret;
+
+ if (mask->hdr.dgram_len ||
+ mask->hdr.dgram_cksum) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by classifier\n");
+ return -rte_errno;
+ }
+
+ if (mask->hdr.src_port) {
+ RTE_LOG(WARNING, PMD, "udp sport mask is ignored\n");
+ ret = mrvl_parse_udp_sport(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (mask->hdr.dst_port) {
+ RTE_LOG(WARNING, PMD, "udp dport mask is ignored\n");
+ ret = mrvl_parse_udp_dport(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+out:
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Reached maximum number of fields in cls tbl key\n");
+ return -rte_errno;
+}
+
+/**
+ * Parse flow pattern composed of the eth item.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_eth(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_eth(pattern, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the eth and vlan items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_eth_vlan(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = mrvl_parse_eth(item, flow, error);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+
+ return mrvl_parse_vlan(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the eth, vlan and ip4/ip6 items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_eth_vlan_ip4_ip6(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int ip6)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = mrvl_parse_eth(item, flow, error);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+ ret = mrvl_parse_vlan(item, flow, error);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+
+ return ip6 ? mrvl_parse_ip6(item, flow, error) :
+ mrvl_parse_ip4(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the eth, vlan and ipv4 items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_eth_vlan_ip4(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_eth_vlan_ip4_ip6(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the eth, vlan and ipv6 items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_eth_vlan_ip6(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_eth_vlan_ip4_ip6(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the eth and ip4/ip6 items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_eth_ip4_ip6(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int ip6)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = mrvl_parse_eth(item, flow, error);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+
+ return ip6 ? mrvl_parse_ip6(item, flow, error) :
+ mrvl_parse_ip4(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the eth and ipv4 items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_eth_ip4(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the eth and ipv6 items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_eth_ip6(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the eth, ip4 and tcp/udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param tcp 1 to parse tcp item, 0 to parse udp item.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_eth_ip4_tcp_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int tcp)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 0);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+ item = mrvl_next_item(item + 1);
+
+ if (tcp)
+ return mrvl_parse_tcp(item, flow, error);
+
+ return mrvl_parse_udp(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the eth, ipv4 and tcp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_eth_ip4_tcp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_eth_ip4_tcp_udp(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the eth, ipv4 and udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_eth_ip4_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_eth_ip4_tcp_udp(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the eth, ipv6 and tcp/udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param tcp 1 to parse tcp item, 0 to parse udp item.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_eth_ip6_tcp_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int tcp)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 1);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+ item = mrvl_next_item(item + 1);
+
+ if (tcp)
+ return mrvl_parse_tcp(item, flow, error);
+
+ return mrvl_parse_udp(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the eth, ipv6 and tcp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_eth_ip6_tcp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_eth_ip6_tcp_udp(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the eth, ipv6 and udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_eth_ip6_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_eth_ip6_tcp_udp(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the vlan item.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_vlan(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+
+ return mrvl_parse_vlan(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the vlan and ip4/ip6 items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_vlan_ip4_ip6(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int ip6)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = mrvl_parse_vlan(item, flow, error);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+
+ return ip6 ? mrvl_parse_ip6(item, flow, error) :
+ mrvl_parse_ip4(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the vlan and ipv4 items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_vlan_ip4(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the vlan, ipv4 and tcp/udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param tcp 1 to parse tcp item, 0 to parse udp item.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_vlan_ip_tcp_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int tcp)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 0);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+ item = mrvl_next_item(item + 1);
+
+ if (tcp)
+ return mrvl_parse_tcp(item, flow, error);
+
+ return mrvl_parse_udp(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the vlan, ipv4 and tcp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_vlan_ip_tcp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_vlan_ip_tcp_udp(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the vlan, ipv4 and udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_vlan_ip_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_vlan_ip_tcp_udp(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the vlan and ipv6 items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_vlan_ip6(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the vlan, ipv6 and tcp/udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param tcp 1 to parse tcp item, 0 to parse udp item.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_vlan_ip6_tcp_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int tcp)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 1);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+ item = mrvl_next_item(item + 1);
+
+ if (tcp)
+ return mrvl_parse_tcp(item, flow, error);
+
+ return mrvl_parse_udp(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the vlan, ipv6 and tcp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_vlan_ip6_tcp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_vlan_ip6_tcp_udp(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the vlan, ipv6 and udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_vlan_ip6_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_vlan_ip6_tcp_udp(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the ip4/ip6 item.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_ip4_ip6(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int ip6)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+
+ return ip6 ? mrvl_parse_ip6(item, flow, error) :
+ mrvl_parse_ip4(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the ipv4 item.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_ip4(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_ip4_ip6(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the ipv6 item.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_ip6(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_ip4_ip6(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the ip4/ip6 and tcp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_ip4_ip6_tcp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int ip6)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = ip6 ? mrvl_parse_ip6(item, flow, error) :
+ mrvl_parse_ip4(item, flow, error);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+
+ return mrvl_parse_tcp(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the ipv4 and tcp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_ip4_tcp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_ip4_ip6_tcp(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the ipv6 and tcp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_ip6_tcp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_ip4_ip6_tcp(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the ipv4/ipv6 and udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_ip4_ip6_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int ip6)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = ip6 ? mrvl_parse_ip6(item, flow, error) :
+ mrvl_parse_ip4(item, flow, error);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+
+ return mrvl_parse_udp(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the ipv4 and udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_ip4_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_ip4_ip6_udp(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the ipv6 and udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_ip6_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_ip4_ip6_udp(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the tcp item.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_tcp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+
+ return mrvl_parse_tcp(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the udp item.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+
+ return mrvl_parse_udp(item, flow, error);
+}
+
+/**
+ * Structure used to map specific flow pattern to the pattern parse callback
+ * which will iterate over each pattern item and extract relevant data.
+ */
+static const struct {
+ const enum rte_flow_item_type *pattern;
+ int (*parse)(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error);
+} mrvl_patterns[] = {
+ { pattern_eth, mrvl_parse_pattern_eth },
+ { pattern_eth_vlan, mrvl_parse_pattern_eth_vlan },
+ { pattern_eth_vlan_ip, mrvl_parse_pattern_eth_vlan_ip4 },
+ { pattern_eth_vlan_ip6, mrvl_parse_pattern_eth_vlan_ip6 },
+ { pattern_eth_ip4, mrvl_parse_pattern_eth_ip4 },
+ { pattern_eth_ip4_tcp, mrvl_parse_pattern_eth_ip4_tcp },
+ { pattern_eth_ip4_udp, mrvl_parse_pattern_eth_ip4_udp },
+ { pattern_eth_ip6, mrvl_parse_pattern_eth_ip6 },
+ { pattern_eth_ip6_tcp, mrvl_parse_pattern_eth_ip6_tcp },
+ { pattern_eth_ip6_udp, mrvl_parse_pattern_eth_ip6_udp },
+ { pattern_vlan, mrvl_parse_pattern_vlan },
+ { pattern_vlan_ip, mrvl_parse_pattern_vlan_ip4 },
+ { pattern_vlan_ip_tcp, mrvl_parse_pattern_vlan_ip_tcp },
+ { pattern_vlan_ip_udp, mrvl_parse_pattern_vlan_ip_udp },
+ { pattern_vlan_ip6, mrvl_parse_pattern_vlan_ip6 },
+ { pattern_vlan_ip6_tcp, mrvl_parse_pattern_vlan_ip6_tcp },
+ { pattern_vlan_ip6_udp, mrvl_parse_pattern_vlan_ip6_udp },
+ { pattern_ip, mrvl_parse_pattern_ip4 },
+ { pattern_ip_tcp, mrvl_parse_pattern_ip4_tcp },
+ { pattern_ip_udp, mrvl_parse_pattern_ip4_udp },
+ { pattern_ip6, mrvl_parse_pattern_ip6 },
+ { pattern_ip6_tcp, mrvl_parse_pattern_ip6_tcp },
+ { pattern_ip6_udp, mrvl_parse_pattern_ip6_udp },
+ { pattern_tcp, mrvl_parse_pattern_tcp },
+ { pattern_udp, mrvl_parse_pattern_udp }
+};
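+
+/*
+ * Illustrative example (hypothetical application code, not part of the PMD):
+ * a pattern built as below matches the pattern_eth_ip4_udp layout and is thus
+ * dispatched to mrvl_parse_pattern_eth_ip4_udp(). Spec/mask pointers are
+ * omitted for brevity:
+ *
+ *   struct rte_flow_item pattern[] = {
+ *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
+ *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
+ *           { .type = RTE_FLOW_ITEM_TYPE_UDP },
+ *           { .type = RTE_FLOW_ITEM_TYPE_END },
+ *   };
+ *
+ * VOID items may appear in between; mrvl_patterns_match() below skips them
+ * while comparing item types.
+ */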
+
+/**
+ * Check whether provided pattern matches any of the supported ones.
+ *
+ * @param type_pattern Pointer to the pattern type.
+ * @param item_pattern Pointer to the flow pattern.
+ * @returns 1 if the patterns match, 0 otherwise.
+ */
+static int
+mrvl_patterns_match(const enum rte_flow_item_type *type_pattern,
+ const struct rte_flow_item *item_pattern)
+{
+ const enum rte_flow_item_type *type = type_pattern;
+ const struct rte_flow_item *item = item_pattern;
+
+ for (;;) {
+ if (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
+ item++;
+ continue;
+ }
+
+ if (*type == RTE_FLOW_ITEM_TYPE_END ||
+ item->type == RTE_FLOW_ITEM_TYPE_END)
+ break;
+
+ if (*type != item->type)
+ break;
+
+ item++;
+ type++;
+ }
+
+ return *type == item->type;
+}
+
+/**
+ * Parse flow attribute.
+ *
+ * This will check whether the provided attribute's flags are supported.
+ *
+ * @param priv Unused
+ * @param attr Pointer to the flow attribute.
+ * @param flow Unused
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_flow_parse_attr(struct mrvl_priv *priv __rte_unused,
+ const struct rte_flow_attr *attr,
+ struct rte_flow *flow __rte_unused,
+ struct rte_flow_error *error)
+{
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute");
+ return -rte_errno;
+ }
+
+ if (attr->group) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
+ "Groups are not supported");
+ return -rte_errno;
+ }
+ if (attr->priority) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, NULL,
+ "Priorities are not supported");
+ return -rte_errno;
+ }
+ if (!attr->ingress) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, NULL,
+ "Only ingress is supported");
+ return -rte_errno;
+ }
+ if (attr->egress) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ "Egress is not supported");
+ return -rte_errno;
+ }
+ if (attr->transfer) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
+ "Transfer is not supported");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/**
+ * Parse flow pattern.
+ *
+ * Specific classifier rule will be created as well.
+ *
+ * @param priv Unused
+ * @param pattern Pointer to the flow pattern.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_flow_parse_pattern(struct mrvl_priv *priv __rte_unused,
+ const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < RTE_DIM(mrvl_patterns); i++) {
+ if (!mrvl_patterns_match(mrvl_patterns[i].pattern, pattern))
+ continue;
+
+ ret = mrvl_patterns[i].parse(pattern, flow, error);
+ if (ret)
+ mrvl_free_all_key_mask(&flow->rule);
+
+ return ret;
+ }
+
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Unsupported pattern");
+
+ return -rte_errno;
+}
+
+/**
+ * Parse flow actions.
+ *
+ * @param priv Pointer to the port's private data.
+ * @param actions Pointer to the action table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_flow_parse_actions(struct mrvl_priv *priv,
+ const struct rte_flow_action actions[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action *action = actions;
+ int specified = 0;
+
+ for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
+ if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
+ continue;
+
+ if (action->type == RTE_FLOW_ACTION_TYPE_DROP) {
+ flow->cos.ppio = priv->ppio;
+ flow->cos.tc = 0;
+ flow->action.type = PP2_CLS_TBL_ACT_DROP;
+ flow->action.cos = &flow->cos;
+ specified++;
+ } else if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ const struct rte_flow_action_queue *q =
+ (const struct rte_flow_action_queue *)
+ action->conf;
+
+ if (q->index >= priv->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "Queue index out of range");
+ return -rte_errno;
+ }
+
+ if (priv->rxq_map[q->index].tc == MRVL_UNKNOWN_TC) {
+ /*
+ * Unknown TC mapping; the queue cannot be
+ * mapped to a correct tc/in-queue pair.
+ */
+ RTE_LOG(ERR, PMD,
+ "Unknown TC mapping for queue %hu eth%hhu\n",
+ q->index, priv->ppio_id);
+
+ rte_flow_error_set(error, EFAULT,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, NULL);
+ return -rte_errno;
+ }
+
+ RTE_LOG(DEBUG, PMD,
+ "Action: Assign packets to queue %d, tc:%d, q:%d\n",
+ q->index, priv->rxq_map[q->index].tc,
+ priv->rxq_map[q->index].inq);
+
+ flow->cos.ppio = priv->ppio;
+ flow->cos.tc = priv->rxq_map[q->index].tc;
+ flow->action.type = PP2_CLS_TBL_ACT_DONE;
+ flow->action.cos = &flow->cos;
+ specified++;
+ } else {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Action not supported");
+ return -rte_errno;
+ }
+
+ }
+
+ if (!specified) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Action not specified");
+ return -rte_errno;
+ }
+
+ return 0;
+}
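+
+/*
+ * Illustrative example (hypothetical application code, not part of the PMD):
+ * a QUEUE action steering matching packets to rx queue 2 would be passed as:
+ *
+ *   struct rte_flow_action_queue queue = { .index = 2 };
+ *   struct rte_flow_action actions[] = {
+ *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
+ *           { .type = RTE_FLOW_ACTION_TYPE_END },
+ *   };
+ *
+ * mrvl_flow_parse_actions() maps queue 2 to the tc/inq pair recorded in
+ * priv->rxq_map[2] and a PP2_CLS_TBL_ACT_DONE classifier action.
+ */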
+
+/**
+ * Parse flow attribute, pattern and actions.
+ *
+ * @param priv Pointer to the port's private data.
+ * @param attr Pointer to the flow attribute.
+ * @param pattern Pointer to the flow pattern.
+ * @param actions Pointer to the flow actions.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_flow_parse(struct mrvl_priv *priv, const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int ret;
+
+ ret = mrvl_flow_parse_attr(priv, attr, flow, error);
+ if (ret)
+ return ret;
+
+ ret = mrvl_flow_parse_pattern(priv, pattern, flow, error);
+ if (ret)
+ return ret;
+
+ return mrvl_flow_parse_actions(priv, actions, flow, error);
+}
+
+static inline enum pp2_cls_tbl_type
+mrvl_engine_type(const struct rte_flow *flow)
+{
+ int i, size = 0;
+
+ for (i = 0; i < flow->rule.num_fields; i++)
+ size += flow->rule.fields[i].size;
+
+ /*
+ * For maskable engine type the key size must be up to 8 bytes.
+ * For keys with size bigger than 8 bytes, engine type must
+ * be set to exact match.
+ */
+ if (size > 8)
+ return PP2_CLS_TBL_EXACT_MATCH;
+
+ return PP2_CLS_TBL_MASKABLE;
+}
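+
+/*
+ * Illustrative note (not used at runtime): a flow matching only the vlan id
+ * (2 bytes) and ip4 proto (1 byte) yields a 3 byte key, so the maskable
+ * engine is selected; matching e.g. an ipv6 source address (16 bytes) pushes
+ * the key past 8 bytes and forces PP2_CLS_TBL_EXACT_MATCH.
+ */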
+
+static int
+mrvl_create_cls_table(struct rte_eth_dev *dev, struct rte_flow *first_flow)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct pp2_cls_tbl_key *key = &priv->cls_tbl_params.key;
+ int ret;
+
+ if (priv->cls_tbl) {
+ pp2_cls_tbl_deinit(priv->cls_tbl);
+ priv->cls_tbl = NULL;
+ }
+
+ memset(&priv->cls_tbl_params, 0, sizeof(priv->cls_tbl_params));
+
+ priv->cls_tbl_params.type = mrvl_engine_type(first_flow);
+ RTE_LOG(INFO, PMD, "Setting cls search engine type to %s\n",
+ priv->cls_tbl_params.type == PP2_CLS_TBL_EXACT_MATCH ?
+ "exact" : "maskable");
+ priv->cls_tbl_params.max_num_rules = MRVL_CLS_MAX_NUM_RULES;
+ priv->cls_tbl_params.default_act.type = PP2_CLS_TBL_ACT_DONE;
+ priv->cls_tbl_params.default_act.cos = &first_flow->cos;
+
+ if (first_flow->pattern & F_DMAC) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
+ key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_DA;
+ key->key_size += 6;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_SMAC) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
+ key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_SA;
+ key->key_size += 6;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_TYPE) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
+ key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_TYPE;
+ key->key_size += 2;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_VLAN_ID) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_VLAN;
+ key->proto_field[key->num_fields].field.vlan = MV_NET_VLAN_F_ID;
+ key->key_size += 2;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_VLAN_PRI) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_VLAN;
+ key->proto_field[key->num_fields].field.vlan =
+ MV_NET_VLAN_F_PRI;
+ key->key_size += 1;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_IP4_TOS) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
+ key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_TOS;
+ key->key_size += 1;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_IP4_SIP) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
+ key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_SA;
+ key->key_size += 4;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_IP4_DIP) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
+ key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_DA;
+ key->key_size += 4;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_IP4_PROTO) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
+ key->proto_field[key->num_fields].field.ipv4 =
+ MV_NET_IP4_F_PROTO;
+ key->key_size += 1;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_IP6_SIP) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
+ key->proto_field[key->num_fields].field.ipv6 = MV_NET_IP6_F_SA;
+ key->key_size += 16;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_IP6_DIP) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
+ key->proto_field[key->num_fields].field.ipv6 = MV_NET_IP6_F_DA;
+ key->key_size += 16;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_IP6_FLOW) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
+ key->proto_field[key->num_fields].field.ipv6 =
+ MV_NET_IP6_F_FLOW;
+ key->key_size += 3;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_IP6_NEXT_HDR) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
+ key->proto_field[key->num_fields].field.ipv6 =
+ MV_NET_IP6_F_NEXT_HDR;
+ key->key_size += 1;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_TCP_SPORT) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_TCP;
+ key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_SP;
+ key->key_size += 2;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_TCP_DPORT) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_TCP;
+ key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_DP;
+ key->key_size += 2;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_UDP_SPORT) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_UDP;
+ key->proto_field[key->num_fields].field.udp = MV_NET_UDP_F_SP;
+ key->key_size += 2;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_UDP_DPORT) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_UDP;
+ key->proto_field[key->num_fields].field.udp = MV_NET_UDP_F_DP;
+ key->key_size += 2;
+ key->num_fields += 1;
+ }
+
+ ret = pp2_cls_tbl_init(&priv->cls_tbl_params, &priv->cls_tbl);
+ if (!ret)
+ priv->cls_tbl_pattern = first_flow->pattern;
+
+ return ret;
+}
+
+/**
+ * Check whether a new flow can be added to the table.
+ *
+ * @param priv Pointer to the port's private data.
+ * @param flow Pointer to the new flow.
+ * @return 1 in case flow can be added, 0 otherwise.
+ */
+static inline int
+mrvl_flow_can_be_added(struct mrvl_priv *priv, const struct rte_flow *flow)
+{
+ return flow->pattern == priv->cls_tbl_pattern &&
+ mrvl_engine_type(flow) == priv->cls_tbl_params.type;
+}
+
+/**
+ * DPDK flow create callback called when flow is to be created.
+ *
+ * @param dev Pointer to the device.
+ * @param attr Pointer to the flow attribute.
+ * @param pattern Pointer to the flow pattern.
+ * @param actions Pointer to the flow actions.
+ * @param error Pointer to the flow error.
+ * @returns Pointer to the created flow in case of success, NULL otherwise.
+ */
+static struct rte_flow *
+mrvl_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct rte_flow *flow, *first;
+ int ret;
+
+ if (!dev->data->dev_started) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Port must be started first\n");
+ return NULL;
+ }
+
+ flow = rte_zmalloc_socket(NULL, sizeof(*flow), 0, rte_socket_id());
+ if (!flow)
+ return NULL;
+
+ ret = mrvl_flow_parse(priv, attr, pattern, actions, flow, error);
+ if (ret)
+ goto out;
+
+ /*
+ * Four cases here:
+ *
+ * 1. In case table does not exist - create one.
+ * 2. In case table exists, is empty and new flow cannot be added
+ * recreate table.
+ * 3. In case table is not empty and new flow matches table format
+ * add it.
+ * 4. Otherwise flow cannot be added.
+ */
+ first = LIST_FIRST(&priv->flows);
+ if (!priv->cls_tbl) {
+ ret = mrvl_create_cls_table(dev, flow);
+ } else if (!first && !mrvl_flow_can_be_added(priv, flow)) {
+ ret = mrvl_create_cls_table(dev, flow);
+ } else if (mrvl_flow_can_be_added(priv, flow)) {
+ ret = 0;
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Pattern does not match cls table format\n");
+ goto out;
+ }
+
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to create cls table\n");
+ goto out;
+ }
+
+ ret = pp2_cls_tbl_add_rule(priv->cls_tbl, &flow->rule, &flow->action);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to add rule\n");
+ goto out;
+ }
+
+ LIST_INSERT_HEAD(&priv->flows, flow, next);
+
+ return flow;
+out:
+ rte_free(flow);
+ return NULL;
+}
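
For reference, a minimal application-side sketch that exercises this callback through the generic rte_flow API (port id, queue index and the TCP port are illustrative; as enforced above, the port must already be started):

#include <rte_byteorder.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

/* Steer IPv4/TCP traffic with destination port 80 to RX queue 1. */
static struct rte_flow *
steer_http_to_queue(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_tcp tcp_spec = {
		.hdr.dst_port = rte_cpu_to_be_16(80),
	};
	struct rte_flow_item_tcp tcp_mask = {
		.hdr.dst_port = rte_cpu_to_be_16(0xffff),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	if (rte_flow_validate(port_id, &attr, pattern, actions, error))
		return NULL;

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}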
+
+/**
+ * Remove classifier rule associated with given flow.
+ *
+ * @param priv Pointer to the port's private data.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_flow_remove(struct mrvl_priv *priv, struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int ret;
+
+ if (!priv->cls_tbl) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Classifier table not initialized");
+ return -rte_errno;
+ }
+
+ ret = pp2_cls_tbl_remove_rule(priv->cls_tbl, &flow->rule);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to remove rule");
+ return -rte_errno;
+ }
+
+ mrvl_free_all_key_mask(&flow->rule);
+
+ return 0;
+}
+
+/**
+ * DPDK flow destroy callback called when flow is to be removed.
+ *
+ * @param dev Pointer to the device.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct rte_flow *f;
+ int ret;
+
+ LIST_FOREACH(f, &priv->flows, next) {
+ if (f == flow)
+ break;
+ }
+
+ if (!f) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Rule was not found");
+ return -rte_errno;
+ }
+
+ LIST_REMOVE(f, next);
+
+ ret = mrvl_flow_remove(priv, flow, error);
+ if (ret)
+ return ret;
+
+ rte_free(flow);
+
+ return 0;
+}
+
+/**
+ * DPDK flow callback called to verify given attribute, pattern and actions.
+ *
+ * @param dev Pointer to the device.
+ * @param attr Pointer to the flow attribute.
+ * @param pattern Pointer to the flow pattern.
+ * @param actions Pointer to the flow actions.
+ * @param error Pointer to the flow error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow *flow;
+
+ flow = mrvl_flow_create(dev, attr, pattern, actions, error);
+ if (!flow)
+ return -rte_errno;
+
+ mrvl_flow_destroy(dev, flow, error);
+
+ return 0;
+}
+
+/**
+ * DPDK flow flush callback called when flows are to be flushed.
+ *
+ * @param dev Pointer to the device.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+
+ while (!LIST_EMPTY(&priv->flows)) {
+ struct rte_flow *flow = LIST_FIRST(&priv->flows);
+ int ret = mrvl_flow_remove(priv, flow, error);
+ if (ret)
+ return ret;
+
+ LIST_REMOVE(flow, next);
+ rte_free(flow);
+ }
+
+ return 0;
+}
+
+/**
+ * DPDK flow isolate callback called to isolate port.
+ *
+ * @param dev Pointer to the device.
+ * @param enable Pass 0/1 to disable/enable port isolation.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_flow_isolate(struct rte_eth_dev *dev, int enable,
+ struct rte_flow_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+
+ if (dev->data->dev_started) {
+ rte_flow_error_set(error, EBUSY,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Port must be stopped first\n");
+ return -rte_errno;
+ }
+
+ priv->isolated = enable;
+
+ return 0;
+}
+
+const struct rte_flow_ops mrvl_flow_ops = {
+ .validate = mrvl_flow_validate,
+ .create = mrvl_flow_create,
+ .destroy = mrvl_flow_destroy,
+ .flush = mrvl_flow_flush,
+ .isolate = mrvl_flow_isolate
+};
diff --git a/drivers/net/mrvl/mrvl_qos.c b/drivers/net/mvpp2/mrvl_qos.c
index fbb36813..70d000ca 100644
--- a/drivers/net/mrvl/mrvl_qos.c
+++ b/drivers/net/mvpp2/mrvl_qos.c
@@ -1,35 +1,7 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Marvell International Ltd.
- * Copyright(c) 2017 Semihalf.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
*/
#include <stdint.h>
@@ -64,12 +36,35 @@
#define MRVL_TOK_PCP "pcp"
#define MRVL_TOK_PORT "port"
#define MRVL_TOK_RXQ "rxq"
-#define MRVL_TOK_SP "SP"
#define MRVL_TOK_TC "tc"
#define MRVL_TOK_TXQ "txq"
#define MRVL_TOK_VLAN "vlan"
#define MRVL_TOK_VLAN_IP "vlan/ip"
-#define MRVL_TOK_WEIGHT "weight"
+
+/* egress specific configuration tokens */
+#define MRVL_TOK_BURST_SIZE "burst_size"
+#define MRVL_TOK_RATE_LIMIT "rate_limit"
+#define MRVL_TOK_RATE_LIMIT_ENABLE "rate_limit_enable"
+#define MRVL_TOK_SCHED_MODE "sched_mode"
+#define MRVL_TOK_SCHED_MODE_SP "sp"
+#define MRVL_TOK_SCHED_MODE_WRR "wrr"
+#define MRVL_TOK_WRR_WEIGHT "wrr_weight"
+
+/* policer specific configuration tokens */
+#define MRVL_TOK_PLCR_ENABLE "policer_enable"
+#define MRVL_TOK_PLCR_UNIT "token_unit"
+#define MRVL_TOK_PLCR_UNIT_BYTES "bytes"
+#define MRVL_TOK_PLCR_UNIT_PACKETS "packets"
+#define MRVL_TOK_PLCR_COLOR "color_mode"
+#define MRVL_TOK_PLCR_COLOR_BLIND "blind"
+#define MRVL_TOK_PLCR_COLOR_AWARE "aware"
+#define MRVL_TOK_PLCR_CIR "cir"
+#define MRVL_TOK_PLCR_CBS "cbs"
+#define MRVL_TOK_PLCR_EBS "ebs"
+#define MRVL_TOK_PLCR_DEFAULT_COLOR "default_color"
+#define MRVL_TOK_PLCR_DEFAULT_COLOR_GREEN "green"
+#define MRVL_TOK_PLCR_DEFAULT_COLOR_YELLOW "yellow"
+#define MRVL_TOK_PLCR_DEFAULT_COLOR_RED "red"
/** Number of tokens in range a-b = 2. */
#define MAX_RNG_TOKENS 2
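
Putting the new tokens together, a QoS configuration file using them could look like the snippet below (rate_limit is expressed in kbps and burst_size in kB, as noted in the parsing code; section header spelling follows the existing port/tc/txq tokens but is otherwise an assumption, and all values are illustrative):

[port 0 default]
rate_limit_enable = 1
rate_limit = 1000
burst_size = 2048
policer_enable = 1
token_unit = bytes
color_mode = blind
cir = 100000
cbs = 64
ebs = 64

[port 0 tc 0]
rxq = 0
default_color = yellow

[port 0 txq 0]
sched_mode = wrr
wrr_weight = 10
rate_limit_enable = 1
rate_limit = 100
burst_size = 2000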
@@ -131,12 +126,69 @@ get_outq_cfg(struct rte_cfgfile *file, int port, int outq,
if (rte_cfgfile_num_sections(file, sec_name, strlen(sec_name)) <= 0)
return 0;
+ /* Read scheduling mode */
+ entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_SCHED_MODE);
+ if (entry) {
+ if (!strncmp(entry, MRVL_TOK_SCHED_MODE_SP,
+ strlen(MRVL_TOK_SCHED_MODE_SP))) {
+ cfg->port[port].outq[outq].sched_mode =
+ PP2_PPIO_SCHED_M_SP;
+ } else if (!strncmp(entry, MRVL_TOK_SCHED_MODE_WRR,
+ strlen(MRVL_TOK_SCHED_MODE_WRR))) {
+ cfg->port[port].outq[outq].sched_mode =
+ PP2_PPIO_SCHED_M_WRR;
+ } else {
+ RTE_LOG(ERR, PMD, "Unknown token: %s\n", entry);
+ return -1;
+ }
+ }
+
+ /* Read wrr weight */
+ if (cfg->port[port].outq[outq].sched_mode == PP2_PPIO_SCHED_M_WRR) {
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_WRR_WEIGHT);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ cfg->port[port].outq[outq].weight = val;
+ }
+ }
+
+ /*
+ * There's no point in setting rate limiting for specific outq as
+ * global port rate limiting has priority.
+ */
+ if (cfg->port[port].rate_limit_enable) {
+ RTE_LOG(WARNING, PMD, "Port %d rate limiting already enabled\n",
+ port);
+ return 0;
+ }
+
entry = rte_cfgfile_get_entry(file, sec_name,
- MRVL_TOK_WEIGHT);
+ MRVL_TOK_RATE_LIMIT_ENABLE);
if (entry) {
if (get_val_securely(entry, &val) < 0)
return -1;
- cfg->port[port].outq[outq].weight = (uint8_t)val;
+ cfg->port[port].outq[outq].rate_limit_enable = val;
+ }
+
+ if (!cfg->port[port].outq[outq].rate_limit_enable)
+ return 0;
+
+ /* Read CBS (in kB) */
+ entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_BURST_SIZE);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ cfg->port[port].outq[outq].rate_limit_params.cbs = val;
+ }
+
+ /* Read CIR (in kbps) */
+ entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_RATE_LIMIT);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ cfg->port[port].outq[outq].rate_limit_params.cir = val;
}
return 0;
@@ -190,7 +242,7 @@ get_entry_values(const char *entry, uint8_t *tab,
return -1;
/* Copy the entry to safely use rte_strsplit(). */
- snprintf(entry_cpy, RTE_DIM(entry_cpy), "%s", entry);
+ strlcpy(entry_cpy, entry, RTE_DIM(entry_cpy));
/*
* If there are more tokens than array size, rte_strsplit will
@@ -324,6 +376,25 @@ parse_tc_cfg(struct rte_cfgfile *file, int port, int tc,
}
cfg->port[port].tc[tc].dscps = n;
}
+
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_PLCR_DEFAULT_COLOR);
+ if (entry) {
+ if (!strncmp(entry, MRVL_TOK_PLCR_DEFAULT_COLOR_GREEN,
+ sizeof(MRVL_TOK_PLCR_DEFAULT_COLOR_GREEN))) {
+ cfg->port[port].tc[tc].color = PP2_PPIO_COLOR_GREEN;
+ } else if (!strncmp(entry, MRVL_TOK_PLCR_DEFAULT_COLOR_YELLOW,
+ sizeof(MRVL_TOK_PLCR_DEFAULT_COLOR_YELLOW))) {
+ cfg->port[port].tc[tc].color = PP2_PPIO_COLOR_YELLOW;
+ } else if (!strncmp(entry, MRVL_TOK_PLCR_DEFAULT_COLOR_RED,
+ sizeof(MRVL_TOK_PLCR_DEFAULT_COLOR_RED))) {
+ cfg->port[port].tc[tc].color = PP2_PPIO_COLOR_RED;
+ } else {
+ RTE_LOG(ERR, PMD, "Error while parsing: %s\n", entry);
+ return -1;
+ }
+ }
+
return 0;
}
@@ -396,6 +467,118 @@ mrvl_get_qoscfg(const char *key __rte_unused, const char *path,
}
entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_PLCR_ENABLE);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ (*cfg)->port[n].policer_enable = val;
+ }
+
+ if ((*cfg)->port[n].policer_enable) {
+ enum pp2_cls_plcr_token_unit unit;
+
+ /* Read policer token unit */
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_PLCR_UNIT);
+ if (entry) {
+ if (!strncmp(entry, MRVL_TOK_PLCR_UNIT_BYTES,
+ sizeof(MRVL_TOK_PLCR_UNIT_BYTES))) {
+ unit = PP2_CLS_PLCR_BYTES_TOKEN_UNIT;
+ } else if (!strncmp(entry,
+ MRVL_TOK_PLCR_UNIT_PACKETS,
+ sizeof(MRVL_TOK_PLCR_UNIT_PACKETS))) {
+ unit = PP2_CLS_PLCR_PACKETS_TOKEN_UNIT;
+ } else {
+ RTE_LOG(ERR, PMD, "Unknown token: %s\n",
+ entry);
+ return -1;
+ }
+ (*cfg)->port[n].policer_params.token_unit =
+ unit;
+ }
+
+ /* Read policer color mode */
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_PLCR_COLOR);
+ if (entry) {
+ enum pp2_cls_plcr_color_mode mode;
+
+ if (!strncmp(entry, MRVL_TOK_PLCR_COLOR_BLIND,
+ sizeof(MRVL_TOK_PLCR_COLOR_BLIND))) {
+ mode = PP2_CLS_PLCR_COLOR_BLIND_MODE;
+ } else if (!strncmp(entry,
+ MRVL_TOK_PLCR_COLOR_AWARE,
+ sizeof(MRVL_TOK_PLCR_COLOR_AWARE))) {
+ mode = PP2_CLS_PLCR_COLOR_AWARE_MODE;
+ } else {
+ RTE_LOG(ERR, PMD,
+ "Error in parsing: %s\n",
+ entry);
+ return -1;
+ }
+ (*cfg)->port[n].policer_params.color_mode =
+ mode;
+ }
+
+ /* Read policer cir */
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_PLCR_CIR);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ (*cfg)->port[n].policer_params.cir = val;
+ }
+
+ /* Read policer cbs */
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_PLCR_CBS);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ (*cfg)->port[n].policer_params.cbs = val;
+ }
+
+ /* Read policer ebs */
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_PLCR_EBS);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ (*cfg)->port[n].policer_params.ebs = val;
+ }
+ }
+
+ /*
+ * Read per-port rate limiting. Setting that will
+ * disable per-queue rate limiting.
+ */
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_RATE_LIMIT_ENABLE);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ (*cfg)->port[n].rate_limit_enable = val;
+ }
+
+ if ((*cfg)->port[n].rate_limit_enable) {
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_BURST_SIZE);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ (*cfg)->port[n].rate_limit_params.cbs = val;
+ }
+
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_RATE_LIMIT);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ (*cfg)->port[n].rate_limit_params.cir = val;
+ }
+ }
+
+ entry = rte_cfgfile_get_entry(file, sec_name,
MRVL_TOK_MAPPING_PRIORITY);
if (entry) {
if (!strncmp(entry, MRVL_TOK_VLAN_IP,
@@ -450,16 +633,18 @@ mrvl_get_qoscfg(const char *key __rte_unused, const char *path,
* @param param TC parameters entry.
* @param inqs Number of MUSDK in-queues in this TC.
* @param bpool Bpool for this TC.
+ * @param color Default color for this TC.
* @returns 0 in case of success, exits otherwise.
*/
static int
setup_tc(struct pp2_ppio_tc_params *param, uint8_t inqs,
- struct pp2_bpool *bpool)
+ struct pp2_bpool *bpool, enum pp2_ppio_color color)
{
struct pp2_ppio_inq_params *inq_params;
param->pkt_offset = MRVL_PKT_OFFS;
param->pools[0] = bpool;
+ param->default_color = color;
inq_params = rte_zmalloc_socket("inq_params",
inqs * sizeof(*inq_params),
@@ -479,6 +664,34 @@ setup_tc(struct pp2_ppio_tc_params *param, uint8_t inqs,
}
/**
+ * Setup ingress policer.
+ *
+ * @param priv Port's private data.
+ * @param params Pointer to the policer's configuration.
+ * @returns 0 in case of success, negative values otherwise.
+ */
+static int
+setup_policer(struct mrvl_priv *priv, struct pp2_cls_plcr_params *params)
+{
+ char match[16];
+ int ret;
+
+ snprintf(match, sizeof(match), "policer-%d:%d",
+ priv->pp_id, priv->ppio_id);
+ params->match = match;
+
+ ret = pp2_cls_plcr_init(params, &priv->policer);
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Failed to setup %s\n", match);
+ return -1;
+ }
+
+ priv->ppio_params.inqs_params.plcr = priv->policer;
+
+ return 0;
+}
+
+/**
* Configure RX Queues in a given port.
*
* Sets up RX queues, their Traffic Classes and DPDK rxq->(TC,inq) mapping.
@@ -496,10 +709,13 @@ mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid,
if (mrvl_qos_cfg == NULL ||
mrvl_qos_cfg->port[portid].use_global_defaults) {
- /* No port configuration, use default: 1 TC, no QoS. */
+ /*
+ * No port configuration, use default: 1 TC, no QoS,
+ * TC color set to green.
+ */
priv->ppio_params.inqs_params.num_tcs = 1;
setup_tc(&priv->ppio_params.inqs_params.tcs_params[0],
- max_queues, priv->bpool);
+ max_queues, priv->bpool, PP2_PPIO_COLOR_GREEN);
/* Direct mapping of queues i.e. 0->0, 1->1 etc. */
for (i = 0; i < max_queues; ++i) {
@@ -597,11 +813,53 @@ mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid,
break;
setup_tc(&priv->ppio_params.inqs_params.tcs_params[i],
port_cfg->tc[i].inqs,
- priv->bpool);
+ priv->bpool, port_cfg->tc[i].color);
}
priv->ppio_params.inqs_params.num_tcs = i;
+ if (port_cfg->policer_enable)
+ return setup_policer(priv, &port_cfg->policer_params);
+
+ return 0;
+}
+
+/**
+ * Configure TX Queues in a given port.
+ *
+ * Sets up TX queues egress scheduler and limiter.
+ *
+ * @param priv Port's private data
+ * @param portid DPDK port ID
+ * @param max_queues Maximum number of queues to configure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+int
+mrvl_configure_txqs(struct mrvl_priv *priv, uint16_t portid,
+ uint16_t max_queues)
+{
+ /* We need only a subset of configuration. */
+ struct port_cfg *port_cfg = &mrvl_qos_cfg->port[portid];
+ int i;
+
+ if (mrvl_qos_cfg == NULL)
+ return 0;
+
+ priv->ppio_params.rate_limit_enable = port_cfg->rate_limit_enable;
+ if (port_cfg->rate_limit_enable)
+ priv->ppio_params.rate_limit_params =
+ port_cfg->rate_limit_params;
+
+ for (i = 0; i < max_queues; i++) {
+ struct pp2_ppio_outq_params *params =
+ &priv->ppio_params.outqs_params.outqs_params[i];
+
+ params->sched_mode = port_cfg->outq[i].sched_mode;
+ params->weight = port_cfg->outq[i].weight;
+ params->rate_limit_enable = port_cfg->outq[i].rate_limit_enable;
+ params->rate_limit_params = port_cfg->outq[i].rate_limit_params;
+ }
+
return 0;
}
diff --git a/drivers/net/mrvl/mrvl_qos.h b/drivers/net/mvpp2/mrvl_qos.h
index ae7508c9..fa9ddecb 100644
--- a/drivers/net/mrvl/mrvl_qos.h
+++ b/drivers/net/mvpp2/mrvl_qos.h
@@ -1,35 +1,7 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Marvell International Ltd.
- * Copyright(c) 2017 Semihalf.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
*/
#ifndef _MRVL_QOS_H_
@@ -48,6 +20,8 @@
/* QoS config. */
struct mrvl_qos_cfg {
struct port_cfg {
+ int rate_limit_enable;
+ struct pp2_ppio_rate_limit_params rate_limit_params;
struct {
uint8_t inq[MRVL_PP2_RXQ_MAX];
uint8_t dscp[MRVL_CP_PER_TC];
@@ -55,15 +29,21 @@ struct mrvl_qos_cfg {
uint8_t inqs;
uint8_t dscps;
uint8_t pcps;
+ enum pp2_ppio_color color;
} tc[MRVL_PP2_TC_MAX];
struct {
+ enum pp2_ppio_outq_sched_mode sched_mode;
uint8_t weight;
+ int rate_limit_enable;
+ struct pp2_ppio_rate_limit_params rate_limit_params;
} outq[MRVL_PP2_RXQ_MAX];
enum pp2_cls_qos_tbl_type mapping_priority;
uint16_t inqs;
uint16_t outqs;
uint8_t default_tc;
uint8_t use_global_defaults;
+ struct pp2_cls_plcr_params policer_params;
+ uint8_t policer_enable;
} port[RTE_MAX_ETHPORTS];
};
@@ -99,6 +79,20 @@ mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid,
uint16_t max_queues);
/**
+ * Configure TX Queues in a given port.
+ *
+ * Sets up TX queues egress scheduler and limiter.
+ *
+ * @param priv Port's private data
+ * @param portid DPDK port ID
+ * @param max_queues Maximum number of queues to configure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+int
+mrvl_configure_txqs(struct mrvl_priv *priv, uint16_t portid,
+ uint16_t max_queues);
+
+/**
* Start QoS mapping.
*
* Finalize QoS table configuration and initialize it in SDK. It can be done
diff --git a/drivers/net/mrvl/rte_pmd_mrvl_version.map b/drivers/net/mvpp2/rte_pmd_mvpp2_version.map
index a7530317..a7530317 100644
--- a/drivers/net/mrvl/rte_pmd_mrvl_version.map
+++ b/drivers/net/mvpp2/rte_pmd_mvpp2_version.map
diff --git a/drivers/net/nfp/Makefile b/drivers/net/nfp/Makefile
index aa3b68a4..ab4e0a7d 100644
--- a/drivers/net/nfp/Makefile
+++ b/drivers/net/nfp/Makefile
@@ -20,11 +20,24 @@ EXPORT_MAP := rte_pmd_nfp_version.map
LIBABIVER := 1
+VPATH += $(SRCDIR)/nfpcore
+
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_cppcore.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_cpp_pcie_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_mutex.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_resource.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_crc.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_mip.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nffw.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_hwinfo.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_rtsym.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nsp.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nsp_cmds.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nsp_eth.c
+
#
# all source are stored in SRCS-y
#
SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_net.c
-SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nfpu.c
-SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nspu.c
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index e5bfde62..faad1ee9 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2015 Netronome Systems, Inc.
+ * Copyright (c) 2014-2018 Netronome Systems, Inc.
* All rights reserved.
*
* Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
@@ -55,7 +55,13 @@
#include <rte_alarm.h>
#include <rte_spinlock.h>
-#include "nfp_nfpu.h"
+#include "nfpcore/nfp_cpp.h"
+#include "nfpcore/nfp_nffw.h"
+#include "nfpcore/nfp_hwinfo.h"
+#include "nfpcore/nfp_mip.h"
+#include "nfpcore/nfp_rtsym.h"
+#include "nfpcore/nfp_nsp.h"
+
#include "nfp_net_pmd.h"
#include "nfp_net_logs.h"
#include "nfp_net_ctrl.h"
@@ -103,13 +109,11 @@ static int nfp_net_rss_reta_write(struct rte_eth_dev *dev,
uint16_t reta_size);
static int nfp_net_rss_hash_write(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf);
+static int nfp_set_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr);
-/*
- * The offset of the queue controller queues in the PCIe Target. These
- * happen to be at the same offset on the NFP6000 and the NFP3200 so
- * we use a single macro here.
- */
-#define NFP_PCIE_QUEUE(_q) (0x800 * ((_q) & 0xff))
+/* The offset of the queue controller queues in the PCIe Target */
+#define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff)))
/* Maximum value which can be added to a queue with one transaction */
#define NFP_QCP_MAX_ADD 0x7f
@@ -213,57 +217,6 @@ nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val)
nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off);
}
-/*
- * Atomically reads link status information from global structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-nfp_net_dev_atomic_read_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = link;
- struct rte_eth_link *src = &dev->data->dev_link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
-/*
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-nfp_net_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &dev->data->dev_link;
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
static void
nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq)
{
@@ -310,7 +263,7 @@ nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
for (i = 0; i < txq->tx_count; i++) {
if (txq->txbufs[i].mbuf) {
- rte_pktmbuf_free(txq->txbufs[i].mbuf);
+ rte_pktmbuf_free_seg(txq->txbufs[i].mbuf);
txq->txbufs[i].mbuf = NULL;
}
}
@@ -343,7 +296,7 @@ __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
uint32_t new;
struct timespec wait;
- PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...\n",
+ PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...",
hw->qcp_cfg);
if (hw->qcp_cfg == NULL)
@@ -354,7 +307,7 @@ __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
wait.tv_sec = 0;
wait.tv_nsec = 1000000;
- PMD_DRV_LOG(DEBUG, "Polling for update ack...\n");
+ PMD_DRV_LOG(DEBUG, "Polling for update ack...");
/* Poll update field, waiting for NFP to ack the config */
for (cnt = 0; ; cnt++) {
@@ -372,7 +325,7 @@ __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
}
nanosleep(&wait, 0); /* waiting for a 1ms */
}
- PMD_DRV_LOG(DEBUG, "Ack DONE\n");
+ PMD_DRV_LOG(DEBUG, "Ack DONE");
return 0;
}
@@ -390,7 +343,7 @@ nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
{
uint32_t err;
- PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x\n",
+ PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x",
ctrl, update);
rte_spinlock_lock(&hw->reconfig_lock);
@@ -427,8 +380,6 @@ nfp_net_configure(struct rte_eth_dev *dev)
struct rte_eth_conf *dev_conf;
struct rte_eth_rxmode *rxmode;
struct rte_eth_txmode *txmode;
- uint32_t new_ctrl = 0;
- uint32_t update = 0;
struct nfp_net_hw *hw;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -454,96 +405,15 @@ nfp_net_configure(struct rte_eth_dev *dev)
}
/* Checking RX mode */
- if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
- if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
- update = NFP_NET_CFG_UPDATE_RSS;
- new_ctrl = NFP_NET_CFG_CTRL_RSS;
- } else {
- PMD_INIT_LOG(INFO, "RSS not supported");
- return -EINVAL;
- }
- }
-
- if (rxmode->split_hdr_size) {
- PMD_INIT_LOG(INFO, "rxmode does not support split header");
+ if (rxmode->mq_mode & ETH_MQ_RX_RSS &&
+ !(hw->cap & NFP_NET_CFG_CTRL_RSS)) {
+ PMD_INIT_LOG(INFO, "RSS not supported");
return -EINVAL;
}
- if (rxmode->hw_ip_checksum) {
- if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM) {
- new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
- } else {
- PMD_INIT_LOG(INFO, "RXCSUM not supported");
- return -EINVAL;
- }
- }
-
- if (rxmode->hw_vlan_filter) {
- PMD_INIT_LOG(INFO, "VLAN filter not supported");
- return -EINVAL;
- }
-
- if (rxmode->hw_vlan_strip) {
- if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN) {
- new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
- } else {
- PMD_INIT_LOG(INFO, "hw vlan strip not supported");
- return -EINVAL;
- }
- }
-
- if (rxmode->hw_vlan_extend) {
- PMD_INIT_LOG(INFO, "VLAN extended not supported");
- return -EINVAL;
- }
-
- if (rxmode->jumbo_frame)
- hw->mtu = rxmode->max_rx_pkt_len;
-
- if (!rxmode->hw_strip_crc)
- PMD_INIT_LOG(INFO, "HW does strip CRC and it is not configurable");
-
- if (rxmode->enable_scatter) {
- PMD_INIT_LOG(INFO, "Scatter not supported");
- return -EINVAL;
- }
-
- /* If next capabilities are supported, configure them by default */
-
- /* VLAN insertion */
- if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
- new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
-
- /* L2 broadcast */
- if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
- new_ctrl |= NFP_NET_CFG_CTRL_L2BC;
-
- /* L2 multicast */
- if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
- new_ctrl |= NFP_NET_CFG_CTRL_L2MC;
-
- /* TX checksum offload */
- if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
- new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
-
- /* LSO offload */
- if (hw->cap & NFP_NET_CFG_CTRL_LSO)
- new_ctrl |= NFP_NET_CFG_CTRL_LSO;
-
- /* RX gather */
- if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
- new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
-
- if (!new_ctrl)
- return 0;
-
- update |= NFP_NET_CFG_UPDATE_GEN;
-
- nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
- if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
- return -EIO;
-
- hw->ctrl = new_ctrl;
+ /* Checking RX offloads */
+ if (!(rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP))
+ PMD_INIT_LOG(INFO, "HW does strip CRC and it is not configurable");
return 0;
}
@@ -625,47 +495,29 @@ nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
#define ETH_ADDR_LEN 6
static void
-nfp_eth_copy_mac_reverse(uint8_t *dst, const uint8_t *src)
+nfp_eth_copy_mac(uint8_t *dst, const uint8_t *src)
{
int i;
for (i = 0; i < ETH_ADDR_LEN; i++)
- dst[ETH_ADDR_LEN - i - 1] = src[i];
+ dst[i] = src[i];
}
static int
nfp_net_pf_read_mac(struct nfp_net_hw *hw, int port)
{
- union eth_table_entry *entry;
- int idx, i;
-
- idx = port;
- entry = hw->eth_table;
-
- /* Reading NFP ethernet table obtained before */
- for (i = 0; i < NSP_ETH_MAX_COUNT; i++) {
- if (!(entry->port & NSP_ETH_PORT_LANES_MASK)) {
- /* port not in use */
- entry++;
- continue;
- }
- if (idx == 0)
- break;
- idx--;
- entry++;
- }
-
- if (i == NSP_ETH_MAX_COUNT)
- return -EINVAL;
+ struct nfp_eth_table *nfp_eth_table;
+ nfp_eth_table = nfp_eth_read_ports(hw->cpp);
/*
* hw points to port0 private data. We need hw now pointing to
* right port.
*/
hw += port;
- nfp_eth_copy_mac_reverse((uint8_t *)&hw->mac_addr,
- (uint8_t *)&entry->mac_addr);
+ nfp_eth_copy_mac((uint8_t *)&hw->mac_addr,
+ (uint8_t *)&nfp_eth_table->ports[port].mac_addr);
+ free(nfp_eth_table);
return 0;
}
@@ -675,7 +527,7 @@ nfp_net_vf_read_mac(struct nfp_net_hw *hw)
uint32_t tmp;
tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
- memcpy(&hw->mac_addr[0], &tmp, sizeof(struct ether_addr));
+ memcpy(&hw->mac_addr[0], &tmp, 4);
tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
memcpy(&hw->mac_addr[4], &tmp, 2);
@@ -695,6 +547,37 @@ nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac)
hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6);
}
+static int
+nfp_set_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+{
+ struct nfp_net_hw *hw;
+ uint32_t update, ctrl;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
+ !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) {
+ PMD_INIT_LOG(INFO, "MAC address cannot be changed when the"
+ " port is enabled");
+ return -EBUSY;
+ }
+
+ /* Writing new MAC to the specific port BAR address */
+ nfp_net_write_mac(hw, (uint8_t *)mac_addr);
+
+ /* Signal the NIC about the change */
+ update = NFP_NET_CFG_UPDATE_MACADDR;
+ ctrl = hw->ctrl | NFP_NET_CFG_CTRL_LIVE_ADDR;
+ if (nfp_net_reconfig(hw, ctrl, update) < 0) {
+ PMD_INIT_LOG(INFO, "MAC address update failed");
+ return -EIO;
+ }
+ return 0;
+}
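
A minimal application-side sketch of reaching this new callback through the generic ethdev API (the locally administered address is illustrative):

#include <rte_ethdev.h>
#include <rte_ether.h>

static int
set_port_mac(uint16_t port_id)
{
	struct ether_addr addr = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};

	/*
	 * Dispatches to the PMD's .mac_addr_set; this driver refuses with
	 * -EBUSY while the port is enabled and the firmware does not
	 * advertise NFP_NET_CFG_CTRL_LIVE_ADDR.
	 */
	return rte_eth_dev_default_mac_addr_set(port_id, &addr);
}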
+
static int
nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
struct rte_intr_handle *intr_handle)
@@ -729,7 +612,7 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
*/
nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
intr_handle->intr_vec[i] = i + 1;
- PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d\n", i,
+ PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d", i,
intr_handle->intr_vec[i]);
}
}
@@ -739,15 +622,75 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
return 0;
}
+static uint32_t
+nfp_check_offloads(struct rte_eth_dev *dev)
+{
+ struct nfp_net_hw *hw;
+ struct rte_eth_conf *dev_conf;
+ struct rte_eth_rxmode *rxmode;
+ struct rte_eth_txmode *txmode;
+ uint32_t ctrl = 0;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ dev_conf = &dev->data->dev_conf;
+ rxmode = &dev_conf->rxmode;
+ txmode = &dev_conf->txmode;
+
+ if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
+ if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
+ ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
+ }
+
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+ if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
+ ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
+ }
+
+ if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+ hw->mtu = rxmode->max_rx_pkt_len;
+
+ if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+ ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+
+ /* L2 broadcast */
+ if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
+ ctrl |= NFP_NET_CFG_CTRL_L2BC;
+
+ /* L2 multicast */
+ if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
+ ctrl |= NFP_NET_CFG_CTRL_L2MC;
+
+ /* TX checksum offload */
+ if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
+ txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
+ txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
+ ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
+
+ /* LSO offload */
+ if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+ if (hw->cap & NFP_NET_CFG_CTRL_LSO)
+ ctrl |= NFP_NET_CFG_CTRL_LSO;
+ else
+ ctrl |= NFP_NET_CFG_CTRL_LSO2;
+ }
+
+ /* RX gather */
+ if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ ctrl |= NFP_NET_CFG_CTRL_GATHER;
+
+ return ctrl;
+}
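
For context, a sketch of the application-side configuration these flags come from, using the 18.05-era per-port offload fields (which offloads to request is illustrative):

#include <rte_ethdev.h>

static int
configure_port(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			/* use the offloads field, not the legacy bit-fields */
			.ignore_offload_bitfield = 1,
			.offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
				    DEV_RX_OFFLOAD_CRC_STRIP,
		},
		.txmode = {
			.offloads = DEV_TX_OFFLOAD_TCP_TSO |
				    DEV_TX_OFFLOAD_MULTI_SEGS,
		},
	};

	/*
	 * nfp_check_offloads() later translates these into the matching
	 * NFP_NET_CFG_CTRL_* bits (RXCSUM, LSO/LSO2, GATHER) at start time.
	 */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}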
+
static int
nfp_net_start(struct rte_eth_dev *dev)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
- struct rte_eth_conf *dev_conf;
- struct rte_eth_rxmode *rxmode;
uint32_t new_ctrl, update = 0;
struct nfp_net_hw *hw;
+ struct rte_eth_conf *dev_conf;
+ struct rte_eth_rxmode *rxmode;
uint32_t intr_vector;
int ret;
@@ -758,9 +701,6 @@ nfp_net_start(struct rte_eth_dev *dev)
/* Disabling queues just in case... */
nfp_net_disable_queues(dev);
- /* Writing configuration parameters in the device */
- nfp_net_params_setup(hw);
-
/* Enabling the required queues in the device */
nfp_net_enable_queues(dev);
@@ -795,21 +735,22 @@ nfp_net_start(struct rte_eth_dev *dev)
rte_intr_enable(intr_handle);
+ new_ctrl = nfp_check_offloads(dev);
+
+ /* Writing configuration parameters in the device */
+ nfp_net_params_setup(hw);
+
dev_conf = &dev->data->dev_conf;
rxmode = &dev_conf->rxmode;
- /* Checking RX mode */
if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
- if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
- if (!nfp_net_rss_config_default(dev))
- update |= NFP_NET_CFG_UPDATE_RSS;
- } else {
- PMD_INIT_LOG(INFO, "RSS not supported");
- return -EINVAL;
- }
+ nfp_net_rss_config_default(dev);
+ update |= NFP_NET_CFG_UPDATE_RSS;
+ new_ctrl |= NFP_NET_CFG_CTRL_RSS;
}
+
/* Enable device */
- new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_ENABLE;
+ new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
@@ -831,7 +772,7 @@ nfp_net_start(struct rte_eth_dev *dev)
if (hw->is_pf)
/* Configure the physical port up */
- nfp_nsp_eth_config(hw->nspu_desc, hw->pf_port_idx, 1);
+ nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 1);
hw->ctrl = new_ctrl;
@@ -882,7 +823,7 @@ nfp_net_stop(struct rte_eth_dev *dev)
if (hw->is_pf)
/* Configure the physical port down */
- nfp_nsp_eth_config(hw->nspu_desc, hw->pf_port_idx, 0);
+ nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 0);
}
/* Reset and stop device. The device can not be restarted. */
@@ -936,7 +877,7 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev)
uint32_t new_ctrl, update = 0;
struct nfp_net_hw *hw;
- PMD_DRV_LOG(DEBUG, "Promiscuous mode enable\n");
+ PMD_DRV_LOG(DEBUG, "Promiscuous mode enable");
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -946,7 +887,7 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev)
}
if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) {
- PMD_DRV_LOG(INFO, "Promiscuous mode already enabled\n");
+ PMD_DRV_LOG(INFO, "Promiscuous mode already enabled");
return;
}
@@ -972,7 +913,7 @@ nfp_net_promisc_disable(struct rte_eth_dev *dev)
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
- PMD_DRV_LOG(INFO, "Promiscuous mode already disabled\n");
+ PMD_DRV_LOG(INFO, "Promiscuous mode already disabled");
return;
}
@@ -999,8 +940,9 @@ static int
nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
struct nfp_net_hw *hw;
- struct rte_eth_link link, old;
+ struct rte_eth_link link;
uint32_t nn_link_status;
+ int ret;
static const uint32_t ls_to_ethtool[] = {
[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE,
@@ -1013,13 +955,10 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
[NFP_NET_CFG_STS_LINK_RATE_100G] = ETH_SPEED_NUM_100G,
};
- PMD_DRV_LOG(DEBUG, "Link update\n");
+ PMD_DRV_LOG(DEBUG, "Link update");
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- memset(&old, 0, sizeof(old));
- nfp_net_dev_atomic_read_link_status(dev, &old);
-
nn_link_status = nn_cfg_readl(hw, NFP_NET_CFG_STS);
memset(&link, 0, sizeof(struct rte_eth_link));
@@ -1037,16 +976,14 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
else
link.link_speed = ls_to_ethtool[nn_link_status];
- if (old.link_status != link.link_status) {
- nfp_net_dev_atomic_write_link_status(dev, &link);
+ ret = rte_eth_linkstatus_set(dev, &link);
+ if (ret == 0) {
if (link.link_status)
- PMD_DRV_LOG(INFO, "NIC Link is Up\n");
+ PMD_DRV_LOG(INFO, "NIC Link is Up");
else
- PMD_DRV_LOG(INFO, "NIC Link is Down\n");
- return 0;
+ PMD_DRV_LOG(INFO, "NIC Link is Down");
}
-
- return -1;
+ return ret;
}
static int
@@ -1214,7 +1151,6 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
dev_info->min_rx_bufsize = ETHER_MIN_MTU;
@@ -1230,6 +1166,8 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM;
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+
if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
@@ -1238,6 +1176,12 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM;
+ if (hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+
+ if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MULTI_SEGS;
+
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
.pthresh = DEFAULT_RX_PTHRESH,
@@ -1256,8 +1200,6 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.tx_free_thresh = DEFAULT_TX_FREE_THRESH,
.tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
- .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
};
dev_info->flow_type_rss_offloads = ETH_RSS_NONFRAG_IPV4_TCP |
@@ -1268,12 +1210,9 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
- dev_info->speed_capa = ETH_SPEED_NUM_1G | ETH_LINK_SPEED_10G |
- ETH_SPEED_NUM_25G | ETH_SPEED_NUM_40G |
- ETH_SPEED_NUM_50G | ETH_LINK_SPEED_100G;
-
- if (hw->cap & NFP_NET_CFG_CTRL_LSO)
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+ dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
+ ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
}
static const uint32_t *
@@ -1376,18 +1315,17 @@ nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_eth_link link;
- memset(&link, 0, sizeof(link));
- nfp_net_dev_atomic_read_link_status(dev, &link);
+ rte_eth_linkstatus_get(dev, &link);
if (link.link_status)
- RTE_LOG(INFO, PMD, "Port %d: Link Up - speed %u Mbps - %s\n",
- dev->data->port_id, link.link_speed,
- link.link_duplex == ETH_LINK_FULL_DUPLEX
- ? "full-duplex" : "half-duplex");
+ PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
+ dev->data->port_id, link.link_speed,
+ link.link_duplex == ETH_LINK_FULL_DUPLEX
+ ? "full-duplex" : "half-duplex");
else
- RTE_LOG(INFO, PMD, " Port %d: Link Down\n",
- dev->data->port_id);
+ PMD_DRV_LOG(INFO, " Port %d: Link Down",
+ dev->data->port_id);
- RTE_LOG(INFO, PMD, "PCI Address: %04d:%02d:%02d:%d\n",
+ PMD_DRV_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
pci_dev->addr.domain, pci_dev->addr.bus,
pci_dev->addr.devid, pci_dev->addr.function);
}
@@ -1428,11 +1366,9 @@ nfp_net_dev_interrupt_handler(void *param)
struct rte_eth_link link;
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
- PMD_DRV_LOG(DEBUG, "We got a LSC interrupt!!!\n");
+ PMD_DRV_LOG(DEBUG, "We got a LSC interrupt!!!");
- /* get the link status */
- memset(&link, 0, sizeof(link));
- nfp_net_dev_atomic_read_link_status(dev, &link);
+ rte_eth_linkstatus_get(dev, &link);
nfp_net_link_update(dev, 0);
@@ -1449,7 +1385,7 @@ nfp_net_dev_interrupt_handler(void *param)
if (rte_eal_alarm_set(timeout * 1000,
nfp_net_dev_interrupt_delayed_handler,
(void *)dev) < 0) {
- RTE_LOG(ERR, PMD, "Error setting alarm");
+ PMD_INIT_LOG(ERR, "Error setting alarm");
/* Unmasking */
nfp_net_irq_unmask(dev);
}
@@ -1534,7 +1470,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
if (((nb_desc * sizeof(struct nfp_net_rx_desc)) % 128) != 0 ||
(nb_desc > NFP_NET_MAX_RX_DESC) ||
(nb_desc < NFP_NET_MIN_RX_DESC)) {
- RTE_LOG(ERR, PMD, "Wrong nb_desc value\n");
+ PMD_DRV_LOG(ERR, "Wrong nb_desc value");
return -EINVAL;
}
@@ -1572,8 +1508,6 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
rxq->rx_count = nb_desc;
rxq->port_id = dev->data->port_id;
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
- rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0
- : ETHER_CRC_LEN);
rxq->drop_en = rx_conf->rx_drop_en;
/*
@@ -1587,7 +1521,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
socket_id);
if (tz == NULL) {
- RTE_LOG(ERR, PMD, "Error allocatig rx dma\n");
+ PMD_DRV_LOG(ERR, "Error allocating rx dma");
nfp_net_rx_queue_release(rxq);
return -ENOMEM;
}
@@ -1605,7 +1539,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
return -ENOMEM;
}
- PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n",
+ PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
rxq->rxbufs, rxq->rxds, (unsigned long int)rxq->dma);
nfp_net_reset_rx_queue(rxq);
@@ -1630,7 +1564,7 @@ nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
uint64_t dma_addr;
unsigned i;
- PMD_RX_LOG(DEBUG, "nfp_net_rx_fill_freelist for %u descriptors\n",
+ PMD_RX_LOG(DEBUG, "nfp_net_rx_fill_freelist for %u descriptors",
rxq->rx_count);
for (i = 0; i < rxq->rx_count; i++) {
@@ -1638,7 +1572,7 @@ nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rxq->mem_pool);
if (mbuf == NULL) {
- RTE_LOG(ERR, PMD, "RX mbuf alloc failed queue_id=%u\n",
+ PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
(unsigned)rxq->qidx);
return -ENOMEM;
}
@@ -1650,14 +1584,14 @@ nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
rxd->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
rxd->fld.dma_addr_lo = dma_addr & 0xffffffff;
rxe[i].mbuf = mbuf;
- PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64 "\n", i, dma_addr);
+ PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64, i, dma_addr);
}
/* Make sure all writes are flushed before telling the hardware */
rte_wmb();
/* Not advertising the whole ring as the firmware gets confused if so */
- PMD_RX_LOG(DEBUG, "Increment FL write pointer in %u\n",
+ PMD_RX_LOG(DEBUG, "Increment FL write pointer in %u",
rxq->rx_count - 1);
nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, rxq->rx_count - 1);
@@ -1683,7 +1617,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
if (((nb_desc * sizeof(struct nfp_net_tx_desc)) % 128) != 0 ||
(nb_desc > NFP_NET_MAX_TX_DESC) ||
(nb_desc < NFP_NET_MIN_TX_DESC)) {
- RTE_LOG(ERR, PMD, "Wrong nb_desc value\n");
+ PMD_DRV_LOG(ERR, "Wrong nb_desc value");
return -EINVAL;
}
@@ -1692,10 +1626,10 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
DEFAULT_TX_FREE_THRESH);
if (tx_free_thresh > (nb_desc)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"tx_free_thresh must be less than the number of TX "
"descriptors. (tx_free_thresh=%u port=%d "
- "queue=%d)\n", (unsigned int)tx_free_thresh,
+ "queue=%d)", (unsigned int)tx_free_thresh,
dev->data->port_id, (int)queue_idx);
return -(EINVAL);
}
@@ -1705,7 +1639,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
* calling nfp_net_stop
*/
if (dev->data->tx_queues[queue_idx]) {
- PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d\n",
+ PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
queue_idx);
nfp_net_tx_queue_release(dev->data->tx_queues[queue_idx]);
dev->data->tx_queues[queue_idx] = NULL;
@@ -1715,7 +1649,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq),
RTE_CACHE_LINE_SIZE, socket_id);
if (txq == NULL) {
- RTE_LOG(ERR, PMD, "Error allocating tx dma\n");
+ PMD_DRV_LOG(ERR, "Error allocating tx dma");
return -ENOMEM;
}
@@ -1729,7 +1663,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
NFP_NET_MAX_TX_DESC, NFP_MEMZONE_ALIGN,
socket_id);
if (tz == NULL) {
- RTE_LOG(ERR, PMD, "Error allocating tx dma\n");
+ PMD_DRV_LOG(ERR, "Error allocating tx dma");
nfp_net_tx_queue_release(txq);
return -ENOMEM;
}
@@ -1746,7 +1680,6 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);
txq->port_id = dev->data->port_id;
- txq->txq_flags = tx_conf->txq_flags;
/* Saving physical and virtual addresses for the TX ring */
txq->dma = (uint64_t)tz->iova;
@@ -1760,7 +1693,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
nfp_net_tx_queue_release(txq);
return -ENOMEM;
}
- PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n",
+ PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
txq->txbufs, txq->txds, (unsigned long int)txq->dma);
nfp_net_reset_tx_queue(txq);
@@ -1786,7 +1719,7 @@ nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
uint64_t ol_flags;
struct nfp_net_hw *hw = txq->hw;
- if (!(hw->cap & NFP_NET_CFG_CTRL_LSO))
+ if (!(hw->cap & NFP_NET_CFG_CTRL_LSO_ANY))
goto clean_txd;
ol_flags = mb->ol_flags;
@@ -1794,15 +1727,19 @@ nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
if (!(ol_flags & PKT_TX_TCP_SEG))
goto clean_txd;
- txd->l4_offset = mb->l2_len + mb->l3_len + mb->l4_len;
- txd->lso = rte_cpu_to_le_16(mb->tso_segsz);
+ txd->l3_offset = mb->l2_len;
+ txd->l4_offset = mb->l2_len + mb->l3_len;
+ txd->lso_hdrlen = mb->l2_len + mb->l3_len + mb->l4_len;
+ txd->mss = rte_cpu_to_le_16(mb->tso_segsz);
txd->flags = PCIE_DESC_TX_LSO;
return;
clean_txd:
txd->flags = 0;
+ txd->l3_offset = 0;
txd->l4_offset = 0;
- txd->lso = 0;
+ txd->lso_hdrlen = 0;
+ txd->mss = 0;
}
/* nfp_net_tx_cksum - Set TX CSUM offload flags in TX descriptor */
@@ -1888,14 +1825,10 @@ nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
return;
- if (NFD_CFG_MAJOR_VERSION_of(hw->ver) <= 3) {
- if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
- return;
-
- hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET);
- hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET);
-
- } else if (NFP_DESC_META_LEN(rxd)) {
+ /* this is true for new firmwares */
+ if (likely(((hw->cap & NFP_NET_CFG_CTRL_RSS2) ||
+ (NFD_CFG_MAJOR_VERSION_of(hw->ver) == 4)) &&
+ NFP_DESC_META_LEN(rxd))) {
/*
* new metadata api:
* <---- 32 bit ----->
@@ -1928,7 +1861,11 @@ nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
return;
}
} else {
- return;
+ if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
+ return;
+
+ hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET);
+ hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET);
}
mbuf->hash.rss = hash;
@@ -2019,16 +1956,16 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
break;
}
+ rxds = &rxq->rxds[rxq->rd_p];
+ if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
+ break;
+
/*
* Memory barrier to ensure that we won't do other
* reads before the DD bit.
*/
rte_rmb();
- rxds = &rxq->rxds[rxq->rd_p];
- if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
- break;
-
/*
* We got a packet. Let's alloc a new mbuff for refilling the
* free descriptor ring as soon as possible
@@ -2051,7 +1988,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
mb = rxb->mbuf;
rxb->mbuf = new_mb;
- PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u\n",
+ PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u",
rxds->rxd.data_len, rxq->mbuf_size);
/* Size of this segment */
@@ -2089,6 +2026,8 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
mb->nb_segs = 1;
mb->next = NULL;
+ mb->port = rxq->port_id;
+
/* Checking the RSS flag */
nfp_net_set_hash(rxq, rxds, mb);
@@ -2120,7 +2059,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
if (nb_hold == 0)
return nb_hold;
- PMD_RX_LOG(DEBUG, "RX port_id=%u queue_id=%u, %d packets received\n",
+ PMD_RX_LOG(DEBUG, "RX port_id=%u queue_id=%u, %d packets received",
rxq->port_id, (unsigned int)rxq->qidx, nb_hold);
nb_hold += rxq->nb_rx_hold;
@@ -2131,7 +2070,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
*/
rte_wmb();
if (nb_hold > rxq->rx_free_thresh) {
- PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u\n",
+ PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u",
rxq->port_id, (unsigned int)rxq->qidx,
(unsigned)nb_hold, (unsigned)avail);
nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
@@ -2155,14 +2094,14 @@ nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
int todo;
PMD_TX_LOG(DEBUG, "queue %u. Check for descriptor with a complete"
- " status\n", txq->qidx);
+ " status", txq->qidx);
/* Work out how many packets have been sent */
qcp_rd_p = nfp_qcp_read(txq->qcp_q, NFP_QCP_READ_PTR);
if (qcp_rd_p == txq->rd_p) {
PMD_TX_LOG(DEBUG, "queue %u: It seems harrier is not sending "
- "packets (%u, %u)\n", txq->qidx,
+ "packets (%u, %u)", txq->qidx,
qcp_rd_p, txq->rd_p);
return 0;
}
@@ -2172,7 +2111,7 @@ nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
else
todo = qcp_rd_p + txq->tx_count - txq->rd_p;
- PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->rd_p: %u, qcp->rd_p: %u\n",
+ PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->rd_p: %u, qcp->rd_p: %u",
qcp_rd_p, txq->rd_p, txq->rd_p);
if (todo == 0)
@@ -2226,7 +2165,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
hw = txq->hw;
txds = &txq->txds[txq->wr_p];
- PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets\n",
+ PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets",
txq->qidx, txq->wr_p, nb_pkts);
if ((nfp_free_tx_desc(txq) < nb_pkts) || (nfp_net_txq_full(txq)))
@@ -2240,7 +2179,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
i = 0;
issued_descs = 0;
- PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets\n",
+ PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets",
txq->qidx, nb_pkts);
/* Sending packets */
while ((i < nb_pkts) && free_descs) {
@@ -2299,7 +2238,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
dma_size = pkt->data_len;
dma_addr = rte_mbuf_data_iova(pkt);
PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
- "%" PRIx64 "\n", dma_addr);
+ "%" PRIx64 "", dma_addr);
/* Filling descriptors fields */
txds->dma_len = dma_size;
@@ -2349,7 +2288,7 @@ nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
if ((mask & ETH_VLAN_FILTER_OFFLOAD) ||
(mask & ETH_VLAN_EXTEND_OFFLOAD))
- RTE_LOG(INFO, PMD, "No support for ETH_VLAN_FILTER_OFFLOAD or"
+ PMD_DRV_LOG(INFO, "No support for ETH_VLAN_FILTER_OFFLOAD or"
" ETH_VLAN_EXTEND_OFFLOAD");
/* Enable vlan strip if it is not configured yet */
@@ -2386,9 +2325,9 @@ nfp_net_rss_reta_write(struct rte_eth_dev *dev,
NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
- RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
+ PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
+ "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
return -EINVAL;
}
@@ -2467,9 +2406,9 @@ nfp_net_reta_query(struct rte_eth_dev *dev,
return -EINVAL;
if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
- RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
+ PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
+ "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
return -EINVAL;
}
@@ -2555,14 +2494,14 @@ nfp_net_rss_hash_update(struct rte_eth_dev *dev,
/* Checking if RSS is enabled */
if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) {
if (rss_hf != 0) { /* Enable RSS? */
- RTE_LOG(ERR, PMD, "RSS unsupported\n");
+ PMD_DRV_LOG(ERR, "RSS unsupported");
return -EINVAL;
}
return 0; /* Nothing to do */
}
if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
- RTE_LOG(ERR, PMD, "hash key too long\n");
+ PMD_DRV_LOG(ERR, "hash key too long");
return -EINVAL;
}
@@ -2634,7 +2573,7 @@ nfp_net_rss_config_default(struct rte_eth_dev *dev)
uint16_t queue;
int i, j, ret;
- RTE_LOG(INFO, PMD, "setting default RSS conf for %u queues\n",
+ PMD_DRV_LOG(INFO, "setting default RSS conf for %u queues",
rx_queues);
nfp_reta_conf[0].mask = ~0x0;
@@ -2654,7 +2593,7 @@ nfp_net_rss_config_default(struct rte_eth_dev *dev)
dev_conf = &dev->data->dev_conf;
if (!dev_conf) {
- RTE_LOG(INFO, PMD, "wrong rss conf");
+ PMD_DRV_LOG(INFO, "wrong rss conf");
return -EINVAL;
}
rss_conf = dev_conf->rx_adv_conf.rss_conf;
@@ -2679,6 +2618,7 @@ static const struct eth_dev_ops nfp_net_eth_dev_ops = {
.dev_infos_get = nfp_net_infos_get,
.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
.mtu_set = nfp_net_dev_mtu_set,
+ .mac_addr_set = nfp_set_mac_addr,
.vlan_offload_set = nfp_net_vlan_offload_set,
.reta_update = nfp_net_reta_update,
.reta_query = nfp_net_reta_query,
@@ -2734,10 +2674,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
uint64_t tx_bar_off = 0, rx_bar_off = 0;
uint32_t start_q;
int stride = 4;
-
- nspu_desc_t *nspu_desc = NULL;
- uint64_t bar_offset;
int port = 0;
+ int err;
PMD_INIT_FUNC_TRACE();
@@ -2747,18 +2685,17 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
(pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
port = get_pf_port_number(eth_dev->data->name);
if (port < 0 || port > 7) {
- RTE_LOG(ERR, PMD, "Port value is wrong\n");
+ PMD_DRV_LOG(ERR, "Port value is wrong");
return -ENODEV;
}
- PMD_INIT_LOG(DEBUG, "Working with PF port value %d\n", port);
+ PMD_INIT_LOG(DEBUG, "Working with PF port value %d", port);
/* This points to port 0 private data */
hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
/* This points to the specific port private data */
hw = &hwport0[port];
- hw->pf_port_idx = port;
} else {
hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
hwport0 = 0;
@@ -2786,26 +2723,21 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
if (hw->ctrl_bar == NULL) {
- RTE_LOG(ERR, PMD,
- "hw->ctrl_bar is NULL. BAR0 not configured\n");
+ PMD_DRV_LOG(ERR,
+ "hw->ctrl_bar is NULL. BAR0 not configured");
return -ENODEV;
}
if (hw->is_pf && port == 0) {
- nspu_desc = hw->nspu_desc;
-
- if (nfp_nsp_map_ctrl_bar(nspu_desc, &bar_offset) != 0) {
- /*
- * A firmware should be there after PF probe so this
- * should not happen.
- */
- RTE_LOG(ERR, PMD, "PF BAR symbol resolution failed\n");
- return -ENODEV;
+ hw->ctrl_bar = nfp_rtsym_map(hw->sym_tbl, "_pf0_net_bar0",
+ hw->total_ports * 32768,
+ &hw->ctrl_area);
+ if (!hw->ctrl_bar) {
+ PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _pf0_net_bar0");
+ return -EIO;
}
- /* vNIC PF control BAR is a subset of PF PCI device BAR */
- hw->ctrl_bar += bar_offset;
- PMD_INIT_LOG(DEBUG, "ctrl bar: %p\n", hw->ctrl_bar);
+ PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
}
if (port > 0) {
@@ -2817,7 +2749,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
(port * NFP_PF_CSR_SLICE_SIZE);
}
- PMD_INIT_LOG(DEBUG, "ctrl bar: %p\n", hw->ctrl_bar);
+ PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);
@@ -2828,31 +2760,34 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
case PCI_DEVICE_ID_NFP6000_PF_NIC:
case PCI_DEVICE_ID_NFP6000_VF_NIC:
start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
- tx_bar_off = NFP_PCIE_QUEUE(start_q);
+ tx_bar_off = start_q * NFP_QCP_QUEUE_ADDR_SZ;
start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
- rx_bar_off = NFP_PCIE_QUEUE(start_q);
+ rx_bar_off = start_q * NFP_QCP_QUEUE_ADDR_SZ;
break;
default:
- RTE_LOG(ERR, PMD, "nfp_net: no device ID matching\n");
- return -ENODEV;
+ PMD_DRV_LOG(ERR, "nfp_net: no device ID matching");
+ err = -ENODEV;
+ goto dev_err_ctrl_map;
}
- PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "\n", tx_bar_off);
- PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "\n", rx_bar_off);
+ PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off);
+ PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off);
if (hw->is_pf && port == 0) {
/* configure access to tx/rx vNIC BARs */
- nfp_nsp_map_queues_bar(nspu_desc, &bar_offset);
- PMD_INIT_LOG(DEBUG, "tx/rx bar_offset: %" PRIx64 "\n",
- bar_offset);
- hwport0->hw_queues = (uint8_t *)pci_dev->mem_resource[0].addr;
-
- /* vNIC PF tx/rx BARs are a subset of PF PCI device */
- hwport0->hw_queues += bar_offset;
+ hwport0->hw_queues = nfp_cpp_map_area(hw->cpp, 0, 0,
+ NFP_PCIE_QUEUE(0),
+ NFP_QCP_QUEUE_AREA_SZ,
+ &hw->hwqueues_area);
+
+ if (!hwport0->hw_queues) {
+ printf("nfp_rtsym_map fails for net.qc");
+ err = -EIO;
+ goto dev_err_ctrl_map;
+ }
- /* Lets seize the chance to read eth table from hw */
- if (nfp_nsp_eth_read_table(nspu_desc, &hw->eth_table))
- return -ENODEV;
+ PMD_INIT_LOG(DEBUG, "tx/rx bar address: 0x%p",
+ hwport0->hw_queues);
}
if (hw->is_pf) {
@@ -2877,14 +2812,20 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
hw->mtu = ETHER_MTU;
+ /* VLAN insertion is incompatible with LSOv2 */
+ if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
+ hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;
+
if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
hw->rx_offset = NFP_NET_RX_OFFSET;
else
hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);
- PMD_INIT_LOG(INFO, "VER: %#x, Maximum supported MTU: %d",
- hw->ver, hw->max_mtu);
- PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s", hw->cap,
+ PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d",
+ NFD_CFG_MAJOR_VERSION_of(hw->ver),
+ NFD_CFG_MINOR_VERSION_of(hw->ver), hw->max_mtu);
+
+ PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s%s%s%s", hw->cap,
hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
hw->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
hw->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "",
@@ -2894,8 +2835,11 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
hw->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
hw->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
+ hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "",
- hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "");
+ hw->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSOv2 " : "",
+ hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "",
+ hw->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSSv2 " : "");
hw->ctrl = 0;
@@ -2912,7 +2856,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR, "Failed to space for MAC address");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto dev_err_queues_map;
}
if (hw->is_pf) {
@@ -2923,6 +2868,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
}
if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr)) {
+ PMD_INIT_LOG(INFO, "Using random mac address for port %d",
+ port);
/* Using random mac addresses for VFs */
eth_random_addr(&hw->mac_addr[0]);
nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
@@ -2951,11 +2898,19 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
nfp_net_stats_reset(eth_dev);
return 0;
+
+dev_err_queues_map:
+ nfp_cpp_area_free(hw->hwqueues_area);
+dev_err_ctrl_map:
+ nfp_cpp_area_free(hw->ctrl_area);
+
+ return err;
}
static int
nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports,
- nfpu_desc_t *nfpu_desc, void **priv)
+ struct nfp_cpp *cpp, struct nfp_hwinfo *hwinfo,
+ int phys_port, struct nfp_rtsym_table *sym_tbl, void **priv)
{
struct rte_eth_dev *eth_dev;
struct nfp_net_hw *hw;
@@ -2993,12 +2948,16 @@ nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports,
* Then dev_private is adjusted per port.
*/
hw = (struct nfp_net_hw *)(eth_dev->data->dev_private) + port;
- hw->nspu_desc = nfpu_desc->nspu;
- hw->nfpu_desc = nfpu_desc;
+ hw->cpp = cpp;
+ hw->hwinfo = hwinfo;
+ hw->sym_tbl = sym_tbl;
+ hw->pf_port_idx = phys_port;
hw->is_pf = 1;
if (ports > 1)
hw->pf_multiport_enabled = 1;
+ hw->total_ports = ports;
+
eth_dev->device = &dev->device;
rte_eth_copy_pci_info(eth_dev, dev);
@@ -3006,82 +2965,228 @@ nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports,
if (ret)
rte_eth_dev_release_port(eth_dev);
+ else
+ rte_eth_dev_probing_finish(eth_dev);
rte_free(port_name);
return ret;
}
+#define DEFAULT_FW_PATH "/lib/firmware/netronome"
+
+static int
+nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card)
+{
+ struct nfp_cpp *cpp = nsp->cpp;
+ int fw_f;
+ char *fw_buf;
+ char fw_name[125];
+ char serial[40];
+ struct stat file_stat;
+ off_t fsize, bytes;
+
+ /* Looking for firmware file in order of priority */
+
+ /* First try to find a firmware image specific for this device */
+ sprintf(serial, "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
+ cpp->serial[0], cpp->serial[1], cpp->serial[2], cpp->serial[3],
+ cpp->serial[4], cpp->serial[5], cpp->interface >> 8,
+ cpp->interface & 0xff);
+
+ sprintf(fw_name, "%s/%s.nffw", DEFAULT_FW_PATH, serial);
+
+ PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
+ fw_f = open(fw_name, O_RDONLY);
+ if (fw_f > 0)
+ goto read_fw;
+
+ /* Then try the PCI name */
+ sprintf(fw_name, "%s/pci-%s.nffw", DEFAULT_FW_PATH, dev->device.name);
+
+ PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
+ fw_f = open(fw_name, O_RDONLY);
+ if (fw_f > 0)
+ goto read_fw;
+
+ /* Finally try the card type and media */
+ sprintf(fw_name, "%s/%s", DEFAULT_FW_PATH, card);
+ PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
+ fw_f = open(fw_name, O_RDONLY);
+ if (fw_f < 0) {
+ PMD_DRV_LOG(INFO, "Firmware file %s not found.", fw_name);
+ return -ENOENT;
+ }
+
+read_fw:
+ if (fstat(fw_f, &file_stat) < 0) {
+ PMD_DRV_LOG(INFO, "Firmware file %s size is unknown", fw_name);
+ close(fw_f);
+ return -ENOENT;
+ }
+
+ fsize = file_stat.st_size;
+ PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %" PRIu64 "",
+ fw_name, (uint64_t)fsize);
+
+ fw_buf = malloc((size_t)fsize);
+ if (!fw_buf) {
+ PMD_DRV_LOG(INFO, "malloc failed for fw buffer");
+ close(fw_f);
+ return -ENOMEM;
+ }
+ memset(fw_buf, 0, fsize);
+
+ bytes = read(fw_f, fw_buf, fsize);
+ if (bytes != fsize) {
+ PMD_DRV_LOG(INFO, "Reading fw to buffer failed."
+ "Just %" PRIu64 " of %" PRIu64 " bytes read",
+ (uint64_t)bytes, (uint64_t)fsize);
+ free(fw_buf);
+ close(fw_f);
+ return -EIO;
+ }
+
+ PMD_DRV_LOG(INFO, "Uploading the firmware ...");
+ nfp_nsp_load_fw(nsp, fw_buf, bytes);
+ PMD_DRV_LOG(INFO, "Done");
+
+ free(fw_buf);
+ close(fw_f);
+
+ return 0;
+}
+
+static int
+nfp_fw_setup(struct rte_pci_device *dev, struct nfp_cpp *cpp,
+ struct nfp_eth_table *nfp_eth_table, struct nfp_hwinfo *hwinfo)
+{
+ struct nfp_nsp *nsp;
+ const char *nfp_fw_model;
+ char card_desc[100];
+ int err = 0;
+
+ nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno");
+
+ if (nfp_fw_model) {
+ PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model);
+ } else {
+ PMD_DRV_LOG(ERR, "firmware model NOT found");
+ return -EIO;
+ }
+
+ if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) {
+ PMD_DRV_LOG(ERR, "NFP ethernet table reports wrong ports: %u",
+ nfp_eth_table->count);
+ return -EIO;
+ }
+
+ PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports",
+ nfp_eth_table->count);
+
+ PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed);
+
+ sprintf(card_desc, "nic_%s_%dx%d.nffw", nfp_fw_model,
+ nfp_eth_table->count, nfp_eth_table->ports[0].speed / 1000);
+
+ nsp = nfp_nsp_open(cpp);
+ if (!nsp) {
+ PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
+ return -EIO;
+ }
+
+ nfp_nsp_device_soft_reset(nsp);
+ err = nfp_fw_upload(dev, nsp, card_desc);
+
+ nfp_nsp_close(nsp);
+ return err;
+}
+
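For reference, nfp_fw_upload() above tries three candidate firmware files under DEFAULT_FW_PATH in decreasing order of specificity (device serial, PCI device name, card type), and nfp_fw_setup() builds the last candidate from the hwinfo model and the port layout. A minimal sketch of that last name, using made-up values and assuming the eth table reports port speed in Mbps:

static void
nfp_fw_name_sketch(void)
{
	char card_desc[100];

	/* hwinfo "assembly.partno" and a 2 x 25G layout, both illustrative */
	sprintf(card_desc, "nic_%s_%dx%d.nffw", "AMDA0099-0001", 2,
		25000 / 1000);
	/*
	 * card_desc is now "nic_AMDA0099-0001_2x25.nffw", which
	 * nfp_fw_upload() looks up under /lib/firmware/netronome.
	 */
}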
static int nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *dev)
{
- nfpu_desc_t *nfpu_desc;
- nspu_desc_t *nspu_desc;
- uint64_t offset_symbol;
- uint8_t *bar_offset;
- int major, minor;
+ struct nfp_cpp *cpp;
+ struct nfp_hwinfo *hwinfo;
+ struct nfp_rtsym_table *sym_tbl;
+ struct nfp_eth_table *nfp_eth_table = NULL;
int total_ports;
void *priv = 0;
int ret = -ENODEV;
+ int err;
int i;
if (!dev)
return ret;
- nfpu_desc = rte_malloc("nfp nfpu", sizeof(nfpu_desc_t), 0);
- if (!nfpu_desc)
- return -ENOMEM;
+ /*
+ * When the device is bound to UIO it could, by mistake, be used by
+ * two DPDK apps at once, and the UIO driver does not prevent it. That
+ * could lead to serious problems when configuring the NFP CPP
+ * interface. Avoid this by telling the CPP init code to use a lock
+ * file if UIO is being used.
+ */
+ if (dev->kdrv == RTE_KDRV_VFIO)
+ cpp = nfp_cpp_from_device_name(dev->device.name, 0);
+ else
+ cpp = nfp_cpp_from_device_name(dev->device.name, 1);
- if (nfpu_open(dev, nfpu_desc, 0) < 0) {
- RTE_LOG(ERR, PMD,
- "nfpu_open failed\n");
- goto nfpu_error;
+ if (!cpp) {
+ PMD_DRV_LOG(ERR, "A CPP handle can not be obtained");
+ ret = -EIO;
+ goto error;
}
- nspu_desc = nfpu_desc->nspu;
+ hwinfo = nfp_hwinfo_read(cpp);
+ if (!hwinfo) {
+ PMD_DRV_LOG(ERR, "Error reading hwinfo table");
+ return -EIO;
+ }
+ nfp_eth_table = nfp_eth_read_ports(cpp);
+ if (!nfp_eth_table) {
+ PMD_DRV_LOG(ERR, "Error reading NFP ethernet table");
+ return -EIO;
+ }
- /* Check NSP ABI version */
- if (nfp_nsp_get_abi_version(nspu_desc, &major, &minor) < 0) {
- RTE_LOG(INFO, PMD, "NFP NSP not present\n");
+ if (nfp_fw_setup(dev, cpp, nfp_eth_table, hwinfo)) {
+ PMD_DRV_LOG(INFO, "Error when uploading firmware");
+ ret = -EIO;
goto error;
}
- PMD_INIT_LOG(INFO, "nspu ABI version: %d.%d\n", major, minor);
- if ((major == 0) && (minor < 20)) {
- RTE_LOG(INFO, PMD, "NFP NSP ABI version too old. Required 0.20 or higher\n");
+ /* Now the symbol table should be there */
+ sym_tbl = nfp_rtsym_table_read(cpp);
+ if (!sym_tbl) {
+ PMD_DRV_LOG(ERR, "Something is wrong with the firmware"
+ " symbol table");
+ ret = -EIO;
goto error;
}
- ret = nfp_nsp_fw_setup(nspu_desc, "nfd_cfg_pf0_num_ports",
- &offset_symbol);
- if (ret)
+ total_ports = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err);
+ if (total_ports != (int)nfp_eth_table->count) {
+ PMD_DRV_LOG(ERR, "Inconsistent number of ports");
+ ret = -EIO;
goto error;
-
- bar_offset = (uint8_t *)dev->mem_resource[0].addr;
- bar_offset += offset_symbol;
- total_ports = (uint32_t)*bar_offset;
- PMD_INIT_LOG(INFO, "Total pf ports: %d\n", total_ports);
+ }
+ PMD_INIT_LOG(INFO, "Total pf ports: %d", total_ports);
if (total_ports <= 0 || total_ports > 8) {
- RTE_LOG(ERR, PMD, "nfd_cfg_pf0_num_ports symbol with wrong value");
+ PMD_DRV_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
ret = -ENODEV;
goto error;
}
for (i = 0; i < total_ports; i++) {
- ret = nfp_pf_create_dev(dev, i, total_ports, nfpu_desc, &priv);
+ ret = nfp_pf_create_dev(dev, i, total_ports, cpp, hwinfo,
+ nfp_eth_table->ports[i].index,
+ sym_tbl, &priv);
if (ret)
- goto error;
+ break;
}
- return 0;
-
error:
- nfpu_close(nfpu_desc);
-nfpu_error:
- rte_free(nfpu_desc);
-
+ free(nfp_eth_table);
return ret;
}
@@ -3129,8 +3234,19 @@ static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
(pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
port = get_pf_port_number(eth_dev->data->name);
+ /*
+ * Hotplug is not possible with a multiport PF, although freeing the
+ * data structures can be done for the first port.
+ */
+ if (port != 0)
+ return -ENOTSUP;
hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
hw = &hwport0[port];
+ nfp_cpp_area_free(hw->ctrl_area);
+ nfp_cpp_area_free(hw->hwqueues_area);
+ free(hw->hwinfo);
+ free(hw->sym_tbl);
+ nfp_cpp_free(hw->cpp);
} else {
hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
}
diff --git a/drivers/net/nfp/nfp_net_ctrl.h b/drivers/net/nfp/nfp_net_ctrl.h
index 1ebd99ca..21e17da1 100644
--- a/drivers/net/nfp/nfp_net_ctrl.h
+++ b/drivers/net/nfp/nfp_net_ctrl.h
@@ -120,6 +120,9 @@
#define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* Enable VXLAN */
#define NFP_NET_CFG_CTRL_NVGRE (0x1 << 25) /* Enable NVGRE */
#define NFP_NET_CFG_CTRL_MSIX_TX_OFF (0x1 << 26) /* Disable MSIX for TX */
+#define NFP_NET_CFG_CTRL_LSO2 (0x1 << 28) /* LSO/TSO (version 2) */
+#define NFP_NET_CFG_CTRL_RSS2 (0x1 << 29) /* RSS (version 2) */
+#define NFP_NET_CFG_CTRL_LIVE_ADDR (0x1 << 31) /* live MAC addr change */
#define NFP_NET_CFG_UPDATE 0x0004
#define NFP_NET_CFG_UPDATE_GEN (0x1 << 0) /* General update */
#define NFP_NET_CFG_UPDATE_RING (0x1 << 1) /* Ring config change */
@@ -131,6 +134,7 @@
#define NFP_NET_CFG_UPDATE_RESET (0x1 << 7) /* Update due to FLR */
#define NFP_NET_CFG_UPDATE_IRQMOD (0x1 << 8) /* IRQ mod change */
#define NFP_NET_CFG_UPDATE_VXLAN (0x1 << 9) /* VXLAN port change */
+#define NFP_NET_CFG_UPDATE_MACADDR (0x1 << 11) /* MAC address change */
#define NFP_NET_CFG_UPDATE_ERR (0x1 << 31) /* An error occurred */
#define NFP_NET_CFG_TXRS_ENABLE 0x0008
#define NFP_NET_CFG_RXRS_ENABLE 0x0010
@@ -140,6 +144,8 @@
#define NFP_NET_CFG_LSC 0x0020
#define NFP_NET_CFG_MACADDR 0x0024
+#define NFP_NET_CFG_CTRL_LSO_ANY (NFP_NET_CFG_CTRL_LSO | NFP_NET_CFG_CTRL_LSO2)
+
/*
* Read-only words (0x0030 - 0x0050):
* @NFP_NET_CFG_VERSION: Firmware version number
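The new NFP_NET_CFG_CTRL_LSO_ANY mask is meant for callers that only care whether some LSO flavour is present. A small, hypothetical usage sketch (not part of this patch):

static int
nfp_net_enable_tso_sketch(struct nfp_net_hw *hw, uint32_t *ctrl)
{
	/* Refuse TSO if neither LSO capability bit is advertised */
	if (!(hw->cap & NFP_NET_CFG_CTRL_LSO_ANY))
		return -ENOTSUP;

	/* Prefer the v2 bit when the firmware exposes it */
	*ctrl |= (hw->cap & NFP_NET_CFG_CTRL_LSO2) ?
		 NFP_NET_CFG_CTRL_LSO2 : NFP_NET_CFG_CTRL_LSO;
	return 0;
}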
diff --git a/drivers/net/nfp/nfp_net_eth.h b/drivers/net/nfp/nfp_net_eth.h
deleted file mode 100644
index af57f03c..00000000
--- a/drivers/net/nfp/nfp_net_eth.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 2017 Netronome Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from this
- * software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * vim:shiftwidth=8:noexpandtab
- *
- * @file dpdk/pmd/nfp_net_eth.h
- *
- * Netronome NFP_NET PDM driver
- */
-
-union eth_table_entry {
- struct {
- uint64_t port;
- uint64_t state;
- uint8_t mac_addr[6];
- uint8_t resv[2];
- uint64_t control;
- };
- uint64_t raw[4];
-};
-
-#ifndef BIT_ULL
-#define BIT_ULL(a) (1ULL << (a))
-#endif
-
-#define NSP_ETH_NBI_PORT_COUNT 24
-#define NSP_ETH_MAX_COUNT (2 * NSP_ETH_NBI_PORT_COUNT)
-#define NSP_ETH_TABLE_SIZE (NSP_ETH_MAX_COUNT * sizeof(union eth_table_entry))
-
-#define NSP_ETH_PORT_LANES 0xf
-#define NSP_ETH_PORT_INDEX 0xff00
-#define NSP_ETH_PORT_LABEL 0x3f000000000000
-#define NSP_ETH_PORT_PHYLABEL 0xfc0000000000000
-
-#define NSP_ETH_PORT_LANES_MASK rte_cpu_to_le_64(NSP_ETH_PORT_LANES)
-
-#define NSP_ETH_STATE_CONFIGURED BIT_ULL(0)
-#define NSP_ETH_STATE_ENABLED BIT_ULL(1)
-#define NSP_ETH_STATE_TX_ENABLED BIT_ULL(2)
-#define NSP_ETH_STATE_RX_ENABLED BIT_ULL(3)
-#define NSP_ETH_STATE_RATE 0xf00
-#define NSP_ETH_STATE_INTERFACE 0xff000
-#define NSP_ETH_STATE_MEDIA 0x300000
-#define NSP_ETH_STATE_OVRD_CHNG BIT_ULL(22)
-#define NSP_ETH_STATE_ANEG 0x3800000
-
-#define NSP_ETH_CTRL_CONFIGURED BIT_ULL(0)
-#define NSP_ETH_CTRL_ENABLED BIT_ULL(1)
-#define NSP_ETH_CTRL_TX_ENABLED BIT_ULL(2)
-#define NSP_ETH_CTRL_RX_ENABLED BIT_ULL(3)
-#define NSP_ETH_CTRL_SET_RATE BIT_ULL(4)
-#define NSP_ETH_CTRL_SET_LANES BIT_ULL(5)
-#define NSP_ETH_CTRL_SET_ANEG BIT_ULL(6)
diff --git a/drivers/net/nfp/nfp_net_logs.h b/drivers/net/nfp/nfp_net_logs.h
index 3fe24e96..9952881c 100644
--- a/drivers/net/nfp/nfp_net_logs.h
+++ b/drivers/net/nfp/nfp_net_logs.h
@@ -36,19 +36,20 @@
extern int nfp_logtype_init;
#define PMD_INIT_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+ rte_log(RTE_LOG_ ## level, nfp_logtype_init, \
+ "%s(): " fmt "\n", __func__, ## args)
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
#ifdef RTE_LIBRTE_NFP_NET_DEBUG_RX
#define PMD_RX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s() rx: " fmt, __func__, ## args)
+ RTE_LOG(level, PMD, "%s() rx: " fmt "\n", __func__, ## args)
#else
#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
#endif
#ifdef RTE_LIBRTE_NFP_NET_DEBUG_TX
#define PMD_TX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s() tx: " fmt, __func__, ## args)
+ RTE_LOG(level, PMD, "%s() tx: " fmt "\n", __func__, ## args)
#define ASSERT(x) if (!(x)) rte_panic("NFP_NET: x")
#else
#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
@@ -58,6 +59,6 @@ extern int nfp_logtype_init;
extern int nfp_logtype_driver;
#define PMD_DRV_LOG(level, fmt, args...) \
rte_log(RTE_LOG_ ## level, nfp_logtype_driver, \
- "%s(): " fmt, __func__, ## args)
+ "%s(): " fmt "\n", __func__, ## args)
#endif /* _NFP_NET_LOGS_H_ */
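The dynamic log types referenced by these macros have to be registered once at startup; a sketch of what that registration typically looks like, with illustrative log names:

static void
nfp_register_logs_sketch(void)
{
	nfp_logtype_init = rte_log_register("pmd.net.nfp.init");
	if (nfp_logtype_init >= 0)
		rte_log_set_level(nfp_logtype_init, RTE_LOG_NOTICE);

	nfp_logtype_driver = rte_log_register("pmd.net.nfp.driver");
	if (nfp_logtype_driver >= 0)
		rte_log_set_level(nfp_logtype_driver, RTE_LOG_NOTICE);
}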
diff --git a/drivers/net/nfp/nfp_net_pmd.h b/drivers/net/nfp/nfp_net_pmd.h
index 1ae0ea62..c1b044ee 100644
--- a/drivers/net/nfp/nfp_net_pmd.h
+++ b/drivers/net/nfp/nfp_net_pmd.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2015 Netronome Systems, Inc.
+ * Copyright (c) 2014-2018 Netronome Systems, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -63,6 +63,7 @@ struct nfp_net_adapter;
#define NFP_NET_CRTL_BAR 0
#define NFP_NET_TX_BAR 2
#define NFP_NET_RX_BAR 2
+#define NFP_QCP_QUEUE_AREA_SZ 0x80000
/* Macros for accessing the Queue Controller Peripheral 'CSRs' */
#define NFP_QCP_QUEUE_OFF(_x) ((_x) * 0x800)
@@ -184,18 +185,28 @@ static inline void nn_writeq(uint64_t val, volatile void *addr)
struct nfp_net_tx_desc {
union {
struct {
- uint8_t dma_addr_hi; /* High bits of host buf address */
+ uint8_t dma_addr_hi; /* High bits of host buf address */
__le16 dma_len; /* Length to DMA for this desc */
- uint8_t offset_eop; /* Offset in buf where pkt starts +
+ uint8_t offset_eop; /* Offset in buf where pkt starts +
* highest bit is eop flag.
*/
__le32 dma_addr_lo; /* Low 32bit of host buf addr */
- __le16 lso; /* MSS to be used for LSO */
- uint8_t l4_offset; /* LSO, where the L4 data starts */
- uint8_t flags; /* TX Flags, see @PCIE_DESC_TX_* */
-
- __le16 vlan; /* VLAN tag to add if indicated */
+ __le16 mss; /* MSS to be used for LSO */
+ uint8_t lso_hdrlen; /* LSO, where the data starts */
+ uint8_t flags; /* TX Flags, see @PCIE_DESC_TX_* */
+
+ union {
+ struct {
+ /*
+ * L3 and L4 header offsets required
+ * for TSOv2
+ */
+ uint8_t l3_offset;
+ uint8_t l4_offset;
+ };
+ __le16 vlan; /* VLAN tag to add if indicated */
+ };
__le16 data_len; /* Length of frame + meta data */
} __attribute__((__packed__));
__le32 vals[4];
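The new union makes the VLAN/TSOv2 conflict explicit: with TSOv2 the two descriptor bytes that used to carry the VLAN tag carry the L3/L4 header offsets instead, which is why nfp_net_init() clears NFP_NET_CFG_CTRL_TXVLAN when LSO2 is advertised. A hypothetical fill helper (not part of this patch), with byte-order handling omitted:

static void
nfp_tx_desc_offload_sketch(struct nfp_net_tx_desc *txd, struct rte_mbuf *mb,
			   int lso2)
{
	if (lso2) {
		txd->mss = mb->tso_segsz;
		txd->lso_hdrlen = mb->l2_len + mb->l3_len + mb->l4_len;
		txd->l3_offset = mb->l2_len;		  /* where L3 starts */
		txd->l4_offset = mb->l2_len + mb->l3_len; /* where L4 starts */
	} else {
		txd->vlan = mb->vlan_tci;	/* VLAN tag to insert */
	}
}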
@@ -247,15 +258,13 @@ struct nfp_net_txq {
/*
* At this point 48 bytes have been used for all the fields in the
* TX critical path. We have room for 8 bytes and still all placed
- * in a cache line. We are not using the threshold values below nor
- * the txq_flags but if we need to, we can add the most used in the
- * remaining bytes.
+ * in a cache line. We are not using the threshold values below but
+ * if we need to, we can add the most-used fields in the remaining bytes.
*/
uint32_t tx_rs_thresh; /* not used by now. Future? */
uint32_t tx_pthresh; /* not used by now. Future? */
uint32_t tx_hthresh; /* not used by now. Future? */
uint32_t tx_wthresh; /* not used by now. Future? */
- uint32_t txq_flags; /* not used by now. Future? */
uint16_t port_id;
int qidx;
int tx_qcidx;
@@ -430,20 +439,21 @@ struct nfp_net_hw {
/* Records starting point for counters */
struct rte_eth_stats eth_stats_base;
-#ifdef NFP_NET_LIBNFP
struct nfp_cpp *cpp;
struct nfp_cpp_area *ctrl_area;
- struct nfp_cpp_area *tx_area;
- struct nfp_cpp_area *rx_area;
+ struct nfp_cpp_area *hwqueues_area;
struct nfp_cpp_area *msix_area;
-#endif
+
uint8_t *hw_queues;
uint8_t is_pf;
uint8_t pf_port_idx;
uint8_t pf_multiport_enabled;
+ uint8_t total_ports;
+
union eth_table_entry *eth_table;
- nspu_desc_t *nspu_desc;
- nfpu_desc_t *nfpu_desc;
+
+ struct nfp_hwinfo *hwinfo;
+ struct nfp_rtsym_table *sym_tbl;
};
struct nfp_net_adapter {
diff --git a/drivers/net/nfp/nfp_nfpu.c b/drivers/net/nfp/nfp_nfpu.c
deleted file mode 100644
index f11afef3..00000000
--- a/drivers/net/nfp/nfp_nfpu.c
+++ /dev/null
@@ -1,108 +0,0 @@
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <unistd.h>
-#include <errno.h>
-#include <sys/file.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <sys/types.h>
-
-#include <rte_bus_pci.h>
-#include <rte_malloc.h>
-
-#include "nfp_nfpu.h"
-
-/* PF BAR and expansion BAR for the NSP interface */
-#define NFP_CFG_PCIE_BAR 0
-#define NFP_CFG_EXP_BAR 7
-
-#define NFP_CFG_EXP_BAR_CFG_BASE 0x30000
-
-/* There could be other NFP userspace tools using the NSP interface.
- * Make sure there is no other process using it and locking the access for
- * avoiding problems.
- */
-static int
-nspv_aquire_process_lock(nfpu_desc_t *desc)
-{
- int rc;
- struct flock lock;
- char lockname[30];
-
- memset(&lock, 0, sizeof(lock));
-
- snprintf(lockname, sizeof(lockname), "/var/lock/nfp%d", desc->nfp);
-
- /* Using S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH */
- desc->lock = open(lockname, O_RDWR | O_CREAT, 0666);
-
- if (desc->lock < 0)
- return desc->lock;
-
- lock.l_type = F_WRLCK;
- lock.l_whence = SEEK_SET;
- rc = -1;
- while (rc != 0) {
- rc = fcntl(desc->lock, F_SETLK, &lock);
- if (rc < 0) {
- if ((errno != EAGAIN) && (errno != EACCES)) {
- close(desc->lock);
- return rc;
- }
- }
- }
-
- return 0;
-}
-
-int
-nfpu_open(struct rte_pci_device *pci_dev, nfpu_desc_t *desc, int nfp)
-{
- void *cfg_base, *mem_base;
- size_t barsz;
- int ret = 0;
- int i = 0;
-
- desc->nfp = nfp;
-
- ret = nspv_aquire_process_lock(desc);
- if (ret)
- return -1;
-
- barsz = pci_dev->mem_resource[0].len;
-
- /* barsz in log2 */
- while (barsz >>= 1)
- i++;
-
- barsz = i;
-
- /* Sanity check: we can assume any bar size less than 1MB an error */
- if (barsz < 20)
- return -1;
-
- /* Getting address for NFP expansion BAR registers */
- cfg_base = pci_dev->mem_resource[0].addr;
- cfg_base = (uint8_t *)cfg_base + NFP_CFG_EXP_BAR_CFG_BASE;
-
- /* Getting address for NFP NSP interface registers */
- mem_base = pci_dev->mem_resource[0].addr;
- mem_base = (uint8_t *)mem_base + (NFP_CFG_EXP_BAR << (barsz - 3));
-
-
- desc->nspu = rte_malloc("nfp nspu", sizeof(nspu_desc_t), 0);
- nfp_nspu_init(desc->nspu, desc->nfp, NFP_CFG_PCIE_BAR, barsz,
- NFP_CFG_EXP_BAR, cfg_base, mem_base);
-
- return ret;
-}
-
-int
-nfpu_close(nfpu_desc_t *desc)
-{
- rte_free(desc->nspu);
- close(desc->lock);
- unlink("/var/lock/nfp0");
- return 0;
-}
diff --git a/drivers/net/nfp/nfp_nfpu.h b/drivers/net/nfp/nfp_nfpu.h
deleted file mode 100644
index e56fa099..00000000
--- a/drivers/net/nfp/nfp_nfpu.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2017 Netronome Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from this
- * software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * vim:shiftwidth=8:noexpandtab
- *
- * @file dpdk/pmd/nfp_nfpu.h
- *
- * Netronome NFP_NET PDM driver
- */
-
-/*
- * NFP User interface creates a window for talking with NFP NSP processor
- */
-
-
-#include <rte_bus_pci.h>
-#include "nfp_nspu.h"
-
-typedef struct {
- int nfp;
- int lock;
- nspu_desc_t *nspu;
-} nfpu_desc_t;
-
-int nfpu_open(struct rte_pci_device *pci_dev, nfpu_desc_t *desc, int nfp);
-int nfpu_close(nfpu_desc_t *desc);
diff --git a/drivers/net/nfp/nfp_nspu.c b/drivers/net/nfp/nfp_nspu.c
deleted file mode 100644
index f9089832..00000000
--- a/drivers/net/nfp/nfp_nspu.c
+++ /dev/null
@@ -1,642 +0,0 @@
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <sys/file.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-
-#include <rte_log.h>
-#include <rte_byteorder.h>
-
-#include "nfp_nfpu.h"
-
-#define CFG_EXP_BAR_ADDR_SZ 1
-#define CFG_EXP_BAR_MAP_TYPE 1
-
-#define EXP_BAR_TARGET_SHIFT 23
-#define EXP_BAR_LENGTH_SHIFT 27 /* 0=32, 1=64 bit increment */
-#define EXP_BAR_MAP_TYPE_SHIFT 29 /* Bulk BAR map */
-
-/* NFP target for NSP access */
-#define NFP_NSP_TARGET 7
-
-/* Expansion BARs for mapping PF vnic BARs */
-#define NFP_NET_PF_CFG_EXP_BAR 6
-#define NFP_NET_PF_HW_QUEUES_EXP_BAR 5
-
-/*
- * This is an NFP internal address used for configuring properly an NFP
- * expansion BAR.
- */
-#define MEM_CMD_BASE_ADDR 0x8100000000
-
-/* NSP interface registers */
-#define NSP_BASE (MEM_CMD_BASE_ADDR + 0x22100)
-#define NSP_STATUS 0x00
-#define NSP_COMMAND 0x08
-#define NSP_BUFFER 0x10
-#define NSP_DEFAULT_BUF 0x18
-#define NSP_DEFAULT_BUF_CFG 0x20
-
-#define NSP_MAGIC 0xab10
-#define NSP_STATUS_MAGIC(x) (((x) >> 48) & 0xffff)
-#define NSP_STATUS_MAJOR(x) (int)(((x) >> 44) & 0xf)
-#define NSP_STATUS_MINOR(x) (int)(((x) >> 32) & 0xfff)
-
-/* NSP commands */
-#define NSP_CMD_RESET 1
-#define NSP_CMD_FW_LOAD 6
-#define NSP_CMD_READ_ETH_TABLE 7
-#define NSP_CMD_WRITE_ETH_TABLE 8
-#define NSP_CMD_GET_SYMBOL 14
-
-#define NSP_BUFFER_CFG_SIZE_MASK (0xff)
-
-#define NSP_REG_ADDR(d, off, reg) ((uint8_t *)(d)->mem_base + (off) + (reg))
-#define NSP_REG_VAL(p) (*(uint64_t *)(p))
-
-/*
- * An NFP expansion BAR is configured for allowing access to a specific NFP
- * target:
- *
- * IN:
- * desc: struct with basic NSP addresses to work with
- * expbar: NFP PF expansion BAR index to configure
- * tgt: NFP target to configure access
- * addr: NFP target address
- *
- * OUT:
- * pcie_offset: NFP PCI BAR offset to work with
- */
-static void
-nfp_nspu_mem_bar_cfg(nspu_desc_t *desc, int expbar, int tgt,
- uint64_t addr, uint64_t *pcie_offset)
-{
- uint64_t x, y, barsz;
- uint32_t *expbar_ptr;
-
- barsz = desc->barsz;
-
- /*
- * NFP CPP address to configure. This comes from NFP 6000
- * datasheet document based on Bulk mapping.
- */
- x = (addr >> (barsz - 3)) << (21 - (40 - (barsz - 3)));
- x |= CFG_EXP_BAR_MAP_TYPE << EXP_BAR_MAP_TYPE_SHIFT;
- x |= CFG_EXP_BAR_ADDR_SZ << EXP_BAR_LENGTH_SHIFT;
- x |= tgt << EXP_BAR_TARGET_SHIFT;
-
- /* Getting expansion bar configuration register address */
- expbar_ptr = (uint32_t *)desc->cfg_base;
- /* Each physical PCI BAR has 8 NFP expansion BARs */
- expbar_ptr += (desc->pcie_bar * 8) + expbar;
-
- /* Writing to the expansion BAR register */
- *expbar_ptr = (uint32_t)x;
-
- /* Getting the pcie offset to work with from userspace */
- y = addr & ((uint64_t)(1 << (barsz - 3)) - 1);
- *pcie_offset = y;
-}
-
-/*
- * Configuring an expansion bar for accessing NSP userspace interface. This
- * function configures always the same expansion bar, which implies access to
- * previously configured NFP target is lost.
- */
-static void
-nspu_xlate(nspu_desc_t *desc, uint64_t addr, uint64_t *pcie_offset)
-{
- nfp_nspu_mem_bar_cfg(desc, desc->exp_bar, NFP_NSP_TARGET, addr,
- pcie_offset);
-}
-
-int
-nfp_nsp_get_abi_version(nspu_desc_t *desc, int *major, int *minor)
-{
- uint64_t pcie_offset;
- uint64_t nsp_reg;
-
- nspu_xlate(desc, NSP_BASE, &pcie_offset);
- nsp_reg = NSP_REG_VAL(NSP_REG_ADDR(desc, pcie_offset, NSP_STATUS));
-
- if (NSP_STATUS_MAGIC(nsp_reg) != NSP_MAGIC)
- return -1;
-
- *major = NSP_STATUS_MAJOR(nsp_reg);
- *minor = NSP_STATUS_MINOR(nsp_reg);
-
- return 0;
-}
-
-int
-nfp_nspu_init(nspu_desc_t *desc, int nfp, int pcie_bar, size_t pcie_barsz,
- int exp_bar, void *exp_bar_cfg_base, void *exp_bar_mmap)
-{
- uint64_t offset, buffaddr;
- uint64_t nsp_reg;
-
- desc->nfp = nfp;
- desc->pcie_bar = pcie_bar;
- desc->exp_bar = exp_bar;
- desc->barsz = pcie_barsz;
- desc->windowsz = 1 << (desc->barsz - 3);
- desc->cfg_base = exp_bar_cfg_base;
- desc->mem_base = exp_bar_mmap;
-
- nspu_xlate(desc, NSP_BASE, &offset);
-
- /*
- * Other NSPU clients can use other buffers. Let's tell NSPU we use the
- * default buffer.
- */
- buffaddr = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_DEFAULT_BUF));
- NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_BUFFER)) = buffaddr;
-
- /* NFP internal addresses are 40 bits. Clean all other bits here */
- buffaddr = buffaddr & (((uint64_t)1 << 40) - 1);
- desc->bufaddr = buffaddr;
-
- /* Lets get information about the buffer */
- nsp_reg = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_DEFAULT_BUF_CFG));
-
- /* Buffer size comes in MBs. Coversion to bytes */
- desc->buf_size = ((size_t)nsp_reg & NSP_BUFFER_CFG_SIZE_MASK) << 20;
-
- return 0;
-}
-
-#define NSPU_NFP_BUF(addr, base, off) \
- (*(uint64_t *)((uint8_t *)(addr)->mem_base + ((base) | (off))))
-
-#define NSPU_HOST_BUF(base, off) (*(uint64_t *)((uint8_t *)(base) + (off)))
-
-static int
-nspu_buff_write(nspu_desc_t *desc, void *buffer, size_t size)
-{
- uint64_t pcie_offset, pcie_window_base, pcie_window_offset;
- uint64_t windowsz = desc->windowsz;
- uint64_t buffaddr, j, i = 0;
- int ret = 0;
-
- if (size > desc->buf_size)
- return -1;
-
- buffaddr = desc->bufaddr;
- windowsz = desc->windowsz;
-
- while (i < size) {
- /* Expansion bar reconfiguration per window size */
- nspu_xlate(desc, buffaddr + i, &pcie_offset);
- pcie_window_base = pcie_offset & (~(windowsz - 1));
- pcie_window_offset = pcie_offset & (windowsz - 1);
- for (j = pcie_window_offset; ((j < windowsz) && (i < size));
- j += 8) {
- NSPU_NFP_BUF(desc, pcie_window_base, j) =
- NSPU_HOST_BUF(buffer, i);
- i += 8;
- }
- }
-
- return ret;
-}
-
-static int
-nspu_buff_read(nspu_desc_t *desc, void *buffer, size_t size)
-{
- uint64_t pcie_offset, pcie_window_base, pcie_window_offset;
- uint64_t windowsz, i = 0, j;
- uint64_t buffaddr;
- int ret = 0;
-
- if (size > desc->buf_size)
- return -1;
-
- buffaddr = desc->bufaddr;
- windowsz = desc->windowsz;
-
- while (i < size) {
- /* Expansion bar reconfiguration per window size */
- nspu_xlate(desc, buffaddr + i, &pcie_offset);
- pcie_window_base = pcie_offset & (~(windowsz - 1));
- pcie_window_offset = pcie_offset & (windowsz - 1);
- for (j = pcie_window_offset; ((j < windowsz) && (i < size));
- j += 8) {
- NSPU_HOST_BUF(buffer, i) =
- NSPU_NFP_BUF(desc, pcie_window_base, j);
- i += 8;
- }
- }
-
- return ret;
-}
-
-static int
-nspu_command(nspu_desc_t *desc, uint16_t cmd, int read, int write,
- void *buffer, size_t rsize, size_t wsize)
-{
- uint64_t status, cmd_reg;
- uint64_t offset;
- int retry = 0;
- int retries = 120;
- int ret = 0;
-
- /* Same expansion BAR is used for different things */
- nspu_xlate(desc, NSP_BASE, &offset);
-
- status = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_STATUS));
-
- while ((status & 0x1) && (retry < retries)) {
- status = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_STATUS));
- retry++;
- sleep(1);
- }
-
- if (retry == retries)
- return -1;
-
- if (write) {
- ret = nspu_buff_write(desc, buffer, wsize);
- if (ret)
- return ret;
-
- /* Expansion BAR changes when writing the buffer */
- nspu_xlate(desc, NSP_BASE, &offset);
- }
-
- NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_COMMAND)) =
- (uint64_t)wsize << 32 | (uint64_t)cmd << 16 | 1;
-
- retry = 0;
-
- cmd_reg = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_COMMAND));
- while ((cmd_reg & 0x1) && (retry < retries)) {
- cmd_reg = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_COMMAND));
- retry++;
- sleep(1);
- }
- if (retry == retries)
- return -1;
-
- retry = 0;
- status = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_STATUS));
- while ((status & 0x1) && (retry < retries)) {
- status = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_STATUS));
- retry++;
- sleep(1);
- }
-
- if (retry == retries)
- return -1;
-
- ret = status & (0xff << 8);
- if (ret)
- return ret;
-
- if (read) {
- ret = nspu_buff_read(desc, buffer, rsize);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
-static int
-nfp_fw_reset(nspu_desc_t *nspu_desc)
-{
- int res;
-
- res = nspu_command(nspu_desc, NSP_CMD_RESET, 0, 0, 0, 0, 0);
-
- if (res < 0)
- RTE_LOG(INFO, PMD, "fw reset failed: error %d", res);
-
- return res;
-}
-
-#define DEFAULT_FW_PATH "/lib/firmware/netronome"
-#define DEFAULT_FW_FILENAME "nic_dpdk_default.nffw"
-
-static int
-nfp_fw_upload(nspu_desc_t *nspu_desc)
-{
- int fw_f;
- char *fw_buf;
- char filename[100];
- struct stat file_stat;
- off_t fsize, bytes;
- ssize_t size;
- int ret;
-
- size = nspu_desc->buf_size;
-
- sprintf(filename, "%s/%s", DEFAULT_FW_PATH, DEFAULT_FW_FILENAME);
- fw_f = open(filename, O_RDONLY);
- if (fw_f < 0) {
- RTE_LOG(INFO, PMD, "Firmware file %s/%s not found.",
- DEFAULT_FW_PATH, DEFAULT_FW_FILENAME);
- return -ENOENT;
- }
-
- if (fstat(fw_f, &file_stat) < 0) {
- RTE_LOG(INFO, PMD, "Firmware file %s/%s size is unknown",
- DEFAULT_FW_PATH, DEFAULT_FW_FILENAME);
- close(fw_f);
- return -ENOENT;
- }
-
- fsize = file_stat.st_size;
- RTE_LOG(DEBUG, PMD, "Firmware file with size: %" PRIu64 "\n",
- (uint64_t)fsize);
-
- if (fsize > (off_t)size) {
- RTE_LOG(INFO, PMD, "fw file too big: %" PRIu64
- " bytes (%" PRIu64 " max)",
- (uint64_t)fsize, (uint64_t)size);
- close(fw_f);
- return -EINVAL;
- }
-
- fw_buf = malloc((size_t)size);
- if (!fw_buf) {
- RTE_LOG(INFO, PMD, "malloc failed for fw buffer");
- close(fw_f);
- return -ENOMEM;
- }
- memset(fw_buf, 0, size);
-
- bytes = read(fw_f, fw_buf, fsize);
- if (bytes != fsize) {
- RTE_LOG(INFO, PMD, "Reading fw to buffer failed.\n"
- "Just %" PRIu64 " of %" PRIu64 " bytes read.",
- (uint64_t)bytes, (uint64_t)fsize);
- free(fw_buf);
- close(fw_f);
- return -EIO;
- }
-
- ret = nspu_command(nspu_desc, NSP_CMD_FW_LOAD, 0, 1, fw_buf, 0, bytes);
-
- free(fw_buf);
- close(fw_f);
-
- return ret;
-}
-
-/* Firmware symbol descriptor size */
-#define NFP_SYM_DESC_LEN 40
-
-#define SYMBOL_DATA(b, off) (*(int64_t *)((b) + (off)))
-#define SYMBOL_UDATA(b, off) (*(uint64_t *)((b) + (off)))
-
-/* Firmware symbols contain information about how to access what they
- * represent. It can be as simple as an numeric variable declared at a
- * specific NFP memory, but it can also be more complex structures and
- * related to specific hardware functionalities or components. Target,
- * domain and address allow to create the BAR window for accessing such
- * hw object and size defines the length to map.
- *
- * A vNIC is a network interface implemented inside the NFP and using a
- * subset of device PCI BARs. Specific firmware symbols allow to map those
- * vNIC bars by host drivers like the NFP PMD.
- *
- * Accessing what the symbol represents implies to map the access through
- * a PCI BAR window. NFP expansion BARs are used in this regard through
- * the NSPU interface.
- */
-static int
-nfp_nspu_set_bar_from_symbl(nspu_desc_t *desc, const char *symbl,
- uint32_t expbar, uint64_t *pcie_offset,
- ssize_t *size)
-{
- int64_t type;
- int64_t target;
- int64_t domain;
- uint64_t addr;
- char *sym_buf;
- int ret = 0;
-
- sym_buf = malloc(desc->buf_size);
- if (!sym_buf)
- return -ENOMEM;
-
- strncpy(sym_buf, symbl, strlen(symbl));
- ret = nspu_command(desc, NSP_CMD_GET_SYMBOL, 1, 1, sym_buf,
- NFP_SYM_DESC_LEN, strlen(symbl));
- if (ret) {
- RTE_LOG(DEBUG, PMD, "symbol resolution (%s) failed\n", symbl);
- goto clean;
- }
-
- /* Reading symbol information */
- type = SYMBOL_DATA(sym_buf, 0);
- target = SYMBOL_DATA(sym_buf, 8);
- domain = SYMBOL_DATA(sym_buf, 16);
- addr = SYMBOL_UDATA(sym_buf, 24);
- *size = (ssize_t)SYMBOL_UDATA(sym_buf, 32);
-
- if (type != 1) {
- RTE_LOG(INFO, PMD, "wrong symbol type\n");
- ret = -EINVAL;
- goto clean;
- }
- if (!(target == 7 || target == -7)) {
- RTE_LOG(INFO, PMD, "wrong symbol target\n");
- ret = -EINVAL;
- goto clean;
- }
- if (domain == 8 || domain == 9) {
- RTE_LOG(INFO, PMD, "wrong symbol domain\n");
- ret = -EINVAL;
- goto clean;
- }
-
- /* Adjusting address based on symbol location */
- if ((domain >= 24) && (domain < 28) && (target == 7)) {
- addr = 1ULL << 37 | addr | ((uint64_t)domain & 0x3) << 35;
- } else {
- addr = 1ULL << 39 | addr | ((uint64_t)domain & 0x3f) << 32;
- if (target == -7)
- target = 7;
- }
-
- /* Configuring NFP expansion bar for mapping specific PCI BAR window */
- nfp_nspu_mem_bar_cfg(desc, expbar, target, addr, pcie_offset);
-
- /* This is the PCI BAR offset to use by the host */
- *pcie_offset |= ((expbar & 0x7) << (desc->barsz - 3));
-
-clean:
- free(sym_buf);
- return ret;
-}
-
-int
-nfp_nsp_fw_setup(nspu_desc_t *desc, const char *sym, uint64_t *pcie_offset)
-{
- ssize_t bar0_sym_size;
-
- /* If the symbol resolution works, it implies a firmware app
- * is already there.
- */
- if (!nfp_nspu_set_bar_from_symbl(desc, sym, NFP_NET_PF_CFG_EXP_BAR,
- pcie_offset, &bar0_sym_size))
- return 0;
-
- /* No firmware app detected or not the right one */
- RTE_LOG(INFO, PMD, "No firmware detected. Resetting NFP...\n");
- if (nfp_fw_reset(desc) < 0) {
- RTE_LOG(ERR, PMD, "nfp fw reset failed\n");
- return -ENODEV;
- }
-
- RTE_LOG(INFO, PMD, "Reset done.\n");
- RTE_LOG(INFO, PMD, "Uploading firmware...\n");
-
- if (nfp_fw_upload(desc) < 0) {
- RTE_LOG(ERR, PMD, "nfp fw upload failed\n");
- return -ENODEV;
- }
-
- RTE_LOG(INFO, PMD, "Done.\n");
-
- /* Now the symbol should be there */
- if (nfp_nspu_set_bar_from_symbl(desc, sym, NFP_NET_PF_CFG_EXP_BAR,
- pcie_offset, &bar0_sym_size)) {
- RTE_LOG(ERR, PMD, "nfp PF BAR symbol resolution failed\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
-int
-nfp_nsp_map_ctrl_bar(nspu_desc_t *desc, uint64_t *pcie_offset)
-{
- ssize_t bar0_sym_size;
-
- if (nfp_nspu_set_bar_from_symbl(desc, "_pf0_net_bar0",
- NFP_NET_PF_CFG_EXP_BAR,
- pcie_offset, &bar0_sym_size))
- return -ENODEV;
-
- return 0;
-}
-
-/*
- * This is a hardcoded fixed NFP internal CPP bus address for the hw queues unit
- * inside the PCIE island.
- */
-#define NFP_CPP_PCIE_QUEUES ((uint64_t)(1ULL << 39) | 0x80000 | \
- ((uint64_t)0x4 & 0x3f) << 32)
-
-/* Configure a specific NFP expansion bar for accessing the vNIC rx/tx BARs */
-void
-nfp_nsp_map_queues_bar(nspu_desc_t *desc, uint64_t *pcie_offset)
-{
- nfp_nspu_mem_bar_cfg(desc, NFP_NET_PF_HW_QUEUES_EXP_BAR, 0,
- NFP_CPP_PCIE_QUEUES, pcie_offset);
-
- /* This is the pcie offset to use by the host */
- *pcie_offset |= ((NFP_NET_PF_HW_QUEUES_EXP_BAR & 0x7) << (27 - 3));
-}
-
-int
-nfp_nsp_eth_config(nspu_desc_t *desc, int port, int up)
-{
- union eth_table_entry *entries, *entry;
- int modified;
- int ret, idx;
- int i;
-
- idx = port;
-
- RTE_LOG(INFO, PMD, "Hw ethernet port %d configure...\n", port);
- rte_spinlock_lock(&desc->nsp_lock);
- entries = malloc(NSP_ETH_TABLE_SIZE);
- if (!entries) {
- rte_spinlock_unlock(&desc->nsp_lock);
- return -ENOMEM;
- }
-
- ret = nspu_command(desc, NSP_CMD_READ_ETH_TABLE, 1, 0, entries,
- NSP_ETH_TABLE_SIZE, 0);
- if (ret) {
- rte_spinlock_unlock(&desc->nsp_lock);
- free(entries);
- return ret;
- }
-
- entry = entries;
-
- for (i = 0; i < NSP_ETH_MAX_COUNT; i++) {
- /* ports in use do not appear sequentially in the table */
- if (!(entry->port & NSP_ETH_PORT_LANES_MASK)) {
- /* entry not in use */
- entry++;
- continue;
- }
- if (idx == 0)
- break;
- idx--;
- entry++;
- }
-
- if (i == NSP_ETH_MAX_COUNT) {
- rte_spinlock_unlock(&desc->nsp_lock);
- free(entries);
- return -EINVAL;
- }
-
- if (up && !(entry->state & NSP_ETH_STATE_CONFIGURED)) {
- entry->control |= NSP_ETH_STATE_CONFIGURED;
- modified = 1;
- }
-
- if (!up && (entry->state & NSP_ETH_STATE_CONFIGURED)) {
- entry->control &= ~NSP_ETH_STATE_CONFIGURED;
- modified = 1;
- }
-
- if (modified) {
- ret = nspu_command(desc, NSP_CMD_WRITE_ETH_TABLE, 0, 1, entries,
- 0, NSP_ETH_TABLE_SIZE);
- if (!ret)
- RTE_LOG(INFO, PMD,
- "Hw ethernet port %d configure done\n", port);
- else
- RTE_LOG(INFO, PMD,
- "Hw ethernet port %d configure failed\n", port);
- }
- rte_spinlock_unlock(&desc->nsp_lock);
- free(entries);
- return ret;
-}
-
-int
-nfp_nsp_eth_read_table(nspu_desc_t *desc, union eth_table_entry **table)
-{
- int ret;
-
- if (!table)
- return -EINVAL;
-
- RTE_LOG(INFO, PMD, "Reading hw ethernet table...\n");
-
- /* port 0 allocates the eth table and read it using NSPU */
- *table = malloc(NSP_ETH_TABLE_SIZE);
- if (!*table)
- return -ENOMEM;
-
- ret = nspu_command(desc, NSP_CMD_READ_ETH_TABLE, 1, 0, *table,
- NSP_ETH_TABLE_SIZE, 0);
- if (ret)
- return ret;
-
- RTE_LOG(INFO, PMD, "Done\n");
-
- return 0;
-}
diff --git a/drivers/net/nfp/nfp_nspu.h b/drivers/net/nfp/nfp_nspu.h
deleted file mode 100644
index 8c33835e..00000000
--- a/drivers/net/nfp/nfp_nspu.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (c) 2017 Netronome Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from this
- * software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * vim:shiftwidth=8:noexpandtab
- *
- * @file dpdk/pmd/nfp_nspu.h
- *
- * Netronome NFP_NET PDM driver
- */
-
-/*
- * NSP is the NFP Service Processor. NSPU is NSP Userspace interface.
- *
- * NFP NSP helps with firmware/hardware configuration. NSP is another component
- * in NFP programmable processor and accessing it from host requires to firstly
- * configure a specific NFP PCI expansion BAR.
- *
- * Once access is ready, configuration can be done reading and writing
- * from/to a specific PF PCI BAR window. This same interface will allow to
- * create other PCI BAR windows for accessing other NFP components.
- *
- * This file includes low-level functions, using the NSPU interface, and high
- * level functions, invoked by the PMD for using NSP services. This allows
- * firmware upload, vNIC PCI BARs mapping and other low-level configurations
- * like link setup.
- *
- * NSP access is done during initialization and it is not involved at all with
- * the fast path.
- */
-
-#include <rte_spinlock.h>
-#include "nfp_net_eth.h"
-
-typedef struct {
- int nfp; /* NFP device */
- int pcie_bar; /* PF PCI BAR to work with */
- int exp_bar; /* Expansion BAR number used by NSPU */
- int barsz; /* PCIE BAR log2 size */
- uint64_t bufaddr; /* commands buffer address */
- size_t buf_size; /* commands buffer size */
- uint64_t windowsz; /* NSPU BAR window size */
- void *cfg_base; /* Expansion BARs address */
- void *mem_base; /* NSP interface */
- rte_spinlock_t nsp_lock;
-} nspu_desc_t;
-
-int nfp_nspu_init(nspu_desc_t *desc, int nfp, int pcie_bar, size_t pcie_barsz,
- int exp_bar, void *exp_bar_cfg_base, void *exp_bar_mmap);
-int nfp_nsp_get_abi_version(nspu_desc_t *desc, int *major, int *minor);
-int nfp_nsp_fw_setup(nspu_desc_t *desc, const char *sym, uint64_t *pcie_offset);
-int nfp_nsp_map_ctrl_bar(nspu_desc_t *desc, uint64_t *pcie_offset);
-void nfp_nsp_map_queues_bar(nspu_desc_t *desc, uint64_t *pcie_offset);
-int nfp_nsp_eth_config(nspu_desc_t *desc, int port, int up);
-int nfp_nsp_eth_read_table(nspu_desc_t *desc, union eth_table_entry **table);
diff --git a/drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h b/drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h
new file mode 100644
index 00000000..6e380cca
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h
@@ -0,0 +1,722 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_CPPAT_H__
+#define __NFP_CPPAT_H__
+
+#include "nfp_platform.h"
+#include "nfp_resid.h"
+
+/* This file contains helpers for creating CPP commands
+ *
+ * All magic NFP-6xxx IMB 'mode' numbers here are from:
+ * Databook (1 August 2013)
+ * - System Overview and Connectivity
+ * -- Internal Connectivity
+ * --- Distributed Switch Fabric - Command Push/Pull (DSF-CPP) Bus
+ * ---- CPP addressing
+ * ----- Table 3.6. CPP Address Translation Mode Commands
+ */
+
+#define _NIC_NFP6000_MU_LOCALITY_DIRECT 2
+
+static inline int
+_nfp6000_decode_basic(uint64_t addr, int *dest_island, int cpp_tgt, int mode,
+ int addr40, int isld1, int isld0);
+
+static uint64_t
+_nic_mask64(int msb, int lsb, int at0)
+{
+ uint64_t v;
+ int w = msb - lsb + 1;
+
+ if (w == 64)
+ return ~(uint64_t)0;
+
+ if ((lsb + w) > 64)
+ return 0;
+
+ v = (UINT64_C(1) << w) - 1;
+
+ if (at0)
+ return v;
+
+ return v << lsb;
+}
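A quick worked example of _nic_mask64(), since the encode/decode helpers below lean on it; this is a sketch, not part of the patch:

	/* Island-ID field <39:34> used by 40-bit mode-0 addressing */
	uint64_t m = _nic_mask64(39, 34, 0);	/* 0x3f << 34 = 0xfc00000000 */
	uint64_t r = _nic_mask64(39, 34, 1);	/* right-aligned: 0x3f */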
+
+/* For VQDR, we may not modify the Channel bits, which might overlap
+ * with the Index bit. When it does, we need to ensure that isld0 == isld1.
+ */
+static inline int
+_nfp6000_encode_basic(uint64_t *addr, int dest_island, int cpp_tgt, int mode,
+ int addr40, int isld1, int isld0)
+{
+ uint64_t _u64;
+ int iid_lsb, idx_lsb;
+ int i, v = 0;
+ int isld[2];
+
+ isld[0] = isld0;
+ isld[1] = isld1;
+
+ switch (cpp_tgt) {
+ case NFP6000_CPPTGT_MU:
+ /* This function doesn't handle MU */
+ return NFP_ERRNO(EINVAL);
+ case NFP6000_CPPTGT_CTXPB:
+ /* This function doesn't handle CTXPB */
+ return NFP_ERRNO(EINVAL);
+ default:
+ break;
+ }
+
+ switch (mode) {
+ case 0:
+ if (cpp_tgt == NFP6000_CPPTGT_VQDR && !addr40) {
+ /*
+ * In this specific mode we'd rather not modify the
+ * address but we can verify if the existing contents
+ * will point to a valid island.
+ */
+ i = _nfp6000_decode_basic(*addr, &v, cpp_tgt, mode,
+ addr40, isld1,
+ isld0);
+ if (i != 0)
+ /* Full Island ID and channel bits overlap */
+ return i;
+
+ /*
+ * If dest_island is invalid, the current address won't
+ * go where expected.
+ */
+ if (dest_island != -1 && dest_island != v)
+ return NFP_ERRNO(EINVAL);
+
+ /* If dest_island was -1, we don't care */
+ return 0;
+ }
+
+ iid_lsb = (addr40) ? 34 : 26;
+
+ /* <39:34> or <31:26> */
+ _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0);
+ *addr &= ~_u64;
+ *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64;
+ return 0;
+ case 1:
+ if (cpp_tgt == NFP6000_CPPTGT_VQDR && !addr40) {
+ i = _nfp6000_decode_basic(*addr, &v, cpp_tgt, mode,
+ addr40, isld1, isld0);
+ if (i != 0)
+ /* Full Island ID and channel bits overlap */
+ return i;
+
+ /*
+ * If dest_island is invalid, the current address won't
+ * go where expected.
+ */
+ if (dest_island != -1 && dest_island != v)
+ return NFP_ERRNO(EINVAL);
+
+ /* If dest_island was -1, we don't care */
+ return 0;
+ }
+
+ idx_lsb = (addr40) ? 39 : 31;
+ if (dest_island == isld0) {
+ /* Only need to clear the Index bit */
+ *addr &= ~_nic_mask64(idx_lsb, idx_lsb, 0);
+ return 0;
+ }
+
+ if (dest_island == isld1) {
+ /* Only need to set the Index bit */
+ *addr |= (UINT64_C(1) << idx_lsb);
+ return 0;
+ }
+
+ return NFP_ERRNO(ENODEV);
+ case 2:
+ if (cpp_tgt == NFP6000_CPPTGT_VQDR && !addr40) {
+ /* iid<0> = addr<30> = channel<0> */
+ /* channel<1> = addr<31> = Index */
+
+ /*
+ * Special case where we allow channel bits to be set
+ * before hand and with them select an island.
+ * So we need to confirm that it's at least plausible.
+ */
+ i = _nfp6000_decode_basic(*addr, &v, cpp_tgt, mode,
+ addr40, isld1, isld0);
+ if (i != 0)
+ /* Full Island ID and channel bits overlap */
+ return i;
+
+ /*
+ * If dest_island is invalid, the current address won't
+ * go where expected.
+ */
+ if (dest_island != -1 && dest_island != v)
+ return NFP_ERRNO(EINVAL);
+
+ /* If dest_island was -1, we don't care */
+ return 0;
+ }
+
+ /*
+ * Make sure we compare against isldN values by clearing the
+ * LSB. This is what the silicon does.
+ */
+ isld[0] &= ~1;
+ isld[1] &= ~1;
+
+ idx_lsb = (addr40) ? 39 : 31;
+ iid_lsb = idx_lsb - 1;
+
+ /*
+ * Try each option, take first one that fits. Not sure if we
+ * would want to do some smarter searching and prefer 0 or non-0
+ * island IDs.
+ */
+
+ for (i = 0; i < 2; i++) {
+ for (v = 0; v < 2; v++) {
+ if (dest_island != (isld[i] | v))
+ continue;
+ *addr &= ~_nic_mask64(idx_lsb, iid_lsb, 0);
+ *addr |= (((uint64_t)i) << idx_lsb);
+ *addr |= (((uint64_t)v) << iid_lsb);
+ return 0;
+ }
+ }
+
+ return NFP_ERRNO(ENODEV);
+ case 3:
+ if (cpp_tgt == NFP6000_CPPTGT_VQDR && !addr40) {
+ /*
+ * iid<0> = addr<29> = data
+ * iid<1> = addr<30> = channel<0>
+ * channel<1> = addr<31> = Index
+ */
+ i = _nfp6000_decode_basic(*addr, &v, cpp_tgt, mode,
+ addr40, isld1, isld0);
+ if (i != 0)
+ /* Full Island ID and channel bits overlap */
+ return i;
+
+ if (dest_island != -1 && dest_island != v)
+ return NFP_ERRNO(EINVAL);
+
+ /* If dest_island was -1, we don't care */
+ return 0;
+ }
+
+ isld[0] &= ~3;
+ isld[1] &= ~3;
+
+ idx_lsb = (addr40) ? 39 : 31;
+ iid_lsb = idx_lsb - 2;
+
+ for (i = 0; i < 2; i++) {
+ for (v = 0; v < 4; v++) {
+ if (dest_island != (isld[i] | v))
+ continue;
+ *addr &= ~_nic_mask64(idx_lsb, iid_lsb, 0);
+ *addr |= (((uint64_t)i) << idx_lsb);
+ *addr |= (((uint64_t)v) << iid_lsb);
+ return 0;
+ }
+ }
+ return NFP_ERRNO(ENODEV);
+ default:
+ break;
+ }
+
+ return NFP_ERRNO(EINVAL);
+}
+
+static inline int
+_nfp6000_decode_basic(uint64_t addr, int *dest_island, int cpp_tgt, int mode,
+ int addr40, int isld1, int isld0)
+{
+ int iid_lsb, idx_lsb;
+
+ switch (cpp_tgt) {
+ case NFP6000_CPPTGT_MU:
+ /* This function doesn't handle MU */
+ return NFP_ERRNO(EINVAL);
+ case NFP6000_CPPTGT_CTXPB:
+ /* This function doesn't handle CTXPB */
+ return NFP_ERRNO(EINVAL);
+ default:
+ break;
+ }
+
+ switch (mode) {
+ case 0:
+ /*
+ * For VQDR, in this mode for 32-bit addressing it would be
+ * islands 0, 16, 32 and 48 depending on channel and upper
+ * address bits. Since those are not all valid islands, most
+ * decode cases would result in bad island IDs, but we do them
+ * anyway since this is decoding an address that is already
+ * assumed to be used as-is to get to sram.
+ */
+ iid_lsb = (addr40) ? 34 : 26;
+ *dest_island = (int)(addr >> iid_lsb) & 0x3F;
+ return 0;
+ case 1:
+ /*
+ * For VQDR 32-bit, this would decode as:
+ * Channel 0: island#0
+ * Channel 1: island#0
+ * Channel 2: island#1
+ * Channel 3: island#1
+ *
+ * That would be valid as long as both islands have VQDR.
+ * Let's allow this.
+ */
+
+ idx_lsb = (addr40) ? 39 : 31;
+ if (addr & _nic_mask64(idx_lsb, idx_lsb, 0))
+ *dest_island = isld1;
+ else
+ *dest_island = isld0;
+
+ return 0;
+ case 2:
+ /*
+ * For VQDR 32-bit:
+ * Channel 0: (island#0 | 0)
+ * Channel 1: (island#0 | 1)
+ * Channel 2: (island#1 | 0)
+ * Channel 3: (island#1 | 1)
+ *
+ * Make sure we compare against isldN values by clearing the
+ * LSB. This is what the silicon does.
+ */
+ isld0 &= ~1;
+ isld1 &= ~1;
+
+ idx_lsb = (addr40) ? 39 : 31;
+ iid_lsb = idx_lsb - 1;
+
+ if (addr & _nic_mask64(idx_lsb, idx_lsb, 0))
+ *dest_island = isld1 | (int)((addr >> iid_lsb) & 1);
+ else
+ *dest_island = isld0 | (int)((addr >> iid_lsb) & 1);
+
+ return 0;
+ case 3:
+ /*
+ * In this mode the data address starts to affect the island ID
+ * so rather not allow it. In some really specific case one
+ * could use this to send the upper half of the VQDR channel to
+ * another MU, but this is getting very specific. However, as
+ * above for mode 0, this is the decoder and the caller should
+ * validate the resulting IID. This blindly does what the
+ * silicon would do.
+ */
+
+ isld0 &= ~3;
+ isld1 &= ~3;
+
+ idx_lsb = (addr40) ? 39 : 31;
+ iid_lsb = idx_lsb - 2;
+
+ if (addr & _nic_mask64(idx_lsb, idx_lsb, 0))
+ *dest_island = isld1 | (int)((addr >> iid_lsb) & 3);
+ else
+ *dest_island = isld0 | (int)((addr >> iid_lsb) & 3);
+
+ return 0;
+ default:
+ break;
+ }
+
+ return NFP_ERRNO(EINVAL);
+}
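A small round-trip sketch through the two helpers above (mode 0, 32-bit addressing, PCIe CPP target; isld0/isld1 are unused in this mode), illustrating how the island ID moves in and out of bits <31:26>:

	uint64_t addr = 0x123456;	/* offset within the PCIe target */
	int island = -1;

	(void)_nfp6000_encode_basic(&addr, 4, NFP6000_CPPTGT_PCIE, 0, 0, 0, 0);
	/* addr is now 0x10123456: island 4 placed into bits <31:26> */
	(void)_nfp6000_decode_basic(addr, &island, NFP6000_CPPTGT_PCIE, 0, 0, 0, 0);
	/* island is 4 again */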
+
+static inline int
+_nfp6000_cppat_mu_locality_lsb(int mode, int addr40)
+{
+ switch (mode) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ return (addr40) ? 38 : 30;
+ default:
+ break;
+ }
+ return NFP_ERRNO(EINVAL);
+}
+
+static inline int
+_nfp6000_encode_mu(uint64_t *addr, int dest_island, int mode, int addr40,
+ int isld1, int isld0)
+{
+ uint64_t _u64;
+ int iid_lsb, idx_lsb, locality_lsb;
+ int i, v;
+ int isld[2];
+ int da;
+
+ isld[0] = isld0;
+ isld[1] = isld1;
+ locality_lsb = _nfp6000_cppat_mu_locality_lsb(mode, addr40);
+
+ if (((*addr >> locality_lsb) & 3) == _NIC_NFP6000_MU_LOCALITY_DIRECT)
+ da = 1;
+ else
+ da = 0;
+
+ switch (mode) {
+ case 0:
+ iid_lsb = (addr40) ? 32 : 24;
+ _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0);
+ *addr &= ~_u64;
+ *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64;
+ return 0;
+ case 1:
+ if (da) {
+ iid_lsb = (addr40) ? 32 : 24;
+ _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0);
+ *addr &= ~_u64;
+ *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64;
+ return 0;
+ }
+
+ idx_lsb = (addr40) ? 37 : 29;
+ if (dest_island == isld0) {
+ *addr &= ~_nic_mask64(idx_lsb, idx_lsb, 0);
+ return 0;
+ }
+
+ if (dest_island == isld1) {
+ *addr |= (UINT64_C(1) << idx_lsb);
+ return 0;
+ }
+
+ return NFP_ERRNO(ENODEV);
+ case 2:
+ if (da) {
+ iid_lsb = (addr40) ? 32 : 24;
+ _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0);
+ *addr &= ~_u64;
+ *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64;
+ return 0;
+ }
+
+ /*
+ * Make sure we compare against isldN values by clearing the
+ * LSB. This is what the silicon does.
+ */
+ isld[0] &= ~1;
+ isld[1] &= ~1;
+
+ idx_lsb = (addr40) ? 37 : 29;
+ iid_lsb = idx_lsb - 1;
+
+ /*
+ * Try each option, take first one that fits. Not sure if we
+ * would want to do some smarter searching and prefer 0 or
+ * non-0 island IDs.
+ */
+
+ for (i = 0; i < 2; i++) {
+ for (v = 0; v < 2; v++) {
+ if (dest_island != (isld[i] | v))
+ continue;
+ *addr &= ~_nic_mask64(idx_lsb, iid_lsb, 0);
+ *addr |= (((uint64_t)i) << idx_lsb);
+ *addr |= (((uint64_t)v) << iid_lsb);
+ return 0;
+ }
+ }
+ return NFP_ERRNO(ENODEV);
+ case 3:
+ /*
+ * Only the EMU will use 40-bit addressing. Silently set the
+ * direct locality bit for everyone else. The SDK toolchain
+ * uses dest_island <= 0 to test for atypical address encodings
+ * to support access to local-island CTM with a 32-bit address
+ * (high-locality is effectively ignored and just used for
+ * routing to island #0).
+ */
+ if (dest_island > 0 &&
+ (dest_island < 24 || dest_island > 26)) {
+ *addr |= ((uint64_t)_NIC_NFP6000_MU_LOCALITY_DIRECT)
+ << locality_lsb;
+ da = 1;
+ }
+
+ if (da) {
+ iid_lsb = (addr40) ? 32 : 24;
+ _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0);
+ *addr &= ~_u64;
+ *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64;
+ return 0;
+ }
+
+ isld[0] &= ~3;
+ isld[1] &= ~3;
+
+ idx_lsb = (addr40) ? 37 : 29;
+ iid_lsb = idx_lsb - 2;
+
+ for (i = 0; i < 2; i++) {
+ for (v = 0; v < 4; v++) {
+ if (dest_island != (isld[i] | v))
+ continue;
+ *addr &= ~_nic_mask64(idx_lsb, iid_lsb, 0);
+ *addr |= (((uint64_t)i) << idx_lsb);
+ *addr |= (((uint64_t)v) << iid_lsb);
+ return 0;
+ }
+ }
+
+ return NFP_ERRNO(ENODEV);
+ default:
+ break;
+ }
+
+ return NFP_ERRNO(EINVAL);
+}
+
+static inline int
+_nfp6000_decode_mu(uint64_t addr, int *dest_island, int mode, int addr40,
+ int isld1, int isld0)
+{
+ int iid_lsb, idx_lsb, locality_lsb;
+ int da;
+
+ locality_lsb = _nfp6000_cppat_mu_locality_lsb(mode, addr40);
+
+ if (((addr >> locality_lsb) & 3) == _NIC_NFP6000_MU_LOCALITY_DIRECT)
+ da = 1;
+ else
+ da = 0;
+
+ switch (mode) {
+ case 0:
+ iid_lsb = (addr40) ? 32 : 24;
+ *dest_island = (int)(addr >> iid_lsb) & 0x3F;
+ return 0;
+ case 1:
+ if (da) {
+ iid_lsb = (addr40) ? 32 : 24;
+ *dest_island = (int)(addr >> iid_lsb) & 0x3F;
+ return 0;
+ }
+
+ idx_lsb = (addr40) ? 37 : 29;
+
+ if (addr & _nic_mask64(idx_lsb, idx_lsb, 0))
+ *dest_island = isld1;
+ else
+ *dest_island = isld0;
+
+ return 0;
+ case 2:
+ if (da) {
+ iid_lsb = (addr40) ? 32 : 24;
+ *dest_island = (int)(addr >> iid_lsb) & 0x3F;
+ return 0;
+ }
+ /*
+ * Make sure we compare against isldN values by clearing the
+ * LSB. This is what the silicon does.
+ */
+ isld0 &= ~1;
+ isld1 &= ~1;
+
+ idx_lsb = (addr40) ? 37 : 29;
+ iid_lsb = idx_lsb - 1;
+
+ if (addr & _nic_mask64(idx_lsb, idx_lsb, 0))
+ *dest_island = isld1 | (int)((addr >> iid_lsb) & 1);
+ else
+ *dest_island = isld0 | (int)((addr >> iid_lsb) & 1);
+
+ return 0;
+ case 3:
+ if (da) {
+ iid_lsb = (addr40) ? 32 : 24;
+ *dest_island = (int)(addr >> iid_lsb) & 0x3F;
+ return 0;
+ }
+
+ isld0 &= ~3;
+ isld1 &= ~3;
+
+ idx_lsb = (addr40) ? 37 : 29;
+ iid_lsb = idx_lsb - 2;
+
+ if (addr & _nic_mask64(idx_lsb, idx_lsb, 0))
+ *dest_island = isld1 | (int)((addr >> iid_lsb) & 3);
+ else
+ *dest_island = isld0 | (int)((addr >> iid_lsb) & 3);
+
+ return 0;
+ default:
+ break;
+ }
+
+ return NFP_ERRNO(EINVAL);
+}
+
+static inline int
+_nfp6000_cppat_addr_encode(uint64_t *addr, int dest_island, int cpp_tgt,
+ int mode, int addr40, int isld1, int isld0)
+{
+ switch (cpp_tgt) {
+ case NFP6000_CPPTGT_NBI:
+ case NFP6000_CPPTGT_VQDR:
+ case NFP6000_CPPTGT_ILA:
+ case NFP6000_CPPTGT_PCIE:
+ case NFP6000_CPPTGT_ARM:
+ case NFP6000_CPPTGT_CRYPTO:
+ case NFP6000_CPPTGT_CLS:
+ return _nfp6000_encode_basic(addr, dest_island, cpp_tgt, mode,
+ addr40, isld1, isld0);
+
+ case NFP6000_CPPTGT_MU:
+ return _nfp6000_encode_mu(addr, dest_island, mode, addr40,
+ isld1, isld0);
+
+ case NFP6000_CPPTGT_CTXPB:
+ if (mode != 1 || addr40 != 0)
+ return NFP_ERRNO(EINVAL);
+
+ *addr &= ~_nic_mask64(29, 24, 0);
+ *addr |= (((uint64_t)dest_island) << 24) &
+ _nic_mask64(29, 24, 0);
+ return 0;
+ default:
+ break;
+ }
+
+ return NFP_ERRNO(EINVAL);
+}
+
+static inline int
+_nfp6000_cppat_addr_decode(uint64_t addr, int *dest_island, int cpp_tgt,
+ int mode, int addr40, int isld1, int isld0)
+{
+ switch (cpp_tgt) {
+ case NFP6000_CPPTGT_NBI:
+ case NFP6000_CPPTGT_VQDR:
+ case NFP6000_CPPTGT_ILA:
+ case NFP6000_CPPTGT_PCIE:
+ case NFP6000_CPPTGT_ARM:
+ case NFP6000_CPPTGT_CRYPTO:
+ case NFP6000_CPPTGT_CLS:
+ return _nfp6000_decode_basic(addr, dest_island, cpp_tgt, mode,
+ addr40, isld1, isld0);
+
+ case NFP6000_CPPTGT_MU:
+ return _nfp6000_decode_mu(addr, dest_island, mode, addr40,
+ isld1, isld0);
+
+ case NFP6000_CPPTGT_CTXPB:
+ if (mode != 1 || addr40 != 0)
+ return -EINVAL;
+ *dest_island = (int)(addr >> 24) & 0x3F;
+ return 0;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+_nfp6000_cppat_addr_iid_clear(uint64_t *addr, int cpp_tgt, int mode, int addr40)
+{
+ int iid_lsb, locality_lsb, da;
+
+ switch (cpp_tgt) {
+ case NFP6000_CPPTGT_NBI:
+ case NFP6000_CPPTGT_VQDR:
+ case NFP6000_CPPTGT_ILA:
+ case NFP6000_CPPTGT_PCIE:
+ case NFP6000_CPPTGT_ARM:
+ case NFP6000_CPPTGT_CRYPTO:
+ case NFP6000_CPPTGT_CLS:
+ switch (mode) {
+ case 0:
+ iid_lsb = (addr40) ? 34 : 26;
+ *addr &= ~(UINT64_C(0x3F) << iid_lsb);
+ return 0;
+ case 1:
+ iid_lsb = (addr40) ? 39 : 31;
+ *addr &= ~_nic_mask64(iid_lsb, iid_lsb, 0);
+ return 0;
+ case 2:
+ iid_lsb = (addr40) ? 38 : 30;
+ *addr &= ~_nic_mask64(iid_lsb + 1, iid_lsb, 0);
+ return 0;
+ case 3:
+ iid_lsb = (addr40) ? 37 : 29;
+ *addr &= ~_nic_mask64(iid_lsb + 2, iid_lsb, 0);
+ return 0;
+ default:
+ break;
+ }
+ break;
+ case NFP6000_CPPTGT_MU:
+ locality_lsb = _nfp6000_cppat_mu_locality_lsb(mode, addr40);
+ da = (((*addr >> locality_lsb) & 3) ==
+ _NIC_NFP6000_MU_LOCALITY_DIRECT);
+ switch (mode) {
+ case 0:
+ iid_lsb = (addr40) ? 32 : 24;
+ *addr &= ~(UINT64_C(0x3F) << iid_lsb);
+ return 0;
+ case 1:
+ if (da) {
+ iid_lsb = (addr40) ? 32 : 24;
+ *addr &= ~(UINT64_C(0x3F) << iid_lsb);
+ return 0;
+ }
+ iid_lsb = (addr40) ? 37 : 29;
+ *addr &= ~_nic_mask64(iid_lsb, iid_lsb, 0);
+ return 0;
+ case 2:
+ if (da) {
+ iid_lsb = (addr40) ? 32 : 24;
+ *addr &= ~(UINT64_C(0x3F) << iid_lsb);
+ return 0;
+ }
+
+ iid_lsb = (addr40) ? 36 : 28;
+ *addr &= ~_nic_mask64(iid_lsb + 1, iid_lsb, 0);
+ return 0;
+ case 3:
+ if (da) {
+ iid_lsb = (addr40) ? 32 : 24;
+ *addr &= ~(UINT64_C(0x3F) << iid_lsb);
+ return 0;
+ }
+
+ iid_lsb = (addr40) ? 35 : 27;
+ *addr &= ~_nic_mask64(iid_lsb + 2, iid_lsb, 0);
+ return 0;
+ default:
+ break;
+ }
+ break;
+ case NFP6000_CPPTGT_CTXPB:
+ if (mode != 1 || addr40 != 0)
+ return 0;
+ *addr &= ~(UINT64_C(0x3F) << 24);
+ return 0;
+ default:
+ break;
+ }
+
+ return NFP_ERRNO(EINVAL);
+}
+
+#endif /* __NFP_CPPAT_H__ */
diff --git a/drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h b/drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h
new file mode 100644
index 00000000..b8541c59
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_PLATFORM_H__
+#define __NFP_PLATFORM_H__
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <inttypes.h>
+#include <sys/cdefs.h>
+#include <sys/stat.h>
+#include <limits.h>
+#include <errno.h>
+
+#ifndef BIT_ULL
+#define BIT(x) (1 << (x))
+#define BIT_ULL(x) (1ULL << (x))
+#endif
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+#define NFP_ERRNO(err) (errno = (err), -1)
+#define NFP_ERRNO_RET(err, ret) (errno = (err), (ret))
+#define NFP_NOERR(errv) (errno)
+#define NFP_ERRPTR(err) (errno = (err), NULL)
+#define NFP_PTRERR(errv) (errno)
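+
+/*
+ * Usage sketch (illustrative values): NFP_ERRNO(err) sets errno to 'err'
+ * and evaluates to -1, so callers can follow the usual "set errno,
+ * return -1" convention, e.g.:
+ *
+ * if (mode > 3)
+ * return NFP_ERRNO(EINVAL);
+ */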
+
+#endif /* __NFP_PLATFORM_H__ */
diff --git a/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h b/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h
new file mode 100644
index 00000000..0e03948e
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h
@@ -0,0 +1,592 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_RESID_H__
+#define __NFP_RESID_H__
+
+#if (!defined(_NFP_RESID_NO_C_FUNC) && \
+ (defined(__NFP_TOOL_NFCC) || defined(__NFP_TOOL_NFAS)))
+#define _NFP_RESID_NO_C_FUNC
+#endif
+
+#ifndef _NFP_RESID_NO_C_FUNC
+#include "nfp_platform.h"
+#endif
+
+/*
+ * NFP Chip Architectures
+ *
+ * These are semi-arbitrary values to indicate an NFP architecture.
+ * They serve as a software view of a group of chip families, not necessarily a
+ * direct mapping to actual hardware design.
+ */
+#define NFP_CHIP_ARCH_YD 1
+#define NFP_CHIP_ARCH_TH 2
+
+/*
+ * NFP Chip Families.
+ *
+ * These are not enums, because they need to be microcode compatible.
+ * They are also not maskable.
+ *
+ * Note: The NFP-4xxx family is handled as NFP-6xxx in most software
+ * components.
+ *
+ */
+#define NFP_CHIP_FAMILY_NFP6000 0x6000 /* ARCH_TH */
+
+/* NFP Microengine/Flow Processing Core Versions */
+#define NFP_CHIP_ME_VERSION_2_7 0x0207
+#define NFP_CHIP_ME_VERSION_2_8 0x0208
+#define NFP_CHIP_ME_VERSION_2_9 0x0209
+
+/* NFP Chip Base Revisions. Minor stepping can just be added to these */
+#define NFP_CHIP_REVISION_A0 0x00
+#define NFP_CHIP_REVISION_B0 0x10
+#define NFP_CHIP_REVISION_C0 0x20
+#define NFP_CHIP_REVISION_PF 0xff /* Maximum possible revision */
+
+/* CPP Targets for each chip architecture */
+#define NFP6000_CPPTGT_NBI 1
+#define NFP6000_CPPTGT_VQDR 2
+#define NFP6000_CPPTGT_ILA 6
+#define NFP6000_CPPTGT_MU 7
+#define NFP6000_CPPTGT_PCIE 9
+#define NFP6000_CPPTGT_ARM 10
+#define NFP6000_CPPTGT_CRYPTO 12
+#define NFP6000_CPPTGT_CTXPB 14
+#define NFP6000_CPPTGT_CLS 15
+
+/*
+ * Wildcard indicating a CPP read or write action
+ *
+ * The action used will be either read or write depending on whether a read or
+ * write instruction/call is performed on the NFP_CPP_ID. It is recommended that
+ * the RW action is used even if all actions to be performed on a NFP_CPP_ID are
+ * known to be only reads or writes. Doing so will in many cases save NFP CPP
+ * internal software resources.
+ */
+#define NFP_CPP_ACTION_RW 32
+
+#define NFP_CPP_TARGET_ID_MASK 0x1f
+
+/*
+ * NFP_CPP_ID - pack target, token, and action into a CPP ID.
+ *
+ * Create a 32-bit CPP identifier representing the access to be made.
+ * These identifiers are used as parameters to other NFP CPP functions. Some
+ * CPP devices may allow wildcard identifiers to be specified.
+ *
+ * @param[in] target NFP CPP target id
+ * @param[in] action NFP CPP action id
+ * @param[in] token NFP CPP token id
+ * @return NFP CPP ID
+ */
+#define NFP_CPP_ID(target, action, token) \
+ ((((target) & 0x7f) << 24) | (((token) & 0xff) << 16) | \
+ (((action) & 0xff) << 8))
+
+#define NFP_CPP_ISLAND_ID(target, action, token, island) \
+ ((((target) & 0x7f) << 24) | (((token) & 0xff) << 16) | \
+ (((action) & 0xff) << 8) | (((island) & 0xff) << 0))
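+
+/*
+ * Usage sketch (values are arbitrary examples): pack a CPP ID for a
+ * read/write access to the MU target with token 0, then recover the
+ * fields with the accessors declared below.
+ *
+ * uint32_t id = NFP_CPP_ID(NFP6000_CPPTGT_MU, NFP_CPP_ACTION_RW, 0);
+ * // NFP_CPP_ID_TARGET_of(id) == 7 (MU)
+ * // NFP_CPP_ID_ACTION_of(id) == 32 (the RW wildcard)
+ * // NFP_CPP_ID_TOKEN_of(id) == 0
+ */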
+
+#ifndef _NFP_RESID_NO_C_FUNC
+
+/**
+ * Return the NFP CPP target of a NFP CPP ID
+ * @param[in] id NFP CPP ID
+ * @return NFP CPP target
+ */
+static inline uint8_t
+NFP_CPP_ID_TARGET_of(uint32_t id)
+{
+ return (id >> 24) & NFP_CPP_TARGET_ID_MASK;
+}
+
+/*
+ * Return the NFP CPP token of a NFP CPP ID
+ * @param[in] id NFP CPP ID
+ * @return NFP CPP token
+ */
+static inline uint8_t
+NFP_CPP_ID_TOKEN_of(uint32_t id)
+{
+ return (id >> 16) & 0xff;
+}
+
+/*
+ * Return the NFP CPP action of a NFP CPP ID
+ * @param[in] id NFP CPP ID
+ * @return NFP CPP action
+ */
+static inline uint8_t
+NFP_CPP_ID_ACTION_of(uint32_t id)
+{
+ return (id >> 8) & 0xff;
+}
+
+/*
+ * Return the NFP CPP island of a NFP CPP ID
+ * @param[in] id NFP CPP ID
+ * @return NFP CPP island
+ */
+static inline uint8_t
+NFP_CPP_ID_ISLAND_of(uint32_t id)
+{
+ return (id) & 0xff;
+}
+
+#endif /* _NFP_RESID_NO_C_FUNC */
+
+/*
+ * Check if @p chip_family is an ARCH_TH chip.
+ * @param chip_family One of NFP_CHIP_FAMILY_*
+ */
+#define NFP_FAMILY_IS_ARCH_TH(chip_family) \
+ ((int)(chip_family) == (int)NFP_CHIP_FAMILY_NFP6000)
+
+/*
+ * Get the NFP_CHIP_ARCH_* of @p chip_family.
+ * @param chip_family One of NFP_CHIP_FAMILY_*
+ */
+#define NFP_FAMILY_ARCH(x) \
+ (__extension__ ({ \
+ typeof(x) _x = (x); \
+ (NFP_FAMILY_IS_ARCH_TH(_x) ? NFP_CHIP_ARCH_TH : \
+ NFP_FAMILY_IS_ARCH_YD(_x) ? NFP_CHIP_ARCH_YD : -1) \
+ }))
+
+/*
+ * Check if @p chip_family is an NFP-6xxx chip.
+ * @param chip_family One of NFP_CHIP_FAMILY_*
+ */
+#define NFP_FAMILY_IS_NFP6000(chip_family) \
+ ((int)(chip_family) == (int)NFP_CHIP_FAMILY_NFP6000)
+
+/*
+ * Make microengine ID for NFP-6xxx.
+ * @param island_id Island ID.
+ * @param menum ME number, 0 based, within island.
+ *
+ * NOTE: menum should really be unsigned - the MSC compiler throws an error
+ * (not a warning) if a clause is always true, i.e. menum >= 0 when menum is
+ * of unsigned type, hence the cast of menum to an int in that particular
+ * clause
+ */
+#define NFP6000_MEID(a, b) \
+ (__extension__ ({ \
+ typeof(a) _a = (a); \
+ typeof(b) _b = (b); \
+ (((((int)(_a) & 0x3F) == (int)(_a)) && \
+ (((int)(_b) >= 0) && ((int)(_b) < 12))) ? \
+ (int)(((_a) << 4) | ((_b) + 4)) : -1) \
+ }))
+
+/*
+ * Do a general sanity check on the ME ID.
+ * The check is on the highest possible island ID for the chip family and the
+ * microengine number must be a master ID.
+ * @param meid ME ID as created by NFP6000_MEID
+ */
+#define NFP6000_MEID_IS_VALID(meid) \
+ (__extension__ ({ \
+ typeof(meid) _a = (meid); \
+ ((((_a) >> 4) < 64) && (((_a) >> 4) >= 0) && \
+ (((_a) & 0xF) >= 4)) \
+ }))
+
+/*
+ * Extract island ID from ME ID.
+ * @param meid ME ID as created by NFP6000_MEID
+ */
+#define NFP6000_MEID_ISLAND_of(meid) (((meid) >> 4) & 0x3F)
+
+/*
+ * Extract microengine number (0 based) from ME ID.
+ * @param meid ME ID as created by NFP6000_MEID
+ */
+#define NFP6000_MEID_MENUM_of(meid) (((meid) & 0xF) - 4)
+
+/*
+ * Extract microengine group number (0 based) from ME ID.
+ * The group is two code-sharing microengines, so group 0 refers to MEs 0,1,
+ * group 1 refers to MEs 2,3 etc.
+ * @param meid ME ID as created by NFP6000_MEID
+ */
+#define NFP6000_MEID_MEGRP_of(meid) (NFP6000_MEID_MENUM_of(meid) >> 1)
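+
+/*
+ * Usage sketch (island and ME number are arbitrary examples):
+ *
+ * int meid = NFP6000_MEID(32, 5); // == 0x209
+ * // NFP6000_MEID_ISLAND_of(meid) == 32
+ * // NFP6000_MEID_MENUM_of(meid) == 5
+ * // NFP6000_MEID_MEGRP_of(meid) == 2
+ */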
+
+#ifndef _NFP_RESID_NO_C_FUNC
+
+/*
+ * Convert a string to an ME ID.
+ *
+ * @param s A string of format iX.meY
+ * @param endptr If non-NULL, *endptr will point to the trailing string
+ * after the ME ID part of the string, which is either
+ * an empty string or the first character after the separating
+ * period.
+ * @return ME ID on success, -1 on error.
+ */
+int nfp6000_idstr2meid(const char *s, const char **endptr);
+
+/*
+ * Extract island ID from string.
+ *
+ * Example:
+ * char *c;
+ * int val = nfp6000_idstr2island("i32.me5", &c);
+ * // val == 32, c == "me5"
+ * val = nfp6000_idstr2island("i32", &c);
+ * // val == 32, c == ""
+ *
+ * @param s A string of format "iX.anything" or "iX"
+ * @param endptr If non-NULL, *endptr will point to the trailing string
+ * after the island part of the string, which is either
+ * an empty string or the first character after the separating
+ * period.
+ * @return If successful, the island ID, -1 on error.
+ */
+int nfp6000_idstr2island(const char *s, const char **endptr);
+
+/*
+ * Extract microengine number from string.
+ *
+ * Example:
+ * char *c;
+ * int menum = nfp6000_idstr2menum("me5.anything", &c);
+ * // menum == 5, c == "anything"
+ * menum = nfp6000_idstr2menum("me5", &c);
+ * // menum == 5, c == ""
+ *
+ * @param s A string of format "meX.anything" or "meX"
+ * @param endptr If non-NULL, *endptr will point to the trailing string
+ * after the ME number part of the string, which is either
+ * an empty string or the first character after the separating
+ * period.
+ * @return If successful, the ME number, -1 on error.
+ */
+int nfp6000_idstr2menum(const char *s, const char **endptr);
+
+/*
+ * Extract context number from string.
+ *
+ * Example:
+ * char *c;
+ * int val = nfp6000_idstr2ctxnum("ctx5.anything", &c);
+ * // val == 5, c == "anything"
+ * val = nfp6000_idstr2ctxnum("ctx5", &c);
+ * // val == 5, c == ""
+ *
+ * @param s A string of format "ctxN.anything" or "ctxN"
+ * @param endptr If non-NULL, *endptr will point to the trailing string
+ * after the context number part of the string, which is either
+ * an empty string or the first character after the separating
+ * period.
+ * @return If successful, the context number, -1 on error.
+ */
+int nfp6000_idstr2ctxnum(const char *s, const char **endptr);
+
+/*
+ * Extract microengine group number from string.
+ *
+ * Example:
+ * char *c;
+ * int val = nfp6000_idstr2megrp("tg2.anything", &c);
+ * // val == 2, c == "anything"
+ * val = nfp6000_idstr2megrp("tg5", &c);
+ * // val == 5, c == ""
+ *
+ * @param s A string of format "tgX.anything" or "tgX"
+ * @param endptr If non-NULL, *endptr will point to the trailing string
+ * after the ME group part of the string, which is either
+ * an empty string or the first character after the separating
+ * period.
+ * @return If successful, the ME group number, -1 on error.
+ */
+int nfp6000_idstr2megrp(const char *s, const char **endptr);
+
+/*
+ * Create ME ID string of format "iX[.meY]".
+ *
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param meid Microengine ID.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp6000_meid2str(char *s, int meid);
+
+/*
+ * Create ME ID string of format "name[.meY]" or "iX[.meY]".
+ *
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param meid Microengine ID.
+ * @return Pointer to "s" on success, NULL on error.
+ *
+ * Similar to nfp6000_meid2str() except use an alias instead of "iX"
+ * if one exists for the island.
+ */
+const char *nfp6000_meid2altstr(char *s, int meid);
+
+/*
+ * Create string of format "iX".
+ *
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param island_id Island ID.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp6000_island2str(char *s, int island_id);
+
+/*
+ * Create string of format "name", an island alias.
+ *
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param island_id Island ID.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp6000_island2altstr(char *s, int island_id);
+
+/*
+ * Create string of format "meY".
+ *
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param menum Microengine number within island.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp6000_menum2str(char *s, int menum);
+
+/*
+ * Create string of format "ctxY".
+ *
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param ctxnum Context number within microengine.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp6000_ctxnum2str(char *s, int ctxnum);
+
+/*
+ * Create string of format "tgY".
+ *
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param megrp Microengine group number within cluster.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp6000_megrp2str(char *s, int megrp);
+
+/*
+ * Convert a string to an ME ID.
+ *
+ * @param chip_family Chip family ID
+ * @param s A string of format iX.meY (or clX.meY)
+ * @param endptr If non-NULL, *endptr will point to the trailing
+ * string after the ME ID part of the string, which
+ * is either an empty string or the first character
+ * after the separating period.
+ * @return ME ID on success, -1 on error.
+ */
+int nfp_idstr2meid(int chip_family, const char *s, const char **endptr);
+
+/*
+ * Extract island ID from string.
+ *
+ * Example:
+ * char *c;
+ * int val = nfp_idstr2island(chip, "i32.me5", &c);
+ * // val == 32, c == "me5"
+ * val = nfp_idstr2island(chip, "i32", &c);
+ * // val == 32, c == ""
+ *
+ * @param chip_family Chip family ID
+ * @param s A string of format "iX.anything" or "iX"
+ * @param endptr If non-NULL, *endptr will point to the trailing
+ * string after the island part of the string, which
+ * is either an empty string or the first character
+ * after the separating period.
+ * @return The island ID on success, -1 on error.
+ */
+int nfp_idstr2island(int chip_family, const char *s, const char **endptr);
+
+/*
+ * Extract microengine number from string.
+ *
+ * Example:
+ * char *c;
+ * int menum = nfp_idstr2menum("me5.anything", &c);
+ * // menum == 5, c == "anything"
+ * menum = nfp_idstr2menum("me5", &c);
+ * // menum == 5, c == ""
+ *
+ * @param chip_family Chip family ID
+ * @param s A string of format "meX.anything" or "meX"
+ * @param endptr If non-NULL, *endptr will point to the trailing
+ * string after the ME number part of the string, which
+ * is either an empty string or the first character
+ * after the separating period.
+ * @return The ME number on success, -1 on error.
+ */
+int nfp_idstr2menum(int chip_family, const char *s, const char **endptr);
+
+/*
+ * Extract context number from string.
+ *
+ * Example:
+ * char *c;
+ * int val = nfp_idstr2ctxnum("ctx5.anything", &c);
+ * // val == 5, c == "anything"
+ * val = nfp_idstr2ctxnum("ctx5", &c);
+ * // val == 5, c == ""
+ *
+ * @param s A string of format "ctxN.anything" or "ctxN"
+ * @param endptr If non-NULL, *endptr will point to the trailing string
+ * after the context number part of the string, which is either
+ * an empty string or the first character after the separating
+ * period.
+ * @return If successful, the context number, -1 on error.
+ */
+int nfp_idstr2ctxnum(int chip_family, const char *s, const char **endptr);
+
+/*
+ * Extract microengine group number from string.
+ *
+ * Example:
+ * char *c;
+ * int val = nfp_idstr2megrp("tg2.anything", &c);
+ * // val == 2, c == "anything"
+ * val = nfp_idstr2megrp("tg5", &c);
+ * // val == 5, c == ""
+ *
+ * @param s A string of format "tgX.anything" or "tgX"
+ * @param endptr If non-NULL, *endptr will point to the trailing string
+ * after the ME group part of the string, which is either
+ * an empty string or the first character after the separating
+ * period.
+ * @return If successful, the ME group number, -1 on error.
+ */
+int nfp_idstr2megrp(int chip_family, const char *s, const char **endptr);
+
+/*
+ * Create ME ID string of format "iX[.meY]".
+ *
+ * @param chip_family Chip family ID
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param meid Microengine ID.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp_meid2str(int chip_family, char *s, int meid);
+
+/*
+ * Create ME ID string of format "name[.meY]" or "iX[.meY]".
+ *
+ * @param chip_family Chip family ID
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param meid Microengine ID.
+ * @return Pointer to "s" on success, NULL on error.
+ *
+ * Similar to nfp_meid2str() except use an alias instead of "iX"
+ * if one exists for the island.
+ */
+const char *nfp_meid2altstr(int chip_family, char *s, int meid);
+
+/*
+ * Create string of format "iX".
+ *
+ * @param chip_family Chip family ID
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param island_id Island ID.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp_island2str(int chip_family, char *s, int island_id);
+
+/*
+ * Create string of format "name", an island alias.
+ *
+ * @param chip_family Chip family ID
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param island_id Island ID.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp_island2altstr(int chip_family, char *s, int island_id);
+
+/*
+ * Create string of format "meY".
+ *
+ * @param chip_family Chip family ID
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param menum Microengine number within island.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp_menum2str(int chip_family, char *s, int menum);
+
+/*
+ * Create string of format "ctxY".
+ *
+ * @param chip_family Chip family ID
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param ctxnum Context number within microengine.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp_ctxnum2str(int chip_family, char *s, int ctxnum);
+
+/*
+ * Create string of format "tgY".
+ *
+ * @param chip_family Chip family ID
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param megrp Microengine group number within cluster.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp_megrp2str(int chip_family, char *s, int megrp);
+
+/*
+ * Convert a two character string to revision number.
+ *
+ * Revision integer is 0x00 for A0, 0x11 for B1 etc.
+ *
+ * @param s Two character string.
+ * @return Revision number, -1 on error
+ */
+int nfp_idstr2rev(const char *s);
+
+/*
+ * Create string from revision number.
+ *
+ * String will be upper case.
+ *
+ * @param s Pointer to char buffer with size of at least 3
+ * for 2 characters and string terminator.
+ * @param rev Revision number.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp_rev2str(char *s, int rev);
+
+/*
+ * Get the NFP CPP address from a string
+ *
+ * String is in the format [island@]target[:[action:[token:]]address]
+ *
+ * @param chip_family Chip family ID
+ * @param tid Pointer to string to parse
+ * @param cpp_idp Pointer to CPP ID
+ * @param cpp_addrp Pointer to CPP address
+ * @return 0 on success, or -1 and errno
+ */
+int nfp_str2cpp(int chip_family,
+ const char *tid,
+ uint32_t *cpp_idp,
+ uint64_t *cpp_addrp);
+
+
+#endif /* _NFP_RESID_NO_C_FUNC */
+
+#endif /* __NFP_RESID_H__ */
diff --git a/drivers/net/nfp/nfpcore/nfp6000/nfp6000.h b/drivers/net/nfp/nfpcore/nfp6000/nfp6000.h
new file mode 100644
index 00000000..47e1ddae
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp6000/nfp6000.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_NFP6000_H__
+#define __NFP_NFP6000_H__
+
+#include <errno.h>
+
+/* CPP Target IDs */
+#define NFP_CPP_TARGET_INVALID 0
+#define NFP_CPP_TARGET_NBI 1
+#define NFP_CPP_TARGET_QDR 2
+#define NFP_CPP_TARGET_ILA 6
+#define NFP_CPP_TARGET_MU 7
+#define NFP_CPP_TARGET_PCIE 9
+#define NFP_CPP_TARGET_ARM 10
+#define NFP_CPP_TARGET_CRYPTO 12
+#define NFP_CPP_TARGET_ISLAND_XPB 14 /* Shared with CAP */
+#define NFP_CPP_TARGET_ISLAND_CAP 14 /* Shared with XPB */
+#define NFP_CPP_TARGET_CT_XPB 14
+#define NFP_CPP_TARGET_LOCAL_SCRATCH 15
+#define NFP_CPP_TARGET_CLS NFP_CPP_TARGET_LOCAL_SCRATCH
+
+#define NFP_ISL_EMEM0 24
+
+#define NFP_MU_ADDR_ACCESS_TYPE_MASK 3ULL
+#define NFP_MU_ADDR_ACCESS_TYPE_DIRECT 2ULL
+
+static inline int
+nfp_cppat_mu_locality_lsb(int mode, int addr40)
+{
+ switch (mode) {
+ case 0 ... 3:
+ return addr40 ? 38 : 30;
+ default:
+ return -EINVAL;
+ }
+}
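+
+/*
+ * Usage sketch (addr and mode are placeholders): test whether a 40-bit MU
+ * address selects the direct access type.
+ *
+ * int lsb = nfp_cppat_mu_locality_lsb(mode, 1); // 38 for modes 0-3
+ * int direct = ((addr >> lsb) & NFP_MU_ADDR_ACCESS_TYPE_MASK) ==
+ * NFP_MU_ADDR_ACCESS_TYPE_DIRECT;
+ */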
+
+#endif /* __NFP_NFP6000_H__ */
diff --git a/drivers/net/nfp/nfpcore/nfp6000/nfp_xpb.h b/drivers/net/nfp/nfpcore/nfp6000/nfp_xpb.h
new file mode 100644
index 00000000..7ada1bb2
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp6000/nfp_xpb.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_XPB_H__
+#define __NFP_XPB_H__
+
+/*
+ * For use with NFP6000 Databook "XPB Addressing" section
+ */
+#define NFP_XPB_OVERLAY(island) (((island) & 0x3f) << 24)
+
+#define NFP_XPB_ISLAND(island) (NFP_XPB_OVERLAY(island) + 0x60000)
+
+#define NFP_XPB_ISLAND_of(offset) (((offset) >> 24) & 0x3F)
+
+/*
+ * For use with NFP6000 Databook "XPB Island and Device IDs" chapter
+ */
+#define NFP_XPB_DEVICE(island, slave, device) \
+ (NFP_XPB_OVERLAY(island) | \
+ (((slave) & 3) << 22) | \
+ (((device) & 0x3f) << 16))
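+
+/*
+ * Usage sketch (island, slave and device are arbitrary examples):
+ *
+ * uint32_t xpb = NFP_XPB_DEVICE(8, 1, 4);
+ * // == (8 << 24) | (1 << 22) | (4 << 16)
+ * // NFP_XPB_ISLAND_of(xpb) == 8
+ */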
+
+#endif /* __NFP_XPB_H__ */
diff --git a/drivers/net/nfp/nfpcore/nfp_cpp.h b/drivers/net/nfp/nfpcore/nfp_cpp.h
new file mode 100644
index 00000000..de2ff84e
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_cpp.h
@@ -0,0 +1,779 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_CPP_H__
+#define __NFP_CPP_H__
+
+#include "nfp-common/nfp_platform.h"
+#include "nfp-common/nfp_resid.h"
+
+struct nfp_cpp_mutex;
+
+/*
+ * NFP CPP handle
+ */
+struct nfp_cpp {
+ uint32_t model;
+ uint32_t interface;
+ uint8_t *serial;
+ int serial_len;
+ void *priv;
+
+ /* Mutex cache */
+ struct nfp_cpp_mutex *mutex_cache;
+ const struct nfp_cpp_operations *op;
+
+ /*
+ * NFP-6xxx originating island IMB CPP Address Translation. CPP Target
+ * ID is index into array. Values are obtained at runtime from local
+ * island XPB CSRs.
+ */
+ uint32_t imb_cat_table[16];
+
+ int driver_lock_needed;
+};
+
+/*
+ * NFP CPP device area handle
+ */
+struct nfp_cpp_area {
+ struct nfp_cpp *cpp;
+ char *name;
+ unsigned long long offset;
+ unsigned long size;
+ /* Here follows the 'priv' part of nfp_cpp_area. */
+};
+
+/*
+ * NFP CPP operations structure
+ */
+struct nfp_cpp_operations {
+ /* Size of priv area in struct nfp_cpp_area */
+ size_t area_priv_size;
+
+ /* Instance an NFP CPP */
+ int (*init)(struct nfp_cpp *cpp, const char *devname);
+
+ /*
+ * Free the bus.
+ * Called only once, during nfp_cpp_unregister()
+ */
+ void (*free)(struct nfp_cpp *cpp);
+
+ /*
+ * Initialize a new NFP CPP area
+ * NOTE: This is _not_ serialized
+ */
+ int (*area_init)(struct nfp_cpp_area *area,
+ uint32_t dest,
+ unsigned long long address,
+ unsigned long size);
+ /*
+ * Clean up a NFP CPP area before it is freed
+ * NOTE: This is _not_ serialized
+ */
+ void (*area_cleanup)(struct nfp_cpp_area *area);
+
+ /*
+ * Acquire resources for a NFP CPP area
+ * Serialized
+ */
+ int (*area_acquire)(struct nfp_cpp_area *area);
+ /*
+ * Release resources for a NFP CPP area
+ * Serialized
+ */
+ void (*area_release)(struct nfp_cpp_area *area);
+ /*
+ * Return a void IO pointer to a NFP CPP area
+ * NOTE: This is _not_ serialized
+ */
+
+ void *(*area_iomem)(struct nfp_cpp_area *area);
+
+ void *(*area_mapped)(struct nfp_cpp_area *area);
+ /*
+ * Perform a read from a NFP CPP area
+ * Serialized
+ */
+ int (*area_read)(struct nfp_cpp_area *area,
+ void *kernel_vaddr,
+ unsigned long offset,
+ unsigned int length);
+ /*
+ * Perform a write to a NFP CPP area
+ * Serialized
+ */
+ int (*area_write)(struct nfp_cpp_area *area,
+ const void *kernel_vaddr,
+ unsigned long offset,
+ unsigned int length);
+};
+
+/*
+ * This should be the only external function the transport
+ * module supplies
+ */
+const struct nfp_cpp_operations *nfp_cpp_transport_operations(void);
+
+/*
+ * Set the model id
+ *
+ * @param cpp NFP CPP operations structure
+ * @param model Model ID
+ */
+void nfp_cpp_model_set(struct nfp_cpp *cpp, uint32_t model);
+
+/*
+ * Set the private instance owned data of a nfp_cpp struct
+ *
+ * @param cpp NFP CPP operations structure
+ * @param interface Interface ID
+ */
+void nfp_cpp_interface_set(struct nfp_cpp *cpp, uint32_t interface);
+
+/*
+ * Set the private instance owned data of a nfp_cpp struct
+ *
+ * @param cpp NFP CPP operations structure
+ * @param serial NFP serial byte array
+ * @param len Length of the serial byte array
+ */
+int nfp_cpp_serial_set(struct nfp_cpp *cpp, const uint8_t *serial,
+ size_t serial_len);
+
+/*
+ * Set the private data of the nfp_cpp instance
+ *
+ * @param cpp NFP CPP operations structure
+ * @return Opaque device pointer
+ */
+void nfp_cpp_priv_set(struct nfp_cpp *cpp, void *priv);
+
+/*
+ * Return the private data of the nfp_cpp instance
+ *
+ * @param cpp NFP CPP operations structure
+ * @return Opaque device pointer
+ */
+void *nfp_cpp_priv(struct nfp_cpp *cpp);
+
+/*
+ * Get the privately allocated portion of a NFP CPP area handle
+ *
+ * @param cpp_area NFP CPP area handle
+ * @return Pointer to the private area, or NULL on failure
+ */
+void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area);
+
+uint32_t __nfp_cpp_model_autodetect(struct nfp_cpp *cpp);
+
+/*
+ * NFP CPP core interface for CPP clients.
+ */
+
+/*
+ * Open a NFP CPP handle to a CPP device
+ *
+ * @param[in] devname Device name of the CPP interface to use
+ * @param[in] driver_lock_needed Non-zero if the driver lock must be acquired
+ *
+ * @return NFP CPP handle, or NULL on failure (and set errno accordingly).
+ */
+struct nfp_cpp *nfp_cpp_from_device_name(const char *devname,
+ int driver_lock_needed);
+
+/*
+ * Free a NFP CPP handle
+ *
+ * @param[in] cpp NFP CPP handle
+ */
+void nfp_cpp_free(struct nfp_cpp *cpp);
+
+#define NFP_CPP_MODEL_INVALID 0xffffffff
+
+/*
+ * NFP_CPP_MODEL_CHIP_of - retrieve the chip ID from the model ID
+ *
+ * The chip ID is a 16-bit BCD+A-F encoding for the chip type.
+ *
+ * @param[in] model NFP CPP model id
+ * @return NFP CPP chip id
+ */
+#define NFP_CPP_MODEL_CHIP_of(model) (((model) >> 16) & 0xffff)
+
+/*
+ * NFP_CPP_MODEL_IS_6000 - Check for the NFP6000 family of devices
+ *
+ * NOTE: The NFP4000 series is considered as a NFP6000 series variant.
+ *
+ * @param[in] model NFP CPP model id
+ * @return true if model is in the NFP6000 family, false otherwise.
+ */
+#define NFP_CPP_MODEL_IS_6000(model) \
+ ((NFP_CPP_MODEL_CHIP_of(model) >= 0x4000) && \
+ (NFP_CPP_MODEL_CHIP_of(model) < 0x7000))
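+
+/*
+ * Usage sketch (the model ID below is a hypothetical example):
+ *
+ * // NFP_CPP_MODEL_CHIP_of(0x62000010) == 0x6200
+ * // NFP_CPP_MODEL_IS_6000(0x62000010) is true (0x4000 <= 0x6200 < 0x7000)
+ */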
+
+/*
+ * nfp_cpp_model - Retrieve the Model ID of the NFP
+ *
+ * @param[in] cpp NFP CPP handle
+ * @return NFP CPP Model ID
+ */
+uint32_t nfp_cpp_model(struct nfp_cpp *cpp);
+
+/*
+ * NFP Interface types - logical interface for this CPP connection. 4 bits
+ * are reserved for the interface type.
+ */
+#define NFP_CPP_INTERFACE_TYPE_INVALID 0x0
+#define NFP_CPP_INTERFACE_TYPE_PCI 0x1
+#define NFP_CPP_INTERFACE_TYPE_ARM 0x2
+#define NFP_CPP_INTERFACE_TYPE_RPC 0x3
+#define NFP_CPP_INTERFACE_TYPE_ILA 0x4
+
+/*
+ * Construct a 16-bit NFP Interface ID
+ *
+ * Interface IDs consist of 4 bits of interface type, 4 bits of unit
+ * identifier, and 8 bits of channel identifier.
+ *
+ * The NFP Interface ID is used in the implementation of NFP CPP API mutexes,
+ * which use the MU Atomic CompareAndWrite operation - hence the limit to 16
+ * bits to be able to use the NFP Interface ID as a lock owner.
+ *
+ * @param[in] type NFP Interface Type
+ * @param[in] unit Unit identifier for the interface type
+ * @param[in] channel Channel identifier for the interface unit
+ * @return Interface ID
+ */
+#define NFP_CPP_INTERFACE(type, unit, channel) \
+ ((((type) & 0xf) << 12) | \
+ (((unit) & 0xf) << 8) | \
+ (((channel) & 0xff) << 0))
+
+/*
+ * Get the interface type of a NFP Interface ID
+ * @param[in] interface NFP Interface ID
+ * @return NFP Interface ID's type
+ */
+#define NFP_CPP_INTERFACE_TYPE_of(interface) (((interface) >> 12) & 0xf)
+
+/*
+ * Get the interface unit of a NFP Interface ID
+ * @param[in] interface NFP Interface ID
+ * @return NFP Interface ID's unit
+ */
+#define NFP_CPP_INTERFACE_UNIT_of(interface) (((interface) >> 8) & 0xf)
+
+/*
+ * Get the interface channel of a NFP Interface ID
+ * @param[in] interface NFP Interface ID
+ * @return NFP Interface ID's channel
+ */
+#define NFP_CPP_INTERFACE_CHANNEL_of(interface) (((interface) >> 0) & 0xff)
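+
+/*
+ * Usage sketch (unit and channel are arbitrary examples):
+ *
+ * uint16_t ifc = NFP_CPP_INTERFACE(NFP_CPP_INTERFACE_TYPE_PCI, 0, 3);
+ * // NFP_CPP_INTERFACE_TYPE_of(ifc) == NFP_CPP_INTERFACE_TYPE_PCI
+ * // NFP_CPP_INTERFACE_UNIT_of(ifc) == 0
+ * // NFP_CPP_INTERFACE_CHANNEL_of(ifc) == 3
+ */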
+
+/*
+ * Retrieve the Interface ID of the NFP
+ * @param[in] cpp NFP CPP handle
+ * @return NFP CPP Interface ID
+ */
+uint16_t nfp_cpp_interface(struct nfp_cpp *cpp);
+
+/*
+ * Retrieve the NFP Serial Number (unique per NFP)
+ * @param[in] cpp NFP CPP handle
+ * @param[out] serial Pointer to reference the serial number array
+ *
+ * @return size of the NFP6000 serial number, in bytes
+ */
+int nfp_cpp_serial(struct nfp_cpp *cpp, const uint8_t **serial);
+
+/*
+ * Allocate a NFP CPP area handle, as an offset into a CPP ID
+ * @param[in] cpp NFP CPP handle
+ * @param[in] cpp_id NFP CPP ID
+ * @param[in] address Offset into the NFP CPP ID address space
+ * @param[in] size Size of the area to reserve
+ *
+ * @return NFP CPP area handle, or NULL on failure (and set errno accordingly).
+ */
+struct nfp_cpp_area *nfp_cpp_area_alloc(struct nfp_cpp *cpp, uint32_t cpp_id,
+ unsigned long long address,
+ unsigned long size);
+
+/*
+ * Allocate a NFP CPP area handle, as an offset into a CPP ID, by a named owner
+ * @param[in] cpp NFP CPP handle
+ * @param[in] cpp_id NFP CPP ID
+ * @param[in] name Name of owner of the area
+ * @param[in] address Offset into the NFP CPP ID address space
+ * @param[in] size Size of the area to reserve
+ *
+ * @return NFP CPP area handle, or NULL on failure (and set errno accordingly).
+ */
+struct nfp_cpp_area *nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp,
+ uint32_t cpp_id,
+ const char *name,
+ unsigned long long address,
+ unsigned long size);
+
+/*
+ * Free an allocated NFP CPP area handle
+ * @param[in] area NFP CPP area handle
+ */
+void nfp_cpp_area_free(struct nfp_cpp_area *area);
+
+/*
+ * Acquire the resources needed to access the NFP CPP area handle
+ *
+ * @param[in] area NFP CPP area handle
+ *
+ * @return 0 on success, -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_area_acquire(struct nfp_cpp_area *area);
+
+/*
+ * Release the resources needed to access the NFP CPP area handle
+ *
+ * @param[in] area NFP CPP area handle
+ */
+void nfp_cpp_area_release(struct nfp_cpp_area *area);
+
+/*
+ * Allocate, then acquire the resources needed to access the NFP CPP area handle
+ * @param[in] cpp NFP CPP handle
+ * @param[in] cpp_id NFP CPP ID
+ * @param[in] address Offset into the NFP CPP ID address space
+ * @param[in] size Size of the area to reserve
+ *
+ * @return NFP CPP area handle, or NULL on failure (and set errno accordingly).
+ */
+struct nfp_cpp_area *nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp,
+ uint32_t cpp_id,
+ unsigned long long address,
+ unsigned long size);
+
+/*
+ * Release the resources, then free the NFP CPP area handle
+ * @param[in] area NFP CPP area handle
+ */
+void nfp_cpp_area_release_free(struct nfp_cpp_area *area);
+
+uint8_t *nfp_cpp_map_area(struct nfp_cpp *cpp, int domain, int target,
+ uint64_t addr, unsigned long size,
+ struct nfp_cpp_area **area);
+/*
+ * Return an IO pointer to the beginning of the NFP CPP area handle. The area
+ * must be acquired with 'nfp_cpp_area_acquire()' before calling this operation.
+ *
+ * @param[in] area NFP CPP area handle
+ *
+ * @return Pointer to IO memory, or NULL on failure (and set errno accordingly).
+ */
+void *nfp_cpp_area_mapped(struct nfp_cpp_area *area);
+
+/*
+ * Read from a NFP CPP area handle into a buffer. The area must be acquired with
+ * 'nfp_cpp_area_acquire()' before calling this operation.
+ *
+ * @param[in] area NFP CPP area handle
+ * @param[in] offset Offset into the area
+ * @param[in] buffer Location of buffer to receive the data
+ * @param[in] length Length of the data to read
+ *
+ * @return bytes read on success, -1 on failure (and set errno accordingly).
+ *
+ */
+int nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset,
+ void *buffer, size_t length);
+
+/*
+ * Write to a NFP CPP area handle from a buffer. The area must be acquired with
+ * 'nfp_cpp_area_acquire()' before calling this operation.
+ *
+ * @param[in] area NFP CPP area handle
+ * @param[in] offset Offset into the area
+ * @param[in] buffer Location of buffer that holds the data
+ * @param[in] length Length of the data to write
+ *
+ * @return bytes written on success, -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset,
+ const void *buffer, size_t length);
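+
+/*
+ * Typical usage sketch (illustrative only; 'cpp' is an already-opened
+ * handle, the address and length values are arbitrary, and error handling
+ * is omitted):
+ *
+ * struct nfp_cpp_area *area;
+ * uint32_t buf[4];
+ *
+ * area = nfp_cpp_area_alloc_acquire(cpp,
+ * NFP_CPP_ID(NFP6000_CPPTGT_MU, NFP_CPP_ACTION_RW, 0),
+ * 0x100000, sizeof(buf));
+ * if (area != NULL) {
+ * nfp_cpp_area_read(area, 0, buf, sizeof(buf));
+ * nfp_cpp_area_release_free(area);
+ * }
+ */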
+
+/*
+ * nfp_cpp_area_iomem() - get IOMEM region for CPP area
+ * @area: CPP area handle
+ *
+ * Returns an iomem pointer for use with readl()/writel() style operations.
+ *
+ * NOTE: Area must have been locked down with an 'acquire'.
+ *
+ * Return: pointer to the area, or NULL
+ */
+void *nfp_cpp_area_iomem(struct nfp_cpp_area *area);
+
+/*
+ * Verify that IO can be performed on an offset in an area
+ *
+ * @param[in] area NFP CPP area handle
+ * @param[in] offset Offset into the area
+ * @param[in] size Size of region to validate
+ *
+ * @return 0 on success, -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_area_check_range(struct nfp_cpp_area *area,
+ unsigned long long offset, unsigned long size);
+
+/*
+ * Get the NFP CPP handle that is the parent of a NFP CPP area handle
+ *
+ * @param cpp_area NFP CPP area handle
+ * @return NFP CPP handle
+ */
+struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area);
+
+/*
+ * Get the name passed during allocation of the NFP CPP area handle
+ *
+ * @param cpp_area NFP CPP area handle
+ * @return Pointer to the area's name
+ */
+const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area);
+
+/*
+ * Read a block of data from a NFP CPP ID
+ *
+ * @param[in] cpp NFP CPP handle
+ * @param[in] cpp_id NFP CPP ID
+ * @param[in] address Offset into the NFP CPP ID address space
+ * @param[in] kernel_vaddr Buffer to copy read data to
+ * @param[in] length Length of the data to read
+ *
+ * @return bytes read on success, -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_read(struct nfp_cpp *cpp, uint32_t cpp_id,
+ unsigned long long address, void *kernel_vaddr, size_t length);
+
+/*
+ * Write a block of data to a NFP CPP ID
+ *
+ * @param[in] cpp NFP CPP handle
+ * @param[in] cpp_id NFP CPP ID
+ * @param[in] address Offset into the NFP CPP ID address space
+ * @param[in] kernel_vaddr Buffer to copy write data from
+ * @param[in] length Length of the data to write
+ *
+ * @return bytes written on success, -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_write(struct nfp_cpp *cpp, uint32_t cpp_id,
+ unsigned long long address, const void *kernel_vaddr,
+ size_t length);
+
+/*
+ * Fill a NFP CPP area handle and offset with a value
+ *
+ * @param[in] area NFP CPP area handle
+ * @param[in] offset Offset into the NFP CPP ID address space
+ * @param[in] value 32-bit value to fill area with
+ * @param[in] length Length of the area to fill
+ *
+ * @return bytes written on success, -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_area_fill(struct nfp_cpp_area *area, unsigned long offset,
+ uint32_t value, size_t length);
+
+/*
+ * Read a single 32-bit value from a NFP CPP area handle
+ *
+ * @param area NFP CPP area handle
+ * @param offset offset into NFP CPP area handle
+ * @param value output value
+ *
+ * The area must be acquired with 'nfp_cpp_area_acquire()' before calling this
+ * operation.
+ *
+ * NOTE: offset must be 32-bit aligned.
+ *
+ * @return 0 on success, or -1 on error (and set errno accordingly).
+ */
+int nfp_cpp_area_readl(struct nfp_cpp_area *area, unsigned long offset,
+ uint32_t *value);
+
+/*
+ * Write a single 32-bit value to a NFP CPP area handle
+ *
+ * @param area NFP CPP area handle
+ * @param offset offset into NFP CPP area handle
+ * @param value value to write
+ *
+ * The area must be acquired with 'nfp_cpp_area_acquire()' before calling this
+ * operation.
+ *
+ * NOTE: offset must be 32-bit aligned.
+ *
+ * @return 0 on success, or -1 on error (and set errno accordingly).
+ */
+int nfp_cpp_area_writel(struct nfp_cpp_area *area, unsigned long offset,
+ uint32_t value);
+
+/*
+ * Read a single 64-bit value from a NFP CPP area handle
+ *
+ * @param area NFP CPP area handle
+ * @param offset offset into NFP CPP area handle
+ * @param value output value
+ *
+ * The area must be acquired with 'nfp_cpp_area_acquire()' before calling this
+ * operation.
+ *
+ * NOTE: offset must be 64-bit aligned.
+ *
+ * @return 0 on success, or -1 on error (and set errno accordingly).
+ */
+int nfp_cpp_area_readq(struct nfp_cpp_area *area, unsigned long offset,
+ uint64_t *value);
+
+/*
+ * Write a single 64-bit value to a NFP CPP area handle
+ *
+ * @param area NFP CPP area handle
+ * @param offset offset into NFP CPP area handle
+ * @param value value to write
+ *
+ * The area must be acquired with 'nfp_cpp_area_acquire()' before calling this
+ * operation.
+ *
+ * NOTE: offset must be 64-bit aligned.
+ *
+ * @return 0 on success, or -1 on error (and set errno accordingly).
+ */
+int nfp_cpp_area_writeq(struct nfp_cpp_area *area, unsigned long offset,
+ uint64_t value);
+
+/*
+ * Write a single 32-bit value on the XPB bus
+ *
+ * @param cpp NFP CPP device handle
+ * @param xpb_tgt XPB target and address
+ * @param value value to write
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_xpb_writel(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t value);
+
+/*
+ * Read a single 32-bit value from the XPB bus
+ *
+ * @param cpp NFP CPP device handle
+ * @param xpb_tgt XPB target and address
+ * @param value output value
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_xpb_readl(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t *value);
+
+/*
+ * Modify bits of a 32-bit value from the XPB bus
+ *
+ * @param cpp NFP CPP device handle
+ * @param xpb_tgt XPB target and address
+ * @param mask mask of bits to alter
+ * @param value value to modify
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_xpb_writelm(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t mask,
+ uint32_t value);
+
+/*
+ * Modify bits of a 32-bit value from the XPB bus
+ *
+ * @param cpp NFP CPP device handle
+ * @param xpb_tgt XPB target and address
+ * @param mask mask of bits to alter
+ * @param value value to monitor for
+ * @param timeout_us maximum number of us to wait (-1 for forever)
+ *
+ * @return >= 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_xpb_waitlm(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t mask,
+ uint32_t value, int timeout_us);
+
+/*
+ * Read a 32-bit word from a NFP CPP ID
+ *
+ * @param cpp NFP CPP handle
+ * @param cpp_id NFP CPP ID
+ * @param address offset into the NFP CPP ID address space
+ * @param value output value
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_readl(struct nfp_cpp *cpp, uint32_t cpp_id,
+ unsigned long long address, uint32_t *value);
+
+/*
+ * Write a 32-bit value to a NFP CPP ID
+ *
+ * @param cpp NFP CPP handle
+ * @param cpp_id NFP CPP ID
+ * @param address offset into the NFP CPP ID address space
+ * @param value value to write
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ *
+ */
+int nfp_cpp_writel(struct nfp_cpp *cpp, uint32_t cpp_id,
+ unsigned long long address, uint32_t value);
+
+/*
+ * Read a 64-bit word from a NFP CPP ID
+ *
+ * @param cpp NFP CPP handle
+ * @param cpp_id NFP CPP ID
+ * @param address offset into the NFP CPP ID address space
+ * @param value output value
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_readq(struct nfp_cpp *cpp, uint32_t cpp_id,
+ unsigned long long address, uint64_t *value);
+
+/*
+ * Write a 64-bit value to a NFP CPP ID
+ *
+ * @param cpp NFP CPP handle
+ * @param cpp_id NFP CPP ID
+ * @param address offset into the NFP CPP ID address space
+ * @param value value to write
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_writeq(struct nfp_cpp *cpp, uint32_t cpp_id,
+ unsigned long long address, uint64_t value);
+
+/*
+ * Initialize a mutex location
+ *
+ * The CPP target:address must point to a 64-bit aligned location, and will
+ * initialize 64 bits of data at the location.
+ *
+ * This creates the initial mutex state, as locked by this nfp_cpp_interface().
+ *
+ * This function should only be called when setting up the initial lock state
+ * upon boot-up of the system.
+ *
+ * @param cpp NFP CPP handle
+ * @param target NFP CPP target ID
+ * @param address Offset into the address space of the NFP CPP target ID
+ * @param key_id Unique 32-bit value for this mutex
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_mutex_init(struct nfp_cpp *cpp, int target,
+ unsigned long long address, uint32_t key_id);
+
+/*
+ * Create a mutex handle from an address controlled by a MU Atomic engine
+ *
+ * The CPP target:address must point to a 64-bit aligned location, and reserve
+ * 64 bits of data at the location for use by the handle.
+ *
+ * Only target/address pairs that point to entities that support the MU Atomic
+ * Engine's CmpAndSwap32 command are supported.
+ *
+ * @param cpp NFP CPP handle
+ * @param target NFP CPP target ID
+ * @param address Offset into the address space of the NFP CPP target ID
+ * @param key_id 32-bit unique key (must match the key at this location)
+ *
+ * @return A non-NULL struct nfp_cpp_mutex * on success, NULL on
+ * failure.
+ */
+struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target,
+ unsigned long long address,
+ uint32_t key_id);
+
+/*
+ * Get the NFP CPP handle the mutex was created with
+ *
+ * @param mutex NFP mutex handle
+ * @return NFP CPP handle
+ */
+struct nfp_cpp *nfp_cpp_mutex_cpp(struct nfp_cpp_mutex *mutex);
+
+/*
+ * Get the mutex key
+ *
+ * @param mutex NFP mutex handle
+ * @return Mutex key
+ */
+uint32_t nfp_cpp_mutex_key(struct nfp_cpp_mutex *mutex);
+
+/*
+ * Get the mutex owner
+ *
+ * @param mutex NFP mutex handle
+ * @return Interface ID of the mutex owner
+ *
+ * NOTE: This is for debug purposes ONLY - the owner may change at any time,
+ * unless it has been locked by this NFP CPP handle.
+ */
+uint16_t nfp_cpp_mutex_owner(struct nfp_cpp_mutex *mutex);
+
+/*
+ * Get the mutex target
+ *
+ * @param mutex NFP mutex handle
+ * @return Mutex CPP target (i.e. NFP_CPP_TARGET_MU)
+ */
+int nfp_cpp_mutex_target(struct nfp_cpp_mutex *mutex);
+
+/*
+ * Get the mutex address
+ *
+ * @param mutex NFP mutex handle
+ * @return Mutex CPP address
+ */
+uint64_t nfp_cpp_mutex_address(struct nfp_cpp_mutex *mutex);
+
+/*
+ * Free a mutex handle - does not alter the lock state
+ *
+ * @param mutex NFP CPP Mutex handle
+ */
+void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex);
+
+/*
+ * Lock a mutex handle, using the NFP MU Atomic Engine
+ *
+ * @param mutex NFP CPP Mutex handle
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex);
+
+/*
+ * Unlock a mutex handle, using the NFP MU Atomic Engine
+ *
+ * @param mutex NFP CPP Mutex handle
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex);
+
+/*
+ * Attempt to lock a mutex handle, using the NFP MU Atomic Engine
+ *
+ * @param mutex NFP CPP Mutex handle
+ * @return 0 if the lock succeeded, -1 on failure (and errno set
+ * appropriately).
+ */
+int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex);
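+
+/*
+ * Typical usage sketch (illustrative only; 'cpp' is an already-opened
+ * handle and the address/key values are arbitrary):
+ *
+ * struct nfp_cpp_mutex *mutex;
+ *
+ * mutex = nfp_cpp_mutex_alloc(cpp, NFP6000_CPPTGT_MU, 0x100000, 0xcafe);
+ * if (mutex != NULL) {
+ * if (nfp_cpp_mutex_lock(mutex) == 0) {
+ * ... critical section ...
+ * nfp_cpp_mutex_unlock(mutex);
+ * }
+ * nfp_cpp_mutex_free(mutex);
+ * }
+ */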
+
+#endif /* !__NFP_CPP_H__ */
diff --git a/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c b/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c
new file mode 100644
index 00000000..2f5e7f6d
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c
@@ -0,0 +1,941 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+/*
+ * nfp_cpp_pcie_ops.c
+ * Authors: Vinayak Tammineedi <vinayak.tammineedi@netronome.com>
+ *
+ * Multiplexes the NFP BARs between NFP internal resources and
+ * implements the PCIe specific interface for generic CPP bus access.
+ *
+ * The BARs are managed and allocated if they are available.
+ * The generic CPP bus abstraction builds upon this BAR interface.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <execinfo.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <fcntl.h>
+#include <string.h>
+#include <errno.h>
+#include <dirent.h>
+#include <libgen.h>
+
+#include <sys/mman.h>
+#include <sys/file.h>
+#include <sys/stat.h>
+
+#include <rte_string_fns.h>
+
+#include "nfp_cpp.h"
+#include "nfp_target.h"
+#include "nfp6000/nfp6000.h"
+
+#define NFP_PCIE_BAR(_pf) (0x30000 + ((_pf) & 7) * 0xc0)
+
+#define NFP_PCIE_BAR_PCIE2CPP_ACTION_BASEADDRESS(_x) (((_x) & 0x1f) << 16)
+#define NFP_PCIE_BAR_PCIE2CPP_BASEADDRESS(_x) (((_x) & 0xffff) << 0)
+#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT(_x) (((_x) & 0x3) << 27)
+#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_32BIT 0
+#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_64BIT 1
+#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_0BYTE 3
+#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE(_x) (((_x) & 0x7) << 29)
+#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_OF(_x) (((_x) >> 29) & 0x7)
+#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_FIXED 0
+#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_BULK 1
+#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_TARGET 2
+#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_GENERAL 3
+#define NFP_PCIE_BAR_PCIE2CPP_TARGET_BASEADDRESS(_x) (((_x) & 0xf) << 23)
+#define NFP_PCIE_BAR_PCIE2CPP_TOKEN_BASEADDRESS(_x) (((_x) & 0x3) << 21)
+
+/*
+ * Minimal size of the PCIe cfg memory we depend on being mapped;
+ * the queue controller and DMA controller don't have to be covered.
+ */
+#define NFP_PCI_MIN_MAP_SIZE 0x080000
+
+#define NFP_PCIE_P2C_FIXED_SIZE(bar) (1 << (bar)->bitsize)
+#define NFP_PCIE_P2C_BULK_SIZE(bar) (1 << (bar)->bitsize)
+#define NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(bar, x) ((x) << ((bar)->bitsize - 2))
+#define NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(bar, x) ((x) << ((bar)->bitsize - 4))
+#define NFP_PCIE_P2C_GENERAL_SIZE(bar) (1 << ((bar)->bitsize - 4))
+
+#define NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(bar, slot) \
+ (NFP_PCIE_BAR(0) + ((bar) * 8 + (slot)) * 4)
+
+#define NFP_PCIE_CPP_BAR_PCIETOCPPEXPBAR(bar, slot) \
+ (((bar) * 8 + (slot)) * 4)
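+
+/*
+ * Example (illustrative values): expansion BAR 0, slot 1 of PF 0 lives at
+ * config offset NFP_PCIE_BAR(0) + (0 * 8 + 1) * 4 == 0x30004.
+ */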
+
+/*
+ * Define to 1 to enable a bit more verbose debug output.
+ */
+struct nfp_pcie_user;
+struct nfp6000_area_priv;
+
+/*
+ * struct nfp_bar - describes BAR configuration and usage
+ * @nfp: backlink to owner
+ * @barcfg: cached contents of BAR config CSR
+ * @base: the BAR's base CPP offset
+ * @mask: mask for the BAR aperture (read only)
+ * @bitsize: bitsize of BAR aperture (read only)
+ * @index: index of the BAR
+ * @lock: lock to specify if bar is in use
+ * @refcnt: number of current users
+ * @iomem: mapped IO memory
+ */
+#define NFP_BAR_MAX 7
+struct nfp_bar {
+ struct nfp_pcie_user *nfp;
+ uint32_t barcfg;
+ uint64_t base; /* CPP address base */
+ uint64_t mask; /* Bit mask of the bar */
+ uint32_t bitsize; /* Bit size of the bar */
+ int index;
+ int lock;
+
+ char *csr;
+ char *iomem;
+};
+
+#define BUSDEV_SZ 13
+struct nfp_pcie_user {
+ struct nfp_bar bar[NFP_BAR_MAX];
+
+ int device;
+ int lock;
+ char busdev[BUSDEV_SZ];
+ int barsz;
+ char *cfg;
+};
+
+static uint32_t
+nfp_bar_maptype(struct nfp_bar *bar)
+{
+ return NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_OF(bar->barcfg);
+}
+
+#define TARGET_WIDTH_32 4
+#define TARGET_WIDTH_64 8
+
+static int
+nfp_compute_bar(const struct nfp_bar *bar, uint32_t *bar_config,
+ uint64_t *bar_base, int tgt, int act, int tok,
+ uint64_t offset, size_t size, int width)
+{
+ uint32_t bitsize;
+ uint32_t newcfg;
+ uint64_t mask;
+
+ if (tgt >= 16)
+ return -EINVAL;
+
+ switch (width) {
+ case 8:
+ newcfg =
+ NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT
+ (NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_64BIT);
+ break;
+ case 4:
+ newcfg =
+ NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT
+ (NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_32BIT);
+ break;
+ case 0:
+ newcfg =
+ NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT
+ (NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_0BYTE);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (act != NFP_CPP_ACTION_RW && act != 0) {
+ /* Fixed CPP mapping with specific action */
+ mask = ~(NFP_PCIE_P2C_FIXED_SIZE(bar) - 1);
+
+ newcfg |=
+ NFP_PCIE_BAR_PCIE2CPP_MAPTYPE
+ (NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_FIXED);
+ newcfg |= NFP_PCIE_BAR_PCIE2CPP_TARGET_BASEADDRESS(tgt);
+ newcfg |= NFP_PCIE_BAR_PCIE2CPP_ACTION_BASEADDRESS(act);
+ newcfg |= NFP_PCIE_BAR_PCIE2CPP_TOKEN_BASEADDRESS(tok);
+
+ if ((offset & mask) != ((offset + size - 1) & mask)) {
+ printf("BAR%d: Won't use for Fixed mapping\n",
+ bar->index);
+ printf("\t<%#llx,%#llx>, action=%d\n",
+ (unsigned long long)offset,
+ (unsigned long long)(offset + size), act);
+ printf("\tBAR too small (0x%llx).\n",
+ (unsigned long long)mask);
+ return -EINVAL;
+ }
+ offset &= mask;
+
+#ifdef DEBUG
+ printf("BAR%d: Created Fixed mapping\n", bar->index);
+ printf("\t%d:%d:%d:0x%#llx-0x%#llx>\n", tgt, act, tok,
+ (unsigned long long)offset,
+ (unsigned long long)(offset + mask));
+#endif
+
+ bitsize = 40 - 16;
+ } else {
+ mask = ~(NFP_PCIE_P2C_BULK_SIZE(bar) - 1);
+
+ /* Bulk mapping */
+ newcfg |=
+ NFP_PCIE_BAR_PCIE2CPP_MAPTYPE
+ (NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_BULK);
+
+ newcfg |= NFP_PCIE_BAR_PCIE2CPP_TARGET_BASEADDRESS(tgt);
+ newcfg |= NFP_PCIE_BAR_PCIE2CPP_TOKEN_BASEADDRESS(tok);
+
+ if ((offset & mask) != ((offset + size - 1) & mask)) {
+ printf("BAR%d: Won't use for bulk mapping\n",
+ bar->index);
+ printf("\t<%#llx,%#llx>\n", (unsigned long long)offset,
+ (unsigned long long)(offset + size));
+ printf("\ttarget=%d, token=%d\n", tgt, tok);
+ printf("\tBAR too small (%#llx) - (%#llx != %#llx).\n",
+ (unsigned long long)mask,
+ (unsigned long long)(offset & mask),
+ (unsigned long long)(offset + size - 1) & mask);
+
+ return -EINVAL;
+ }
+
+ offset &= mask;
+
+#ifdef DEBUG
+ printf("BAR%d: Created bulk mapping %d:x:%d:%#llx-%#llx\n",
+ bar->index, tgt, tok, (unsigned long long)offset,
+ (unsigned long long)(offset + ~mask));
+#endif
+
+ bitsize = 40 - 21;
+ }
+
+ if (bar->bitsize < bitsize) {
+ printf("BAR%d: Too small for %d:%d:%d\n", bar->index, tgt, tok,
+ act);
+ return -EINVAL;
+ }
+
+ newcfg |= offset >> bitsize;
+
+ if (bar_base)
+ *bar_base = offset;
+
+ if (bar_config)
+ *bar_config = newcfg;
+
+ return 0;
+}
+
+static int
+nfp_bar_write(struct nfp_pcie_user *nfp, struct nfp_bar *bar,
+ uint32_t newcfg)
+{
+ int base, slot;
+
+ base = bar->index >> 3;
+ slot = bar->index & 7;
+
+ if (!nfp->cfg)
+ return (-ENOMEM);
+
+ bar->csr = nfp->cfg +
+ NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(base, slot);
+
+ *(uint32_t *)(bar->csr) = newcfg;
+
+ bar->barcfg = newcfg;
+#ifdef DEBUG
+ printf("BAR%d: updated to 0x%08x\n", bar->index, newcfg);
+#endif
+
+ return 0;
+}
+
+static int
+nfp_reconfigure_bar(struct nfp_pcie_user *nfp, struct nfp_bar *bar, int tgt,
+ int act, int tok, uint64_t offset, size_t size, int width)
+{
+ uint64_t newbase;
+ uint32_t newcfg;
+ int err;
+
+ err = nfp_compute_bar(bar, &newcfg, &newbase, tgt, act, tok, offset,
+ size, width);
+ if (err)
+ return err;
+
+ bar->base = newbase;
+
+ return nfp_bar_write(nfp, bar, newcfg);
+}
+
+/*
+ * Map all PCI bars. We assume that the BAR with the PCIe config block is
+ * already mapped.
+ *
+ * BAR0.0: Reserved for General Mapping (for MSI-X access to PCIe SRAM)
+ */
+static int
+nfp_enable_bars(struct nfp_pcie_user *nfp)
+{
+ struct nfp_bar *bar;
+ int x;
+
+ for (x = ARRAY_SIZE(nfp->bar); x > 0; x--) {
+ bar = &nfp->bar[x - 1];
+ bar->barcfg = 0;
+ bar->nfp = nfp;
+ bar->index = x;
+ bar->mask = (1 << (nfp->barsz - 3)) - 1;
+ bar->bitsize = nfp->barsz - 3;
+ bar->base = 0;
+ bar->iomem = NULL;
+ bar->lock = 0;
+ bar->csr = nfp->cfg +
+ NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(bar->index >> 3,
+ bar->index & 7);
+ bar->iomem =
+ (char *)mmap(0, 1 << bar->bitsize, PROT_READ | PROT_WRITE,
+ MAP_SHARED, nfp->device,
+ bar->index << bar->bitsize);
+
+ if (bar->iomem == MAP_FAILED)
+ return (-ENOMEM);
+ }
+ return 0;
+}
+
+static struct nfp_bar *
+nfp_alloc_bar(struct nfp_pcie_user *nfp)
+{
+ struct nfp_bar *bar;
+ int x;
+
+ for (x = ARRAY_SIZE(nfp->bar); x > 0; x--) {
+ bar = &nfp->bar[x - 1];
+ if (!bar->lock) {
+ bar->lock = 1;
+ return bar;
+ }
+ }
+ return NULL;
+}
+
+static void
+nfp_disable_bars(struct nfp_pcie_user *nfp)
+{
+ struct nfp_bar *bar;
+ int x;
+
+ for (x = ARRAY_SIZE(nfp->bar); x > 0; x--) {
+ bar = &nfp->bar[x - 1];
+ if (bar->iomem) {
+ munmap(bar->iomem, 1 << (nfp->barsz - 3));
+ bar->iomem = NULL;
+ bar->lock = 0;
+ }
+ }
+}
+
+/*
+ * Generic CPP bus access interface.
+ */
+
+struct nfp6000_area_priv {
+ struct nfp_bar *bar;
+ uint32_t bar_offset;
+
+ uint32_t target;
+ uint32_t action;
+ uint32_t token;
+ uint64_t offset;
+ struct {
+ int read;
+ int write;
+ int bar;
+ } width;
+ size_t size;
+ char *iomem;
+};
+
+static int
+nfp6000_area_init(struct nfp_cpp_area *area, uint32_t dest,
+ unsigned long long address, unsigned long size)
+{
+ struct nfp_pcie_user *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area));
+ struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
+ uint32_t target = NFP_CPP_ID_TARGET_of(dest);
+ uint32_t action = NFP_CPP_ID_ACTION_of(dest);
+ uint32_t token = NFP_CPP_ID_TOKEN_of(dest);
+ int pp, ret = 0;
+
+ pp = nfp6000_target_pushpull(NFP_CPP_ID(target, action, token),
+ address);
+ if (pp < 0)
+ return pp;
+
+ priv->width.read = PUSH_WIDTH(pp);
+ priv->width.write = PULL_WIDTH(pp);
+
+ if (priv->width.read > 0 &&
+ priv->width.write > 0 && priv->width.read != priv->width.write)
+ return -EINVAL;
+
+ if (priv->width.read > 0)
+ priv->width.bar = priv->width.read;
+ else
+ priv->width.bar = priv->width.write;
+
+ priv->bar = nfp_alloc_bar(nfp);
+ if (priv->bar == NULL)
+ return -ENOMEM;
+
+ priv->target = target;
+ priv->action = action;
+ priv->token = token;
+ priv->offset = address;
+ priv->size = size;
+
+ ret = nfp_reconfigure_bar(nfp, priv->bar, priv->target, priv->action,
+ priv->token, priv->offset, priv->size,
+ priv->width.bar);
+
+ return ret;
+}
+
+static int
+nfp6000_area_acquire(struct nfp_cpp_area *area)
+{
+ struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
+
+ /* Calculate offset into BAR. */
+ if (nfp_bar_maptype(priv->bar) ==
+ NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_GENERAL) {
+ priv->bar_offset = priv->offset &
+ (NFP_PCIE_P2C_GENERAL_SIZE(priv->bar) - 1);
+ priv->bar_offset +=
+ NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(priv->bar,
+ priv->target);
+ priv->bar_offset +=
+ NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(priv->bar, priv->token);
+ } else {
+ priv->bar_offset = priv->offset & priv->bar->mask;
+ }
+
+ /* Must have been too big. Sub-allocate. */
+ if (!priv->bar->iomem)
+ return (-ENOMEM);
+
+ priv->iomem = priv->bar->iomem + priv->bar_offset;
+
+ return 0;
+}
+
+static void *
+nfp6000_area_mapped(struct nfp_cpp_area *area)
+{
+ struct nfp6000_area_priv *area_priv = nfp_cpp_area_priv(area);
+
+ if (!area_priv->iomem)
+ return NULL;
+
+ return area_priv->iomem;
+}
+
+static void
+nfp6000_area_release(struct nfp_cpp_area *area)
+{
+ struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
+ priv->bar->lock = 0;
+ priv->bar = NULL;
+ priv->iomem = NULL;
+}
+
+static void *
+nfp6000_area_iomem(struct nfp_cpp_area *area)
+{
+ struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
+ return priv->iomem;
+}
+
+static int
+nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr,
+ unsigned long offset, unsigned int length)
+{
+ uint64_t *wrptr64 = kernel_vaddr;
+ const volatile uint64_t *rdptr64;
+ struct nfp6000_area_priv *priv;
+ uint32_t *wrptr32 = kernel_vaddr;
+ const volatile uint32_t *rdptr32;
+ int width;
+ unsigned int n;
+ bool is_64;
+
+ priv = nfp_cpp_area_priv(area);
+ rdptr64 = (uint64_t *)(priv->iomem + offset);
+ rdptr32 = (uint32_t *)(priv->iomem + offset);
+
+ if (offset + length > priv->size)
+ return -EFAULT;
+
+ width = priv->width.read;
+
+ if (width <= 0)
+ return -EINVAL;
+
+ /* Unaligned? Translate to an explicit access */
+ if ((priv->offset + offset) & (width - 1)) {
+ printf("aread_read unaligned!!!\n");
+ return -EINVAL;
+ }
+
+ is_64 = width == TARGET_WIDTH_64;
+
+ /* MU reads via a PCIe2CPP BAR support 32-bit (and other) lengths */
+ if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) &&
+ priv->action == NFP_CPP_ACTION_RW) {
+ is_64 = false;
+ }
+
+ if (is_64) {
+ if (offset % sizeof(uint64_t) != 0 ||
+ length % sizeof(uint64_t) != 0)
+ return -EINVAL;
+ } else {
+ if (offset % sizeof(uint32_t) != 0 ||
+ length % sizeof(uint32_t) != 0)
+ return -EINVAL;
+ }
+
+ if (!priv->bar)
+ return -EFAULT;
+
+ if (is_64)
+ for (n = 0; n < length; n += sizeof(uint64_t)) {
+ *wrptr64 = *rdptr64;
+ wrptr64++;
+ rdptr64++;
+ }
+ else
+ for (n = 0; n < length; n += sizeof(uint32_t)) {
+ *wrptr32 = *rdptr32;
+ wrptr32++;
+ rdptr32++;
+ }
+
+ return n;
+}
+
+static int
+nfp6000_area_write(struct nfp_cpp_area *area, const void *kernel_vaddr,
+ unsigned long offset, unsigned int length)
+{
+ const uint64_t *rdptr64 = kernel_vaddr;
+ uint64_t *wrptr64;
+ const uint32_t *rdptr32 = kernel_vaddr;
+ struct nfp6000_area_priv *priv;
+ uint32_t *wrptr32;
+ int width;
+ unsigned int n;
+ bool is_64;
+
+ priv = nfp_cpp_area_priv(area);
+ wrptr64 = (uint64_t *)(priv->iomem + offset);
+ wrptr32 = (uint32_t *)(priv->iomem + offset);
+
+ if (offset + length > priv->size)
+ return -EFAULT;
+
+ width = priv->width.write;
+
+ if (width <= 0)
+ return -EINVAL;
+
+ /* Unaligned? Translate to an explicit access */
+ if ((priv->offset + offset) & (width - 1))
+ return -EINVAL;
+
+ is_64 = width == TARGET_WIDTH_64;
+
+ /* MU writes via a PCIe2CPP BAR support 32-bit (and other) lengths */
+ if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) &&
+ priv->action == NFP_CPP_ACTION_RW)
+ is_64 = false;
+
+ if (is_64) {
+ if (offset % sizeof(uint64_t) != 0 ||
+ length % sizeof(uint64_t) != 0)
+ return -EINVAL;
+ } else {
+ if (offset % sizeof(uint32_t) != 0 ||
+ length % sizeof(uint32_t) != 0)
+ return -EINVAL;
+ }
+
+ if (!priv->bar)
+ return -EFAULT;
+
+ if (is_64)
+ for (n = 0; n < length; n += sizeof(uint64_t)) {
+ *wrptr64 = *rdptr64;
+ wrptr64++;
+ rdptr64++;
+ }
+ else
+ for (n = 0; n < length; n += sizeof(uint32_t)) {
+ *wrptr32 = *rdptr32;
+ wrptr32++;
+ rdptr32++;
+ }
+
+ return n;
+}
+
+#define PCI_DEVICES "/sys/bus/pci/devices"
+
+static int
+nfp_acquire_process_lock(struct nfp_pcie_user *desc)
+{
+ int rc;
+ struct flock lock;
+ char lockname[30];
+
+ memset(&lock, 0, sizeof(lock));
+
+ snprintf(lockname, sizeof(lockname), "/var/lock/nfp_%s", desc->busdev);
+ desc->lock = open(lockname, O_RDWR | O_CREAT, 0666);
+ if (desc->lock < 0)
+ return desc->lock;
+
+ lock.l_type = F_WRLCK;
+ lock.l_whence = SEEK_SET;
+ rc = -1;
+ while (rc != 0) {
+ rc = fcntl(desc->lock, F_SETLKW, &lock);
+ if (rc < 0) {
+ if (errno != EAGAIN && errno != EACCES) {
+ close(desc->lock);
+ return rc;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+nfp6000_set_model(struct nfp_pcie_user *desc, struct nfp_cpp *cpp)
+{
+ char tmp_str[80];
+ uint32_t tmp;
+ int fp;
+
+ snprintf(tmp_str, sizeof(tmp_str), "%s/%s/config", PCI_DEVICES,
+ desc->busdev);
+
+ fp = open(tmp_str, O_RDONLY);
+ if (!fp)
+ return -1;
+
+ lseek(fp, 0x2e, SEEK_SET);
+
+ if (read(fp, &tmp, sizeof(tmp)) != sizeof(tmp)) {
+ printf("Error reading config file for model\n");
+ return -1;
+ }
+
+ tmp = tmp << 16;
+
+ if (close(fp) == -1)
+ return -1;
+
+ nfp_cpp_model_set(cpp, tmp);
+
+ return 0;
+}
+
+static int
+nfp6000_set_interface(struct nfp_pcie_user *desc, struct nfp_cpp *cpp)
+{
+ char tmp_str[80];
+ uint16_t tmp;
+ int fp;
+
+ snprintf(tmp_str, sizeof(tmp_str), "%s/%s/config", PCI_DEVICES,
+ desc->busdev);
+
+ fp = open(tmp_str, O_RDONLY);
+ if (!fp)
+ return -1;
+
+ lseek(fp, 0x154, SEEK_SET);
+
+ if (read(fp, &tmp, sizeof(tmp)) != sizeof(tmp)) {
+ printf("error reading config file for interface\n");
+ return -1;
+ }
+
+ if (close(fp) == -1)
+ return -1;
+
+ nfp_cpp_interface_set(cpp, tmp);
+
+ return 0;
+}
+
+#define PCI_CFG_SPACE_SIZE 256
+#define PCI_CFG_SPACE_EXP_SIZE 4096
+#define PCI_EXT_CAP_ID(header) (int)(header & 0x0000ffff)
+#define PCI_EXT_CAP_NEXT(header) ((header >> 20) & 0xffc)
+#define PCI_EXT_CAP_ID_DSN 0x03
+static int
+nfp_pci_find_next_ext_capability(int fp, int cap)
+{
+ uint32_t header;
+ int ttl;
+ int pos = PCI_CFG_SPACE_SIZE;
+
+ /* minimum 8 bytes per capability */
+ ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
+
+ lseek(fp, pos, SEEK_SET);
+ if (read(fp, &header, sizeof(header)) != sizeof(header)) {
+ printf("error reading config file for serial\n");
+ return -1;
+ }
+
+ /*
+ * If we have no capabilities, this is indicated by cap ID,
+ * cap version and next pointer all being 0.
+ */
+ if (header == 0)
+ return 0;
+
+ while (ttl-- > 0) {
+ if (PCI_EXT_CAP_ID(header) == cap)
+ return pos;
+
+ pos = PCI_EXT_CAP_NEXT(header);
+ if (pos < PCI_CFG_SPACE_SIZE)
+ break;
+
+ lseek(fp, pos, SEEK_SET);
+ if (read(fp, &header, sizeof(header)) != sizeof(header)) {
+ printf("error reading config file for serial\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+nfp6000_set_serial(struct nfp_pcie_user *desc, struct nfp_cpp *cpp)
+{
+ char tmp_str[80];
+ uint16_t tmp;
+ uint8_t serial[6];
+ int serial_len = 6;
+ int fp, pos;
+
+ snprintf(tmp_str, sizeof(tmp_str), "%s/%s/config", PCI_DEVICES,
+ desc->busdev);
+
+ fp = open(tmp_str, O_RDONLY);
+ if (!fp)
+ return -1;
+
+ pos = nfp_pci_find_next_ext_capability(fp, PCI_EXT_CAP_ID_DSN);
+ if (pos <= 0) {
+ printf("PCI_EXT_CAP_ID_DSN not found. Using default offset\n");
+ lseek(fp, 0x156, SEEK_SET);
+ } else {
+ lseek(fp, pos + 6, SEEK_SET);
+ }
+
+ if (read(fp, &tmp, sizeof(tmp)) != sizeof(tmp)) {
+ printf("error reading config file for serial\n");
+ return -1;
+ }
+
+ serial[4] = (uint8_t)((tmp >> 8) & 0xff);
+ serial[5] = (uint8_t)(tmp & 0xff);
+
+ if (read(fp, &tmp, sizeof(tmp)) != sizeof(tmp)) {
+ printf("error reading config file for serial\n");
+ return -1;
+ }
+
+ serial[2] = (uint8_t)((tmp >> 8) & 0xff);
+ serial[3] = (uint8_t)(tmp & 0xff);
+
+ if (read(fp, &tmp, sizeof(tmp)) != sizeof(tmp)) {
+ printf("error reading config file for serial\n");
+ return -1;
+ }
+
+ serial[0] = (uint8_t)((tmp >> 8) & 0xff);
+ serial[1] = (uint8_t)(tmp & 0xff);
+
+ if (close(fp) == -1)
+ return -1;
+
+ nfp_cpp_serial_set(cpp, serial, serial_len);
+
+ return 0;
+}
+
+static int
+nfp6000_set_barsz(struct nfp_pcie_user *desc)
+{
+ char tmp_str[80];
+ unsigned long start, end, flags, tmp;
+ int i;
+ FILE *fp;
+
+ snprintf(tmp_str, sizeof(tmp_str), "%s/%s/resource", PCI_DEVICES,
+ desc->busdev);
+
+ fp = fopen(tmp_str, "r");
+ if (!fp)
+ return -1;
+
+ if (fscanf(fp, "0x%lx 0x%lx 0x%lx", &start, &end, &flags) == 0) {
+ printf("error reading resource file for bar size\n");
+ fclose(fp);
+ return -1;
+ }
+
+ if (fclose(fp) == -1)
+ return -1;
+
+ tmp = (end - start) + 1;
+ i = 0;
+ while (tmp >>= 1)
+ i++;
+ desc->barsz = i;
+ return 0;
+}
+
+static int
+nfp6000_init(struct nfp_cpp *cpp, const char *devname)
+{
+ char link[120];
+ char tmp_str[80];
+ ssize_t size;
+ int ret = 0;
+ uint32_t model;
+ struct nfp_pcie_user *desc;
+
+ desc = malloc(sizeof(*desc));
+ if (!desc)
+ return -1;
+
+
+ memset(desc->busdev, 0, BUSDEV_SZ);
+ strlcpy(desc->busdev, devname, sizeof(desc->busdev));
+
+ if (cpp->driver_lock_needed) {
+ ret = nfp_acquire_process_lock(desc);
+ if (ret)
+ return -1;
+ }
+
+ snprintf(tmp_str, sizeof(tmp_str), "%s/%s/driver", PCI_DEVICES,
+ desc->busdev);
+
+ size = readlink(tmp_str, link, sizeof(link));
+
+ if (size == -1)
+ tmp_str[0] = '\0';
+
+ if (size == sizeof(link))
+ tmp_str[0] = '\0';
+
+ snprintf(tmp_str, sizeof(tmp_str), "%s/%s/resource0", PCI_DEVICES,
+ desc->busdev);
+
+ desc->device = open(tmp_str, O_RDWR);
+ if (desc->device == -1)
+ return -1;
+
+ if (nfp6000_set_model(desc, cpp) < 0)
+ return -1;
+ if (nfp6000_set_interface(desc, cpp) < 0)
+ return -1;
+ if (nfp6000_set_serial(desc, cpp) < 0)
+ return -1;
+ if (nfp6000_set_barsz(desc) < 0)
+ return -1;
+
+ desc->cfg = (char *)mmap(0, 1 << (desc->barsz - 3),
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED, desc->device, 0);
+
+ if (desc->cfg == MAP_FAILED)
+ return -1;
+
+ nfp_enable_bars(desc);
+
+ nfp_cpp_priv_set(cpp, desc);
+
+ model = __nfp_cpp_model_autodetect(cpp);
+ nfp_cpp_model_set(cpp, model);
+
+ return ret;
+}
+
+static void
+nfp6000_free(struct nfp_cpp *cpp)
+{
+ struct nfp_pcie_user *desc = nfp_cpp_priv(cpp);
+ int x;
+
+ /* Unmapping may fail if there are any pending transactions */
+ nfp_disable_bars(desc);
+ munmap(desc->cfg, 1 << (desc->barsz - 3));
+
+ for (x = ARRAY_SIZE(desc->bar); x > 0; x--) {
+ if (desc->bar[x - 1].iomem)
+ munmap(desc->bar[x - 1].iomem, 1 << (desc->barsz - 3));
+ }
+ if (cpp->driver_lock_needed)
+ close(desc->lock);
+ close(desc->device);
+ free(desc);
+}
+
+static const struct nfp_cpp_operations nfp6000_pcie_ops = {
+ .init = nfp6000_init,
+ .free = nfp6000_free,
+
+ .area_priv_size = sizeof(struct nfp6000_area_priv),
+ .area_init = nfp6000_area_init,
+ .area_acquire = nfp6000_area_acquire,
+ .area_release = nfp6000_area_release,
+ .area_mapped = nfp6000_area_mapped,
+ .area_read = nfp6000_area_read,
+ .area_write = nfp6000_area_write,
+ .area_iomem = nfp6000_area_iomem,
+};
+
+const struct nfp_cpp_operations *
+nfp_cpp_transport_operations(void)
+{
+ return &nfp6000_pcie_ops;
+}
diff --git a/drivers/net/nfp/nfpcore/nfp_cppcore.c b/drivers/net/nfp/nfpcore/nfp_cppcore.c
new file mode 100644
index 00000000..f61143f7
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_cppcore.c
@@ -0,0 +1,857 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <sys/types.h>
+
+#include <rte_byteorder.h>
+
+#include "nfp_cpp.h"
+#include "nfp_target.h"
+#include "nfp6000/nfp6000.h"
+#include "nfp6000/nfp_xpb.h"
+#include "nfp_nffw.h"
+
+#define NFP_PL_DEVICE_ID 0x00000004
+#define NFP_PL_DEVICE_ID_MASK 0xff
+
+#define NFP6000_ARM_GCSR_SOFTMODEL0 0x00400144
+
+void
+nfp_cpp_priv_set(struct nfp_cpp *cpp, void *priv)
+{
+ cpp->priv = priv;
+}
+
+void *
+nfp_cpp_priv(struct nfp_cpp *cpp)
+{
+ return cpp->priv;
+}
+
+void
+nfp_cpp_model_set(struct nfp_cpp *cpp, uint32_t model)
+{
+ cpp->model = model;
+}
+
+uint32_t
+nfp_cpp_model(struct nfp_cpp *cpp)
+{
+ if (!cpp)
+ return NFP_CPP_MODEL_INVALID;
+
+ if (cpp->model == 0)
+ cpp->model = __nfp_cpp_model_autodetect(cpp);
+
+ return cpp->model;
+}
+
+void
+nfp_cpp_interface_set(struct nfp_cpp *cpp, uint32_t interface)
+{
+ cpp->interface = interface;
+}
+
+int
+nfp_cpp_serial(struct nfp_cpp *cpp, const uint8_t **serial)
+{
+ *serial = cpp->serial;
+ return cpp->serial_len;
+}
+
+int
+nfp_cpp_serial_set(struct nfp_cpp *cpp, const uint8_t *serial,
+ size_t serial_len)
+{
+ if (cpp->serial_len)
+ free(cpp->serial);
+
+ cpp->serial = malloc(serial_len);
+ if (!cpp->serial)
+ return -1;
+
+ memcpy(cpp->serial, serial, serial_len);
+ cpp->serial_len = serial_len;
+
+ return 0;
+}
+
+uint16_t
+nfp_cpp_interface(struct nfp_cpp *cpp)
+{
+ if (!cpp)
+ return NFP_CPP_INTERFACE(NFP_CPP_INTERFACE_TYPE_INVALID, 0, 0);
+
+ return cpp->interface;
+}
+
+void *
+nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area)
+{
+ return &cpp_area[1];
+}
+
+struct nfp_cpp *
+nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area)
+{
+ return cpp_area->cpp;
+}
+
+const char *
+nfp_cpp_area_name(struct nfp_cpp_area *cpp_area)
+{
+ return cpp_area->name;
+}
+
+/*
+ * nfp_cpp_area_alloc - allocate a new CPP area
+ * @cpp: CPP handle
+ * @dest: CPP id
+ * @address: start address on CPP target
+ * @size: size of area in bytes
+ *
+ * Allocate and initialize a CPP area structure. The area must later
+ * be locked down with an 'acquire' before it can be safely accessed.
+ *
+ * NOTE: @address and @size must be 32-bit aligned values.
+ */
+struct nfp_cpp_area *
+nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, uint32_t dest,
+ const char *name, unsigned long long address,
+ unsigned long size)
+{
+ struct nfp_cpp_area *area;
+ uint64_t tmp64 = (uint64_t)address;
+ int tmp, err;
+
+ if (!cpp)
+ return NULL;
+
+ /* CPP bus uses only a 40-bit address */
+ if ((address + size) > (1ULL << 40))
+ return NFP_ERRPTR(EFAULT);
+
+ /* Remap from cpp_island to cpp_target */
+ err = nfp_target_cpp(dest, tmp64, &dest, &tmp64, cpp->imb_cat_table);
+ if (err < 0)
+ return NULL;
+
+ address = (unsigned long long)tmp64;
+
+ if (!name)
+ name = "";
+
+ area = calloc(1, sizeof(*area) + cpp->op->area_priv_size +
+ strlen(name) + 1);
+ if (!area)
+ return NULL;
+
+ area->cpp = cpp;
+ area->name = ((char *)area) + sizeof(*area) + cpp->op->area_priv_size;
+ memcpy(area->name, name, strlen(name) + 1);
+
+ /*
+ * Preserve errno around the call to area_init, since most
+ * implementations will blindly call nfp_target_action_width() for both
+ * read and write modes, and that will set errno to EINVAL.
+ */
+ tmp = errno;
+
+ err = cpp->op->area_init(area, dest, address, size);
+ if (err < 0) {
+ free(area);
+ return NULL;
+ }
+
+ /* Restore errno */
+ errno = tmp;
+
+ area->offset = address;
+ area->size = size;
+
+ return area;
+}
+
+struct nfp_cpp_area *
+nfp_cpp_area_alloc(struct nfp_cpp *cpp, uint32_t dest,
+ unsigned long long address, unsigned long size)
+{
+ return nfp_cpp_area_alloc_with_name(cpp, dest, NULL, address, size);
+}
+
+/*
+ * nfp_cpp_area_alloc_acquire - allocate a new CPP area and lock it down
+ *
+ * @cpp: CPP handle
+ * @dest: CPP id
+ * @address: start address on CPP target
+ * @size: size of area
+ *
+ * Allocate and initialize a CPP area structure, and lock it down so
+ * that it can be accessed directly.
+ *
+ * NOTE: @address and @size must be 32-bit aligned values.
+ *
+ * NOTE: The area must also be 'released' when the structure is freed.
+ */
+struct nfp_cpp_area *
+nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp, uint32_t destination,
+ unsigned long long address, unsigned long size)
+{
+ struct nfp_cpp_area *area;
+
+ area = nfp_cpp_area_alloc(cpp, destination, address, size);
+ if (!area)
+ return NULL;
+
+ if (nfp_cpp_area_acquire(area)) {
+ nfp_cpp_area_free(area);
+ return NULL;
+ }
+
+ return area;
+}
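+
+/*
+ * Usage sketch (illustrative only; the CPP target, address and length below
+ * are hypothetical placeholders, not values required by this driver):
+ *
+ *   uint32_t dest = NFP_CPP_ID(NFP_CPP_TARGET_MU, NFP_CPP_ACTION_RW, 0);
+ *   uint64_t buf[4];
+ *   struct nfp_cpp_area *area;
+ *
+ *   area = nfp_cpp_area_alloc_acquire(cpp, dest, 0x8000, sizeof(buf));
+ *   if (area) {
+ *           nfp_cpp_area_read(area, 0, buf, sizeof(buf));
+ *           nfp_cpp_area_release_free(area);
+ *   }
+ */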
+
+/*
+ * nfp_cpp_area_free - free up the CPP area
+ * area: CPP area handle
+ *
+ * Frees up memory resources held by the CPP area.
+ */
+void
+nfp_cpp_area_free(struct nfp_cpp_area *area)
+{
+ if (area->cpp->op->area_cleanup)
+ area->cpp->op->area_cleanup(area);
+ free(area);
+}
+
+/*
+ * nfp_cpp_area_release_free - release CPP area and free it
+ * area: CPP area handle
+ *
+ * Releases the CPP area and frees up memory resources held by it.
+ */
+void
+nfp_cpp_area_release_free(struct nfp_cpp_area *area)
+{
+ nfp_cpp_area_release(area);
+ nfp_cpp_area_free(area);
+}
+
+/*
+ * nfp_cpp_area_acquire - lock down a CPP area for access
+ * @area: CPP area handle
+ *
+ * Locks down the CPP area for a potential long term activity. Area
+ * must always be locked down before being accessed.
+ */
+int
+nfp_cpp_area_acquire(struct nfp_cpp_area *area)
+{
+ if (area->cpp->op->area_acquire) {
+ int err = area->cpp->op->area_acquire(area);
+
+ if (err < 0)
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * nfp_cpp_area_release - release a locked down CPP area
+ * @area: CPP area handle
+ *
+ * Releases a previously locked down CPP area.
+ */
+void
+nfp_cpp_area_release(struct nfp_cpp_area *area)
+{
+ if (area->cpp->op->area_release)
+ area->cpp->op->area_release(area);
+}
+
+/*
+ * nfp_cpp_area_iomem() - get IOMEM region for CPP area
+ *
+ * @area: CPP area handle
+ *
+ * Returns an iomem pointer for use with readl()/writel() style operations.
+ *
+ * NOTE: Area must have been locked down with an 'acquire'.
+ *
+ * Return: pointer to the area, or NULL
+ */
+void *
+nfp_cpp_area_iomem(struct nfp_cpp_area *area)
+{
+ void *iomem = NULL;
+
+ if (area->cpp->op->area_iomem)
+ iomem = area->cpp->op->area_iomem(area);
+
+ return iomem;
+}
+
+/*
+ * nfp_cpp_area_read - read data from CPP area
+ *
+ * @area: CPP area handle
+ * @offset: offset into CPP area
+ * @kernel_vaddr: kernel address to put data into
+ * @length: number of bytes to read
+ *
+ * Read data from indicated CPP region.
+ *
+ * NOTE: @offset and @length must be 32-bit aligned values.
+ *
+ * NOTE: Area must have been locked down with an 'acquire'.
+ */
+int
+nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset,
+ void *kernel_vaddr, size_t length)
+{
+ if ((offset + length) > area->size)
+ return NFP_ERRNO(EFAULT);
+
+ return area->cpp->op->area_read(area, kernel_vaddr, offset, length);
+}
+
+/*
+ * nfp_cpp_area_write - write data to CPP area
+ *
+ * @area: CPP area handle
+ * @offset: offset into CPP area
+ * @kernel_vaddr: kernel address to read data from
+ * @length: number of bytes to write
+ *
+ * Write data to indicated CPP region.
+ *
+ * NOTE: @offset and @length must be 32-bit aligned values.
+ *
+ * NOTE: Area must have been locked down with an 'acquire'.
+ */
+int
+nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset,
+ const void *kernel_vaddr, size_t length)
+{
+ if ((offset + length) > area->size)
+ return NFP_ERRNO(EFAULT);
+
+ return area->cpp->op->area_write(area, kernel_vaddr, offset, length);
+}
+
+void *
+nfp_cpp_area_mapped(struct nfp_cpp_area *area)
+{
+ if (area->cpp->op->area_mapped)
+ return area->cpp->op->area_mapped(area);
+ return NULL;
+}
+
+/*
+ * nfp_cpp_area_check_range - check if address range fits in CPP area
+ *
+ * @area: CPP area handle
+ * @offset: offset into CPP area
+ * @length: size of address range in bytes
+ *
+ * Check if address range fits within CPP area. Return 0 if area fits
+ * or -1 on error.
+ */
+int
+nfp_cpp_area_check_range(struct nfp_cpp_area *area, unsigned long long offset,
+ unsigned long length)
+{
+ if (((offset + length) > area->size))
+ return NFP_ERRNO(EFAULT);
+
+ return 0;
+}
+
+/*
+ * Return the correct CPP address, and fixup xpb_addr as needed,
+ * based upon NFP model.
+ */
+static uint32_t
+nfp_xpb_to_cpp(struct nfp_cpp *cpp, uint32_t *xpb_addr)
+{
+ uint32_t xpb;
+ int island;
+
+ if (!NFP_CPP_MODEL_IS_6000(cpp->model))
+ return 0;
+
+ xpb = NFP_CPP_ID(14, NFP_CPP_ACTION_RW, 0);
+
+ /*
+ * Ensure that non-local XPB accesses go out through the
+ * global XPBM bus.
+ */
+ island = ((*xpb_addr) >> 24) & 0x3f;
+
+ if (!island)
+ return xpb;
+
+ if (island == 1) {
+ /*
+ * Accesses to the ARM Island overlay uses Island 0
+ * Global Bit
+ */
+ (*xpb_addr) &= ~0x7f000000;
+ if (*xpb_addr < 0x60000)
+ *xpb_addr |= (1 << 30);
+ else
+ /* And only non-ARM interfaces use island id = 1 */
+ if (NFP_CPP_INTERFACE_TYPE_of(nfp_cpp_interface(cpp)) !=
+ NFP_CPP_INTERFACE_TYPE_ARM)
+ *xpb_addr |= (1 << 24);
+ } else {
+ (*xpb_addr) |= (1 << 30);
+ }
+
+ return xpb;
+}
+
+int
+nfp_cpp_area_readl(struct nfp_cpp_area *area, unsigned long offset,
+ uint32_t *value)
+{
+ int sz;
+ uint32_t tmp = 0;
+
+ sz = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
+ *value = rte_le_to_cpu_32(tmp);
+
+ return (sz == sizeof(*value)) ? 0 : -1;
+}
+
+int
+nfp_cpp_area_writel(struct nfp_cpp_area *area, unsigned long offset,
+ uint32_t value)
+{
+ int sz;
+
+ value = rte_cpu_to_le_32(value);
+ sz = nfp_cpp_area_write(area, offset, &value, sizeof(value));
+ return (sz == sizeof(value)) ? 0 : -1;
+}
+
+int
+nfp_cpp_area_readq(struct nfp_cpp_area *area, unsigned long offset,
+ uint64_t *value)
+{
+ int sz;
+ uint64_t tmp = 0;
+
+ sz = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
+ *value = rte_le_to_cpu_64(tmp);
+
+ return (sz == sizeof(*value)) ? 0 : -1;
+}
+
+int
+nfp_cpp_area_writeq(struct nfp_cpp_area *area, unsigned long offset,
+ uint64_t value)
+{
+ int sz;
+
+ value = rte_cpu_to_le_64(value);
+ sz = nfp_cpp_area_write(area, offset, &value, sizeof(value));
+
+ return (sz == sizeof(value)) ? 0 : -1;
+}
+
+int
+nfp_cpp_readl(struct nfp_cpp *cpp, uint32_t cpp_id, unsigned long long address,
+ uint32_t *value)
+{
+ int sz;
+ uint32_t tmp;
+
+ sz = nfp_cpp_read(cpp, cpp_id, address, &tmp, sizeof(tmp));
+ *value = rte_le_to_cpu_32(tmp);
+
+ return (sz == sizeof(*value)) ? 0 : -1;
+}
+
+int
+nfp_cpp_writel(struct nfp_cpp *cpp, uint32_t cpp_id, unsigned long long address,
+ uint32_t value)
+{
+ int sz;
+
+ value = rte_cpu_to_le_32(value);
+ sz = nfp_cpp_write(cpp, cpp_id, address, &value, sizeof(value));
+
+ return (sz == sizeof(value)) ? 0 : -1;
+}
+
+int
+nfp_cpp_readq(struct nfp_cpp *cpp, uint32_t cpp_id, unsigned long long address,
+ uint64_t *value)
+{
+ int sz;
+ uint64_t tmp;
+
+ sz = nfp_cpp_read(cpp, cpp_id, address, &tmp, sizeof(tmp));
+ *value = rte_le_to_cpu_64(tmp);
+
+ return (sz == sizeof(*value)) ? 0 : -1;
+}
+
+int
+nfp_cpp_writeq(struct nfp_cpp *cpp, uint32_t cpp_id, unsigned long long address,
+ uint64_t value)
+{
+ int sz;
+
+ value = rte_cpu_to_le_64(value);
+ sz = nfp_cpp_write(cpp, cpp_id, address, &value, sizeof(value));
+
+ return (sz == sizeof(value)) ? 0 : -1;
+}
+
+int
+nfp_xpb_writel(struct nfp_cpp *cpp, uint32_t xpb_addr, uint32_t value)
+{
+ uint32_t cpp_dest;
+
+ cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr);
+
+ return nfp_cpp_writel(cpp, cpp_dest, xpb_addr, value);
+}
+
+int
+nfp_xpb_readl(struct nfp_cpp *cpp, uint32_t xpb_addr, uint32_t *value)
+{
+ uint32_t cpp_dest;
+
+ cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr);
+
+ return nfp_cpp_readl(cpp, cpp_dest, xpb_addr, value);
+}
+
+static struct nfp_cpp *
+nfp_cpp_alloc(const char *devname, int driver_lock_needed)
+{
+ const struct nfp_cpp_operations *ops;
+ struct nfp_cpp *cpp;
+ int err;
+
+ ops = nfp_cpp_transport_operations();
+
+ if (!ops || !ops->init)
+ return NFP_ERRPTR(EINVAL);
+
+ cpp = calloc(1, sizeof(*cpp));
+ if (!cpp)
+ return NULL;
+
+ cpp->op = ops;
+ cpp->driver_lock_needed = driver_lock_needed;
+
+ if (cpp->op->init) {
+ err = cpp->op->init(cpp, devname);
+ if (err < 0) {
+ free(cpp);
+ return NULL;
+ }
+ }
+
+ if (NFP_CPP_MODEL_IS_6000(nfp_cpp_model(cpp))) {
+ uint32_t xpbaddr;
+ size_t tgt;
+
+ for (tgt = 0; tgt < ARRAY_SIZE(cpp->imb_cat_table); tgt++) {
+ /* Hardcoded XPB IMB Base, island 0 */
+ xpbaddr = 0x000a0000 + (tgt * 4);
+ err = nfp_xpb_readl(cpp, xpbaddr,
+ (uint32_t *)&cpp->imb_cat_table[tgt]);
+ if (err < 0) {
+ free(cpp);
+ return NULL;
+ }
+ }
+ }
+
+ return cpp;
+}
+
+/*
+ * nfp_cpp_free - free the CPP handle
+ * @cpp: CPP handle
+ */
+void
+nfp_cpp_free(struct nfp_cpp *cpp)
+{
+ if (cpp->op && cpp->op->free)
+ cpp->op->free(cpp);
+
+ if (cpp->serial_len)
+ free(cpp->serial);
+
+ free(cpp);
+}
+
+struct nfp_cpp *
+nfp_cpp_from_device_name(const char *devname, int driver_lock_needed)
+{
+ return nfp_cpp_alloc(devname, driver_lock_needed);
+}
+
+/*
+ * Modify bits of a 32-bit value from the XPB bus
+ *
+ * @param cpp NFP CPP device handle
+ * @param xpb_tgt XPB target and address
+ * @param mask mask of bits to alter
+ * @param value value to modify
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int
+nfp_xpb_writelm(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t mask,
+ uint32_t value)
+{
+ int err;
+ uint32_t tmp;
+
+ err = nfp_xpb_readl(cpp, xpb_tgt, &tmp);
+ if (err < 0)
+ return err;
+
+ tmp &= ~mask;
+ tmp |= (mask & value);
+ return nfp_xpb_writel(cpp, xpb_tgt, tmp);
+}
+
+/*
+ * Wait for bits of a 32-bit value from the XPB bus to match a value
+ *
+ * @param cpp NFP CPP device handle
+ * @param xpb_tgt XPB target and address
+ * @param mask mask of bits to alter
+ * @param value value to monitor for
+ * @param timeout_us maximum number of us to wait (-1 for forever)
+ *
+ * @return >= 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int
+nfp_xpb_waitlm(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t mask,
+ uint32_t value, int timeout_us)
+{
+ uint32_t tmp;
+ int err;
+
+ do {
+ err = nfp_xpb_readl(cpp, xpb_tgt, &tmp);
+ if (err < 0)
+ goto exit;
+
+ if ((tmp & mask) == (value & mask)) {
+ if (timeout_us < 0)
+ timeout_us = 0;
+ break;
+ }
+
+ if (timeout_us < 0)
+ continue;
+
+ timeout_us -= 100;
+ usleep(100);
+ } while (timeout_us >= 0);
+
+ if (timeout_us < 0)
+ err = NFP_ERRNO(ETIMEDOUT);
+ else
+ err = timeout_us;
+
+exit:
+ return err;
+}
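+
+/*
+ * Usage sketch (illustrative only; the XPB addresses and the bit mask are
+ * hypothetical placeholders, not real NFP register definitions):
+ *
+ *   nfp_xpb_writelm(cpp, ctrl_xpb_addr, 0x1, 0x1);
+ *   if (nfp_xpb_waitlm(cpp, status_xpb_addr, 0x1, 0x1, 1000) < 0)
+ *           printf("timed out waiting for status bit\n");
+ */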
+
+/*
+ * nfp_cpp_read - read from CPP target
+ * @cpp: CPP handle
+ * @destination: CPP id
+ * @address: offset into CPP target
+ * @kernel_vaddr: kernel buffer for result
+ * @length: number of bytes to read
+ */
+int
+nfp_cpp_read(struct nfp_cpp *cpp, uint32_t destination,
+ unsigned long long address, void *kernel_vaddr, size_t length)
+{
+ struct nfp_cpp_area *area;
+ int err;
+
+ area = nfp_cpp_area_alloc_acquire(cpp, destination, address, length);
+ if (!area) {
+ printf("Area allocation/acquire failed\n");
+ return -1;
+ }
+
+ err = nfp_cpp_area_read(area, 0, kernel_vaddr, length);
+
+ nfp_cpp_area_release_free(area);
+ return err;
+}
+
+/*
+ * nfp_cpp_write - write to CPP target
+ * @cpp: CPP handle
+ * @destination: CPP id
+ * @address: offset into CPP target
+ * @kernel_vaddr: kernel buffer to read from
+ * @length: number of bytes to write
+ */
+int
+nfp_cpp_write(struct nfp_cpp *cpp, uint32_t destination,
+ unsigned long long address, const void *kernel_vaddr,
+ size_t length)
+{
+ struct nfp_cpp_area *area;
+ int err;
+
+ area = nfp_cpp_area_alloc_acquire(cpp, destination, address, length);
+ if (!area)
+ return -1;
+
+ err = nfp_cpp_area_write(area, 0, kernel_vaddr, length);
+
+ nfp_cpp_area_release_free(area);
+ return err;
+}
+
+/*
+ * nfp_cpp_area_fill - fill a CPP area with a value
+ * @area: CPP area
+ * @offset: offset into CPP area
+ * @value: value to fill with
+ * @length: length of area to fill
+ */
+int
+nfp_cpp_area_fill(struct nfp_cpp_area *area, unsigned long offset,
+ uint32_t value, size_t length)
+{
+ int err;
+ size_t i;
+ uint64_t value64;
+
+ value = rte_cpu_to_le_32(value);
+ value64 = ((uint64_t)value << 32) | value;
+
+ if ((offset + length) > area->size)
+ return NFP_ERRNO(EINVAL);
+
+ if ((area->offset + offset) & 3)
+ return NFP_ERRNO(EINVAL);
+
+ if (((area->offset + offset) & 7) == 4 && length >= 4) {
+ err = nfp_cpp_area_write(area, offset, &value, sizeof(value));
+ if (err < 0)
+ return err;
+ if (err != sizeof(value))
+ return NFP_ERRNO(ENOSPC);
+ offset += sizeof(value);
+ length -= sizeof(value);
+ }
+
+ for (i = 0; (i + sizeof(value)) < length; i += sizeof(value64)) {
+ err =
+ nfp_cpp_area_write(area, offset + i, &value64,
+ sizeof(value64));
+ if (err < 0)
+ return err;
+ if (err != sizeof(value64))
+ return NFP_ERRNO(ENOSPC);
+ }
+
+ if ((i + sizeof(value)) <= length) {
+ err =
+ nfp_cpp_area_write(area, offset + i, &value, sizeof(value));
+ if (err < 0)
+ return err;
+ if (err != sizeof(value))
+ return NFP_ERRNO(ENOSPC);
+ i += sizeof(value);
+ }
+
+ return (int)i;
+}
+
+/*
+ * NOTE: This code should not use nfp_xpb_* functions,
+ * as those are model-specific
+ */
+uint32_t
+__nfp_cpp_model_autodetect(struct nfp_cpp *cpp)
+{
+ uint32_t arm_id = NFP_CPP_ID(NFP_CPP_TARGET_ARM, 0, 0);
+ uint32_t model = 0;
+
+ nfp_cpp_readl(cpp, arm_id, NFP6000_ARM_GCSR_SOFTMODEL0, &model);
+
+ if (NFP_CPP_MODEL_IS_6000(model)) {
+ uint32_t tmp;
+
+ nfp_cpp_model_set(cpp, model);
+
+ /* The PL's PluDeviceID revision code is authoritative */
+ model &= ~0xff;
+ nfp_xpb_readl(cpp, NFP_XPB_DEVICE(1, 1, 16) +
+ NFP_PL_DEVICE_ID, &tmp);
+ model |= (NFP_PL_DEVICE_ID_MASK & tmp) - 0x10;
+ }
+
+ return model;
+}
+
+/*
+ * nfp_cpp_map_area() - Helper function to map an area
+ * @cpp: NFP CPP handler
+ * @domain: CPP domain
+ * @target: CPP target
+ * @addr: CPP address
+ * @size: Size of the area
+ * @area: Area handle (output)
+ *
+ * Map an area of IOMEM access. To undo the effect of this function call
+ * @nfp_cpp_area_release_free(*area).
+ *
+ * Return: Pointer to memory mapped area or ERR_PTR
+ */
+uint8_t *
+nfp_cpp_map_area(struct nfp_cpp *cpp, int domain, int target, uint64_t addr,
+ unsigned long size, struct nfp_cpp_area **area)
+{
+ uint8_t *res;
+ uint32_t dest;
+
+ dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, domain);
+
+ *area = nfp_cpp_area_alloc_acquire(cpp, dest, addr, size);
+ if (!*area)
+ goto err_eio;
+
+ res = nfp_cpp_area_iomem(*area);
+ if (!res)
+ goto err_release_free;
+
+ return res;
+
+err_release_free:
+ nfp_cpp_area_release_free(*area);
+err_eio:
+ return NULL;
+}
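+
+/*
+ * Usage sketch (illustrative only; the domain, target, address and size are
+ * hypothetical placeholders). The returned pointer gives direct IOMEM access
+ * until nfp_cpp_area_release_free(area) is called:
+ *
+ *   struct nfp_cpp_area *area;
+ *   uint8_t *mem;
+ *
+ *   mem = nfp_cpp_map_area(cpp, 0, NFP_CPP_TARGET_MU, 0x8000, 4096, &area);
+ *   if (mem)
+ *           nfp_cpp_area_release_free(area);
+ */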
diff --git a/drivers/net/nfp/nfpcore/nfp_crc.c b/drivers/net/nfp/nfpcore/nfp_crc.c
new file mode 100644
index 00000000..20431bf8
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_crc.c
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <stdio.h>
+#include <inttypes.h>
+
+#include "nfp_crc.h"
+
+static inline uint32_t
+nfp_crc32_be_generic(uint32_t crc, unsigned char const *p, size_t len,
+ uint32_t polynomial)
+{
+ int i;
+ while (len--) {
+ crc ^= *p++ << 24;
+ for (i = 0; i < 8; i++)
+ crc = (crc << 1) ^ ((crc & 0x80000000) ? polynomial :
+ 0);
+ }
+ return crc;
+}
+
+static inline uint32_t
+nfp_crc32_be(uint32_t crc, unsigned char const *p, size_t len)
+{
+ return nfp_crc32_be_generic(crc, p, len, CRCPOLY_BE);
+}
+
+static uint32_t
+nfp_crc32_posix_end(uint32_t crc, size_t total_len)
+{
+ /* Extend with the length of the string. */
+ while (total_len != 0) {
+ uint8_t c = total_len & 0xff;
+
+ crc = nfp_crc32_be(crc, &c, 1);
+ total_len >>= 8;
+ }
+
+ return ~crc;
+}
+
+uint32_t
+nfp_crc32_posix(const void *buff, size_t len)
+{
+ return nfp_crc32_posix_end(nfp_crc32_be(0, buff, len), len);
+}
diff --git a/drivers/net/nfp/nfpcore/nfp_crc.h b/drivers/net/nfp/nfpcore/nfp_crc.h
new file mode 100644
index 00000000..f99c89fc
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_crc.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_CRC_H__
+#define __NFP_CRC_H__
+
+/*
+ * There are multiple 16-bit CRC polynomials in common use, but this is
+ * *the* standard CRC-32 polynomial, first popularized by Ethernet.
+ * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
+ */
+#define CRCPOLY_LE 0xedb88320
+#define CRCPOLY_BE 0x04c11db7
+
+uint32_t nfp_crc32_posix(const void *buff, size_t len);
+
+#endif
diff --git a/drivers/net/nfp/nfpcore/nfp_hwinfo.c b/drivers/net/nfp/nfpcore/nfp_hwinfo.c
new file mode 100644
index 00000000..c0516bf8
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_hwinfo.c
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+/* Parse the hwinfo table that the ARM firmware builds in the ARM scratch SRAM
+ * after chip reset.
+ *
+ * Examples of the fields:
+ * me.count = 40
+ * me.mask = 0x7f_ffff_ffff
+ *
+ * me.count is the total number of MEs on the system.
+ * me.mask is the bitmask of MEs that are available for application usage.
+ *
+ * (ie, in this example, ME 39 has been reserved by boardconfig.)
+ */
+
+#include <stdio.h>
+#include <time.h>
+
+#include "nfp_cpp.h"
+#include "nfp6000/nfp6000.h"
+#include "nfp_resource.h"
+#include "nfp_hwinfo.h"
+#include "nfp_crc.h"
+
+static int
+nfp_hwinfo_is_updating(struct nfp_hwinfo *hwinfo)
+{
+ return hwinfo->version & NFP_HWINFO_VERSION_UPDATING;
+}
+
+static int
+nfp_hwinfo_db_walk(struct nfp_hwinfo *hwinfo, uint32_t size)
+{
+ const char *key, *val, *end = hwinfo->data + size;
+
+ for (key = hwinfo->data; *key && key < end;
+ key = val + strlen(val) + 1) {
+ val = key + strlen(key) + 1;
+ if (val >= end) {
+ printf("Bad HWINFO - overflowing key\n");
+ return -EINVAL;
+ }
+
+ if (val + strlen(val) + 1 > end) {
+ printf("Bad HWINFO - overflowing value\n");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int
+nfp_hwinfo_db_validate(struct nfp_hwinfo *db, uint32_t len)
+{
+ uint32_t size, new_crc, *crc;
+
+ size = db->size;
+ if (size > len) {
+ printf("Unsupported hwinfo size %u > %u\n", size, len);
+ return -EINVAL;
+ }
+
+ size -= sizeof(uint32_t);
+ new_crc = nfp_crc32_posix((char *)db, size);
+ crc = (uint32_t *)(db->start + size);
+ if (new_crc != *crc) {
+ printf("Corrupt hwinfo table (CRC mismatch)\n");
+ printf("\tcalculated 0x%x, expected 0x%x\n", new_crc, *crc);
+ return -EINVAL;
+ }
+
+ return nfp_hwinfo_db_walk(db, size);
+}
+
+static struct nfp_hwinfo *
+nfp_hwinfo_try_fetch(struct nfp_cpp *cpp, size_t *cpp_size)
+{
+ struct nfp_hwinfo *header;
+ void *res;
+ uint64_t cpp_addr;
+ uint32_t cpp_id;
+ int err;
+ uint8_t *db;
+
+ res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_HWINFO);
+ if (res) {
+ cpp_id = nfp_resource_cpp_id(res);
+ cpp_addr = nfp_resource_address(res);
+ *cpp_size = nfp_resource_size(res);
+
+ nfp_resource_release(res);
+
+ if (*cpp_size < HWINFO_SIZE_MIN)
+ return NULL;
+ } else {
+ return NULL;
+ }
+
+ db = malloc(*cpp_size + 1);
+ if (!db)
+ return NULL;
+
+ err = nfp_cpp_read(cpp, cpp_id, cpp_addr, db, *cpp_size);
+ if (err != (int)*cpp_size)
+ goto exit_free;
+
+ header = (void *)db;
+ printf("NFP HWINFO header: %08x\n", *(uint32_t *)header);
+ if (nfp_hwinfo_is_updating(header))
+ goto exit_free;
+
+ if (header->version != NFP_HWINFO_VERSION_2) {
+ printf("Unknown HWInfo version: 0x%08x\n",
+ header->version);
+ goto exit_free;
+ }
+
+ /* NULL-terminate for safety */
+ db[*cpp_size] = '\0';
+
+ return (void *)db;
+exit_free:
+ free(db);
+ return NULL;
+}
+
+static struct nfp_hwinfo *
+nfp_hwinfo_fetch(struct nfp_cpp *cpp, size_t *hwdb_size)
+{
+ struct timespec wait;
+ struct nfp_hwinfo *db;
+ int count;
+
+ wait.tv_sec = 0;
+ wait.tv_nsec = 10000000;
+ count = 0;
+
+ for (;;) {
+ db = nfp_hwinfo_try_fetch(cpp, hwdb_size);
+ if (db)
+ return db;
+
+ nanosleep(&wait, NULL);
+ if (count++ > 200) {
+ printf("NFP access error\n");
+ return NULL;
+ }
+ }
+}
+
+struct nfp_hwinfo *
+nfp_hwinfo_read(struct nfp_cpp *cpp)
+{
+ struct nfp_hwinfo *db;
+ size_t hwdb_size = 0;
+ int err;
+
+ db = nfp_hwinfo_fetch(cpp, &hwdb_size);
+ if (!db)
+ return NULL;
+
+ err = nfp_hwinfo_db_validate(db, hwdb_size);
+ if (err) {
+ free(db);
+ return NULL;
+ }
+ return db;
+}
+
+/*
+ * nfp_hwinfo_lookup() - Find a value in the HWInfo table by name
+ * @hwinfo: NFP HWinfo table
+ * @lookup: HWInfo name to search for
+ *
+ * Return: Value of the HWInfo name, or NULL
+ */
+const char *
+nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup)
+{
+ const char *key, *val, *end;
+
+ if (!hwinfo || !lookup)
+ return NULL;
+
+ end = hwinfo->data + hwinfo->size - sizeof(uint32_t);
+
+ for (key = hwinfo->data; *key && key < end;
+ key = val + strlen(val) + 1) {
+ val = key + strlen(key) + 1;
+
+ if (strcmp(key, lookup) == 0)
+ return val;
+ }
+
+ return NULL;
+}
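+
+/*
+ * Usage sketch (illustrative only; "assembly.model" is a hypothetical key
+ * used purely for demonstration):
+ *
+ *   struct nfp_hwinfo *hwinfo = nfp_hwinfo_read(cpp);
+ *   if (hwinfo) {
+ *           const char *val = nfp_hwinfo_lookup(hwinfo, "assembly.model");
+ *           if (val)
+ *                   printf("assembly.model = %s\n", val);
+ *           free(hwinfo);
+ *   }
+ */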
diff --git a/drivers/net/nfp/nfpcore/nfp_hwinfo.h b/drivers/net/nfp/nfpcore/nfp_hwinfo.h
new file mode 100644
index 00000000..ccc61632
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_hwinfo.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_HWINFO_H__
+#define __NFP_HWINFO_H__
+
+#include <inttypes.h>
+
+#define HWINFO_SIZE_MIN 0x100
+
+/*
+ * The Hardware Info Table defines the properties of the system.
+ *
+ * HWInfo v1 Table (fixed size)
+ *
+ * 0x0000: uint32_t version Hardware Info Table version (1.0)
+ * 0x0004: uint32_t size Total size of the table, including the
+ * CRC32 (IEEE 802.3)
+ * 0x0008: uint32_t jumptab Offset of key/value table
+ * 0x000c: uint32_t keys Total number of keys in the key/value
+ * table
+ * NNNNNN: Key/value jump table and string data
+ * (size - 4): uint32_t crc32 CRC32 (same as IEEE 802.3, POSIX csum, etc)
+ * CRC32("",0) = ~0, CRC32("a",1) = 0x48C279FE
+ *
+ * HWInfo v2 Table (variable size)
+ *
+ * 0x0000: uint32_t version Hardware Info Table version (2.0)
+ * 0x0004: uint32_t size Current size of the data area, excluding
+ * CRC32
+ * 0x0008: uint32_t limit Maximum size of the table
+ * 0x000c: uint32_t reserved Unused, set to zero
+ * NNNNNN: Key/value data
+ * (size - 4): uint32_t crc32 CRC32 (same as IEEE 802.3, POSIX csum, etc)
+ * CRC32("",0) = ~0, CRC32("a",1) = 0x48C279FE
+ *
+ * If the HWInfo table is in the process of being updated, the low bit of
+ * version will be set.
+ *
+ * HWInfo v1 Key/Value Table
+ * -------------------------
+ *
+ * The key/value table is a set of offsets to ASCIIZ strings which have
+ * been strcmp(3) sorted (yes, please use bsearch(3) on the table).
+ *
+ * All keys are guaranteed to be unique.
+ *
+ * N+0: uint32_t key_1 Offset to the first key
+ * N+4: uint32_t val_1 Offset to the first value
+ * N+8: uint32_t key_2 Offset to the second key
+ * N+c: uint32_t val_2 Offset to the second value
+ * ...
+ *
+ * HWInfo v2 Key/Value Table
+ * -------------------------
+ *
+ * Packed UTF8Z strings, ie 'key1\000value1\000key2\000value2\000'
+ *
+ * Unsorted.
+ */
+
+#define NFP_HWINFO_VERSION_1 ('H' << 24 | 'I' << 16 | 1 << 8 | 0 << 1 | 0)
+#define NFP_HWINFO_VERSION_2 ('H' << 24 | 'I' << 16 | 2 << 8 | 0 << 1 | 0)
+#define NFP_HWINFO_VERSION_UPDATING BIT(0)
+
+struct nfp_hwinfo {
+ uint8_t start[0];
+
+ uint32_t version;
+ uint32_t size;
+
+ /* v2 specific fields */
+ uint32_t limit;
+ uint32_t resv;
+
+ char data[];
+};
+
+struct nfp_hwinfo *nfp_hwinfo_read(struct nfp_cpp *cpp);
+
+const char *nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup);
+
+#endif
diff --git a/drivers/net/nfp/nfpcore/nfp_mip.c b/drivers/net/nfp/nfpcore/nfp_mip.c
new file mode 100644
index 00000000..c86966df
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_mip.c
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <stdio.h>
+#include <rte_byteorder.h>
+
+#include "nfp_cpp.h"
+#include "nfp_mip.h"
+#include "nfp_nffw.h"
+
+#define NFP_MIP_SIGNATURE rte_cpu_to_le_32(0x0050494d) /* "MIP\0" */
+#define NFP_MIP_VERSION rte_cpu_to_le_32(1)
+#define NFP_MIP_MAX_OFFSET (256 * 1024)
+
+struct nfp_mip {
+ uint32_t signature;
+ uint32_t mip_version;
+ uint32_t mip_size;
+ uint32_t first_entry;
+
+ uint32_t version;
+ uint32_t buildnum;
+ uint32_t buildtime;
+ uint32_t loadtime;
+
+ uint32_t symtab_addr;
+ uint32_t symtab_size;
+ uint32_t strtab_addr;
+ uint32_t strtab_size;
+
+ char name[16];
+ char toolchain[32];
+};
+
+/* Read memory and check if it could be a valid MIP */
+static int
+nfp_mip_try_read(struct nfp_cpp *cpp, uint32_t cpp_id, uint64_t addr,
+ struct nfp_mip *mip)
+{
+ int ret;
+
+ ret = nfp_cpp_read(cpp, cpp_id, addr, mip, sizeof(*mip));
+ if (ret != sizeof(*mip)) {
+ printf("Failed to read MIP data (%d, %zu)\n",
+ ret, sizeof(*mip));
+ return -EIO;
+ }
+ if (mip->signature != NFP_MIP_SIGNATURE) {
+ printf("Incorrect MIP signature (0x%08x)\n",
+ rte_le_to_cpu_32(mip->signature));
+ return -EINVAL;
+ }
+ if (mip->mip_version != NFP_MIP_VERSION) {
+ printf("Unsupported MIP version (%d)\n",
+ rte_le_to_cpu_32(mip->mip_version));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Try to locate MIP using the resource table */
+static int
+nfp_mip_read_resource(struct nfp_cpp *cpp, struct nfp_mip *mip)
+{
+ struct nfp_nffw_info *nffw_info;
+ uint32_t cpp_id;
+ uint64_t addr;
+ int err;
+
+ nffw_info = nfp_nffw_info_open(cpp);
+ if (!nffw_info)
+ return -ENODEV;
+
+ err = nfp_nffw_info_mip_first(nffw_info, &cpp_id, &addr);
+ if (err)
+ goto exit_close_nffw;
+
+ err = nfp_mip_try_read(cpp, cpp_id, addr, mip);
+exit_close_nffw:
+ nfp_nffw_info_close(nffw_info);
+ return err;
+}
+
+/*
+ * nfp_mip_open() - Get device MIP structure
+ * @cpp: NFP CPP Handle
+ *
+ * Copy MIP structure from NFP device and return it. The returned
+ * structure is handled internally by the library and should be
+ * freed by calling nfp_mip_close().
+ *
+ * Return: pointer to mip, NULL on failure.
+ */
+struct nfp_mip *
+nfp_mip_open(struct nfp_cpp *cpp)
+{
+ struct nfp_mip *mip;
+ int err;
+
+ mip = malloc(sizeof(*mip));
+ if (!mip)
+ return NULL;
+
+ err = nfp_mip_read_resource(cpp, mip);
+ if (err) {
+ free(mip);
+ return NULL;
+ }
+
+ mip->name[sizeof(mip->name) - 1] = 0;
+
+ return mip;
+}
+
+void
+nfp_mip_close(struct nfp_mip *mip)
+{
+ free(mip);
+}
+
+const char *
+nfp_mip_name(const struct nfp_mip *mip)
+{
+ return mip->name;
+}
+
+/*
+ * nfp_mip_symtab() - Get the address and size of the MIP symbol table
+ * @mip: MIP handle
+ * @addr: Location for NFP DDR address of MIP symbol table
+ * @size: Location for size of MIP symbol table
+ */
+void
+nfp_mip_symtab(const struct nfp_mip *mip, uint32_t *addr, uint32_t *size)
+{
+ *addr = rte_le_to_cpu_32(mip->symtab_addr);
+ *size = rte_le_to_cpu_32(mip->symtab_size);
+}
+
+/*
+ * nfp_mip_strtab() - Get the address and size of the MIP symbol name table
+ * @mip: MIP handle
+ * @addr: Location for NFP DDR address of MIP symbol name table
+ * @size: Location for size of MIP symbol name table
+ */
+void
+nfp_mip_strtab(const struct nfp_mip *mip, uint32_t *addr, uint32_t *size)
+{
+ *addr = rte_le_to_cpu_32(mip->strtab_addr);
+ *size = rte_le_to_cpu_32(mip->strtab_size);
+}
diff --git a/drivers/net/nfp/nfpcore/nfp_mip.h b/drivers/net/nfp/nfpcore/nfp_mip.h
new file mode 100644
index 00000000..d0919b58
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_mip.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_MIP_H__
+#define __NFP_MIP_H__
+
+#include "nfp_nffw.h"
+
+struct nfp_mip;
+
+struct nfp_mip *nfp_mip_open(struct nfp_cpp *cpp);
+void nfp_mip_close(struct nfp_mip *mip);
+
+const char *nfp_mip_name(const struct nfp_mip *mip);
+void nfp_mip_symtab(const struct nfp_mip *mip, uint32_t *addr, uint32_t *size);
+void nfp_mip_strtab(const struct nfp_mip *mip, uint32_t *addr, uint32_t *size);
+int nfp_nffw_info_mip_first(struct nfp_nffw_info *state, uint32_t *cpp_id,
+ uint64_t *off);
+#endif
diff --git a/drivers/net/nfp/nfpcore/nfp_mutex.c b/drivers/net/nfp/nfpcore/nfp_mutex.c
new file mode 100644
index 00000000..318c5800
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_mutex.c
@@ -0,0 +1,424 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <errno.h>
+
+#include <malloc.h>
+#include <time.h>
+#include <sched.h>
+
+#include "nfp_cpp.h"
+#include "nfp6000/nfp6000.h"
+
+#define MUTEX_LOCKED(interface) ((((uint32_t)(interface)) << 16) | 0x000f)
+#define MUTEX_UNLOCK(interface) (0 | 0x0000)
+
+#define MUTEX_IS_LOCKED(value) (((value) & 0xffff) == 0x000f)
+#define MUTEX_IS_UNLOCKED(value) (((value) & 0xffff) == 0x0000)
+#define MUTEX_INTERFACE(value) (((value) >> 16) & 0xffff)
+
+/*
+ * If you need more than 65536 recursive locks, please
+ * rethink your code.
+ */
+#define MUTEX_DEPTH_MAX 0xffff
+
+struct nfp_cpp_mutex {
+ struct nfp_cpp *cpp;
+ uint8_t target;
+ uint16_t depth;
+ unsigned long long address;
+ uint32_t key;
+ unsigned int usage;
+ struct nfp_cpp_mutex *prev, *next;
+};
+
+static int
+_nfp_cpp_mutex_validate(uint32_t model, int *target, unsigned long long address)
+{
+ /* Address must be 64-bit aligned */
+ if (address & 7)
+ return NFP_ERRNO(EINVAL);
+
+ if (NFP_CPP_MODEL_IS_6000(model)) {
+ if (*target != NFP_CPP_TARGET_MU)
+ return NFP_ERRNO(EINVAL);
+ } else {
+ return NFP_ERRNO(EINVAL);
+ }
+
+ return 0;
+}
+
+/*
+ * Initialize a mutex location
+ *
+ * The CPP target:address must point to a 64-bit aligned location, and
+ * will initialize 64 bits of data at the location.
+ *
+ * This creates the initial mutex state, as locked by this
+ * nfp_cpp_interface().
+ *
+ * This function should only be called when setting up
+ * the initial lock state upon boot-up of the system.
+ *
+ * @param mutex NFP CPP Mutex handle
+ * @param target NFP CPP target ID (ie NFP_CPP_TARGET_CLS or
+ * NFP_CPP_TARGET_MU)
+ * @param address Offset into the address space of the NFP CPP target ID
+ * @param key Unique 32-bit value for this mutex
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int
+nfp_cpp_mutex_init(struct nfp_cpp *cpp, int target, unsigned long long address,
+ uint32_t key)
+{
+ uint32_t model = nfp_cpp_model(cpp);
+ uint32_t muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */
+ int err;
+
+ err = _nfp_cpp_mutex_validate(model, &target, address);
+ if (err < 0)
+ return err;
+
+ err = nfp_cpp_writel(cpp, muw, address + 4, key);
+ if (err < 0)
+ return err;
+
+ err =
+ nfp_cpp_writel(cpp, muw, address + 0,
+ MUTEX_LOCKED(nfp_cpp_interface(cpp)));
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+/*
+ * Create a mutex handle from an address controlled by a MU Atomic engine
+ *
+ * The CPP target:address must point to a 64-bit aligned location, and
+ * reserve 64 bits of data at the location for use by the handle.
+ *
+ * Only target/address pairs that point to entities that support the
+ * MU Atomic Engine are supported.
+ *
+ * @param cpp NFP CPP handle
+ * @param target NFP CPP target ID (ie NFP_CPP_TARGET_CLS or
+ * NFP_CPP_TARGET_MU)
+ * @param address Offset into the address space of the NFP CPP target ID
+ * @param key 32-bit unique key (must match the key at this location)
+ *
+ * @return A non-NULL struct nfp_cpp_mutex * on success, NULL on failure.
+ */
+struct nfp_cpp_mutex *
+nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target,
+ unsigned long long address, uint32_t key)
+{
+ uint32_t model = nfp_cpp_model(cpp);
+ struct nfp_cpp_mutex *mutex;
+ uint32_t mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */
+ int err;
+ uint32_t tmp;
+
+ /* Look for cached mutex */
+ for (mutex = cpp->mutex_cache; mutex; mutex = mutex->next) {
+ if (mutex->target == target && mutex->address == address)
+ break;
+ }
+
+ if (mutex) {
+ if (mutex->key == key) {
+ mutex->usage++;
+ return mutex;
+ }
+
+ /* If the key doesn't match... */
+ return NFP_ERRPTR(EEXIST);
+ }
+
+ err = _nfp_cpp_mutex_validate(model, &target, address);
+ if (err < 0)
+ return NULL;
+
+ err = nfp_cpp_readl(cpp, mur, address + 4, &tmp);
+ if (err < 0)
+ return NULL;
+
+ if (tmp != key)
+ return NFP_ERRPTR(EEXIST);
+
+ mutex = calloc(sizeof(*mutex), 1);
+ if (!mutex)
+ return NFP_ERRPTR(ENOMEM);
+
+ mutex->cpp = cpp;
+ mutex->target = target;
+ mutex->address = address;
+ mutex->key = key;
+ mutex->depth = 0;
+ mutex->usage = 1;
+
+ /* Add mutex to the cache */
+ if (cpp->mutex_cache) {
+ cpp->mutex_cache->prev = mutex;
+ mutex->next = cpp->mutex_cache;
+ cpp->mutex_cache = mutex;
+ } else {
+ cpp->mutex_cache = mutex;
+ }
+
+ return mutex;
+}
+
+struct nfp_cpp *
+nfp_cpp_mutex_cpp(struct nfp_cpp_mutex *mutex)
+{
+ return mutex->cpp;
+}
+
+uint32_t
+nfp_cpp_mutex_key(struct nfp_cpp_mutex *mutex)
+{
+ return mutex->key;
+}
+
+uint16_t
+nfp_cpp_mutex_owner(struct nfp_cpp_mutex *mutex)
+{
+ uint32_t mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */
+ uint32_t value, key;
+ int err;
+
+ err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value);
+ if (err < 0)
+ return err;
+
+ err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key);
+ if (err < 0)
+ return err;
+
+ if (key != mutex->key)
+ return NFP_ERRNO(EPERM);
+
+ if (!MUTEX_IS_LOCKED(value))
+ return 0;
+
+ return MUTEX_INTERFACE(value);
+}
+
+int
+nfp_cpp_mutex_target(struct nfp_cpp_mutex *mutex)
+{
+ return mutex->target;
+}
+
+uint64_t
+nfp_cpp_mutex_address(struct nfp_cpp_mutex *mutex)
+{
+ return mutex->address;
+}
+
+/*
+ * Free a mutex handle - does not alter the lock state
+ *
+ * @param mutex NFP CPP Mutex handle
+ */
+void
+nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex)
+{
+ mutex->usage--;
+ if (mutex->usage > 0)
+ return;
+
+ /* Remove mutex from the cache */
+ if (mutex->next)
+ mutex->next->prev = mutex->prev;
+ if (mutex->prev)
+ mutex->prev->next = mutex->next;
+
+ /* If mutex->cpp == NULL, something broke */
+ if (mutex->cpp && mutex == mutex->cpp->mutex_cache)
+ mutex->cpp->mutex_cache = mutex->next;
+
+ free(mutex);
+}
+
+/*
+ * Lock a mutex handle, using the NFP MU Atomic Engine
+ *
+ * @param mutex NFP CPP Mutex handle
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int
+nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex)
+{
+ int err;
+ time_t warn_at = time(NULL) + 15;
+
+ while ((err = nfp_cpp_mutex_trylock(mutex)) != 0) {
+ /* If errno != EBUSY, then the lock was damaged */
+ if (err < 0 && errno != EBUSY)
+ return err;
+ if (time(NULL) >= warn_at) {
+ printf("Warning: waiting for NFP mutex\n");
+ printf("\tusage:%u\n", mutex->usage);
+ printf("\tdepth:%hd]\n", mutex->depth);
+ printf("\ttarget:%d\n", mutex->target);
+ printf("\taddr:%llx\n", mutex->address);
+ printf("\tkey:%08x]\n", mutex->key);
+ warn_at = time(NULL) + 60;
+ }
+ sched_yield();
+ }
+ return 0;
+}
+
+/*
+ * Unlock a mutex handle, using the NFP MU Atomic Engine
+ *
+ * @param mutex NFP CPP Mutex handle
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int
+nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex)
+{
+ uint32_t muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */
+ uint32_t mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */
+ struct nfp_cpp *cpp = mutex->cpp;
+ uint32_t key, value;
+ uint16_t interface = nfp_cpp_interface(cpp);
+ int err;
+
+ if (mutex->depth > 1) {
+ mutex->depth--;
+ return 0;
+ }
+
+ err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value);
+ if (err < 0)
+ goto exit;
+
+ err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key);
+ if (err < 0)
+ goto exit;
+
+ if (key != mutex->key) {
+ err = NFP_ERRNO(EPERM);
+ goto exit;
+ }
+
+ if (value != MUTEX_LOCKED(interface)) {
+ err = NFP_ERRNO(EACCES);
+ goto exit;
+ }
+
+ err = nfp_cpp_writel(cpp, muw, mutex->address, MUTEX_UNLOCK(interface));
+ if (err < 0)
+ goto exit;
+
+ mutex->depth = 0;
+
+exit:
+ return err;
+}
+
+/*
+ * Attempt to lock a mutex handle, using the NFP MU Atomic Engine
+ *
+ * Valid lock states:
+ *
+ * 0x....0000 - Unlocked
+ * 0x....000f - Locked
+ *
+ * @param mutex NFP CPP Mutex handle
+ * @return 0 if the lock succeeded, -1 on failure (and errno set
+ * appropriately).
+ */
+int
+nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex)
+{
+ uint32_t mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */
+ uint32_t muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */
+ uint32_t mus = NFP_CPP_ID(mutex->target, 5, 3); /* test_set_imm */
+ uint32_t key, value, tmp;
+ struct nfp_cpp *cpp = mutex->cpp;
+ int err;
+
+ if (mutex->depth > 0) {
+ if (mutex->depth == MUTEX_DEPTH_MAX)
+ return NFP_ERRNO(E2BIG);
+
+ mutex->depth++;
+ return 0;
+ }
+
+ /* Verify that the lock marker is not damaged */
+ err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &key);
+ if (err < 0)
+ goto exit;
+
+ if (key != mutex->key) {
+ err = NFP_ERRNO(EPERM);
+ goto exit;
+ }
+
+ /*
+ * Compare against the unlocked state, and if true,
+ * write the interface id into the top 16 bits, and
+ * mark as locked.
+ */
+ value = MUTEX_LOCKED(nfp_cpp_interface(cpp));
+
+ /*
+ * We use test_set_imm here, as it implies a read
+ * of the current state, and sets the bits in the
+ * bytemask of the command to 1s. Since the mutex
+ * is guaranteed to be 64-bit aligned, the bytemask
+ * of this 32-bit command is ensured to be 8'b00001111,
+ * which implies that the lower 4 bits will be set to
+ * ones regardless of the initial state.
+ *
+ * Since this is a 'Readback' operation, with no Pull
+ * data, we can treat this as a normal Push (read)
+ * atomic, which returns the original value.
+ */
+ err = nfp_cpp_readl(cpp, mus, mutex->address, &tmp);
+ if (err < 0)
+ goto exit;
+
+ /* Was it unlocked? */
+ if (MUTEX_IS_UNLOCKED(tmp)) {
+ /*
+ * The read value can only be 0x....0000 in the unlocked state.
+ * If there was another contending for this lock, then
+ * the lock state would be 0x....000f
+ *
+ * Write our owner ID into the lock
+ * While not strictly necessary, this helps with
+ * debug and bookkeeping.
+ */
+ err = nfp_cpp_writel(cpp, muw, mutex->address, value);
+ if (err < 0)
+ goto exit;
+
+ mutex->depth = 1;
+ goto exit;
+ }
+
+ /* Already locked by us? Success! */
+ if (tmp == value) {
+ mutex->depth = 1;
+ goto exit;
+ }
+
+ err = NFP_ERRNO(MUTEX_IS_LOCKED(tmp) ? EBUSY : EINVAL);
+
+exit:
+ return err;
+}
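The trylock/lock/unlock/free helpers above form the complete host-side locking API. The sketch below shows the intended call sequence; it is only an illustration and assumes a valid struct nfp_cpp handle plus a 64-bit aligned lock word pair at "addr" whose key word already holds "key" (in practice these come from the resource table), and the function name is invented for this example.

static int example_with_nfp_mutex(struct nfp_cpp *cpp, int target,
				  uint64_t addr, uint32_t key)
{
	struct nfp_cpp_mutex *mutex;
	int err;

	mutex = nfp_cpp_mutex_alloc(cpp, target, addr, key);
	if (!mutex)
		return -ENOMEM;

	err = nfp_cpp_mutex_lock(mutex);	/* spins (yielding) until owned */
	if (err) {
		nfp_cpp_mutex_free(mutex);
		return err;
	}

	/* ... access the resource guarded by the mutex ... */

	nfp_cpp_mutex_unlock(mutex);
	nfp_cpp_mutex_free(mutex);	/* frees the handle, lock state untouched */
	return 0;
}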
diff --git a/drivers/net/nfp/nfpcore/nfp_nffw.c b/drivers/net/nfp/nfpcore/nfp_nffw.c
new file mode 100644
index 00000000..8bec0e3c
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_nffw.c
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#include "nfp_cpp.h"
+#include "nfp_nffw.h"
+#include "nfp_mip.h"
+#include "nfp6000/nfp6000.h"
+#include "nfp_resource.h"
+
+/*
+ * flg_info_version = flags[0]<27:16>
+ * This is a small version counter intended only to detect if the current
+ * implementation can read the current struct. Struct changes should be very
+ * rare and as such a 12-bit counter should cover large spans of time. By the
+ * time it wraps around, we don't expect to have 4096 versions of this struct
+ * to be in use at the same time.
+ */
+static uint32_t
+nffw_res_info_version_get(const struct nfp_nffw_info_data *res)
+{
+ return (res->flags[0] >> 16) & 0xfff;
+}
+
+/* flg_init = flags[0]<0> */
+static uint32_t
+nffw_res_flg_init_get(const struct nfp_nffw_info_data *res)
+{
+ return (res->flags[0] >> 0) & 1;
+}
+
+/* loaded = loaded__mu_da__mip_off_hi<31:31> */
+static uint32_t
+nffw_fwinfo_loaded_get(const struct nffw_fwinfo *fi)
+{
+ return (fi->loaded__mu_da__mip_off_hi >> 31) & 1;
+}
+
+/* mip_cppid = mip_cppid */
+static uint32_t
+nffw_fwinfo_mip_cppid_get(const struct nffw_fwinfo *fi)
+{
+ return fi->mip_cppid;
+}
+
+/* mip_mu_da = loaded__mu_da__mip_off_hi<8:8> */
+static uint32_t
+nffw_fwinfo_mip_mu_da_get(const struct nffw_fwinfo *fi)
+{
+ return (fi->loaded__mu_da__mip_off_hi >> 8) & 1;
+}
+
+/* mip_offset = (loaded__mu_da__mip_off_hi<7:0> << 32) | mip_offset_lo */
+static uint64_t
+nffw_fwinfo_mip_offset_get(const struct nffw_fwinfo *fi)
+{
+ uint64_t mip_off_hi = fi->loaded__mu_da__mip_off_hi;
+
+ return (mip_off_hi & 0xFF) << 32 | fi->mip_offset_lo;
+}
+
+#define NFP_IMB_TGTADDRESSMODECFG_MODE_of(_x) (((_x) >> 13) & 0x7)
+#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE BIT(12)
+#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_32_BIT 0
+#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_40_BIT BIT(12)
+
+static int
+nfp_mip_mu_locality_lsb(struct nfp_cpp *cpp)
+{
+ unsigned int mode, addr40;
+ uint32_t xpbaddr, imbcppat;
+ int err;
+
+ /* Hardcoded XPB IMB Base, island 0 */
+ xpbaddr = 0x000a0000 + NFP_CPP_TARGET_MU * 4;
+ err = nfp_xpb_readl(cpp, xpbaddr, &imbcppat);
+ if (err < 0)
+ return err;
+
+ mode = NFP_IMB_TGTADDRESSMODECFG_MODE_of(imbcppat);
+ addr40 = !!(imbcppat & NFP_IMB_TGTADDRESSMODECFG_ADDRMODE);
+
+ return nfp_cppat_mu_locality_lsb(mode, addr40);
+}
+
+static unsigned int
+nffw_res_fwinfos(struct nfp_nffw_info_data *fwinf, struct nffw_fwinfo **arr)
+{
+ /*
+	 * For this code, version 0 is most likely to be version 1 in this
+	 * case. Since the kernel driver does not take responsibility for
+	 * initialising the nfp.nffw resource, any previous code (CA firmware or
+	 * userspace) that left the version at 0 but did set the init flag is
+	 * going to be version 1.
+ */
+ switch (nffw_res_info_version_get(fwinf)) {
+ case 0:
+ case 1:
+ *arr = &fwinf->info.v1.fwinfo[0];
+ return NFFW_FWINFO_CNT_V1;
+ case 2:
+ *arr = &fwinf->info.v2.fwinfo[0];
+ return NFFW_FWINFO_CNT_V2;
+ default:
+ *arr = NULL;
+ return 0;
+ }
+}
+
+/*
+ * nfp_nffw_info_open() - Acquire the lock on the NFFW table
+ * @cpp: NFP CPP handle
+ *
+ * Return: NFFW info handle, or NULL on failure
+ */
+struct nfp_nffw_info *
+nfp_nffw_info_open(struct nfp_cpp *cpp)
+{
+ struct nfp_nffw_info_data *fwinf;
+ struct nfp_nffw_info *state;
+ uint32_t info_ver;
+ int err;
+
+ state = malloc(sizeof(*state));
+ if (!state)
+ return NULL;
+
+ memset(state, 0, sizeof(*state));
+
+ state->res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_NFFW);
+ if (!state->res)
+ goto err_free;
+
+ fwinf = &state->fwinf;
+
+ if (sizeof(*fwinf) > nfp_resource_size(state->res))
+ goto err_release;
+
+ err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res),
+ nfp_resource_address(state->res),
+ fwinf, sizeof(*fwinf));
+ if (err < (int)sizeof(*fwinf))
+ goto err_release;
+
+ if (!nffw_res_flg_init_get(fwinf))
+ goto err_release;
+
+ info_ver = nffw_res_info_version_get(fwinf);
+ if (info_ver > NFFW_INFO_VERSION_CURRENT)
+ goto err_release;
+
+ state->cpp = cpp;
+ return state;
+
+err_release:
+ nfp_resource_release(state->res);
+err_free:
+ free(state);
+ return NULL;
+}
+
+/*
+ * nfp_nffw_info_close() - Release the lock on the NFFW table
+ * @state: NFP FW info state
+ */
+void
+nfp_nffw_info_close(struct nfp_nffw_info *state)
+{
+ nfp_resource_release(state->res);
+ free(state);
+}
+
+/*
+ * nfp_nffw_info_fwid_first() - Return the first firmware ID in the NFFW
+ * @state: NFP FW info state
+ *
+ * Return: First NFFW firmware info, NULL on failure
+ */
+static struct nffw_fwinfo *
+nfp_nffw_info_fwid_first(struct nfp_nffw_info *state)
+{
+ struct nffw_fwinfo *fwinfo;
+ unsigned int cnt, i;
+
+ cnt = nffw_res_fwinfos(&state->fwinf, &fwinfo);
+ if (!cnt)
+ return NULL;
+
+ for (i = 0; i < cnt; i++)
+ if (nffw_fwinfo_loaded_get(&fwinfo[i]))
+ return &fwinfo[i];
+
+ return NULL;
+}
+
+/*
+ * nfp_nffw_info_mip_first() - Retrieve the location of the first FW's MIP
+ * @state: NFP FW info state
+ * @cpp_id: Pointer to the CPP ID of the MIP
+ * @off: Pointer to the CPP Address of the MIP
+ *
+ * Return: 0, or -ERRNO
+ */
+int
+nfp_nffw_info_mip_first(struct nfp_nffw_info *state, uint32_t *cpp_id,
+ uint64_t *off)
+{
+ struct nffw_fwinfo *fwinfo;
+
+ fwinfo = nfp_nffw_info_fwid_first(state);
+ if (!fwinfo)
+ return -EINVAL;
+
+ *cpp_id = nffw_fwinfo_mip_cppid_get(fwinfo);
+ *off = nffw_fwinfo_mip_offset_get(fwinfo);
+
+ if (nffw_fwinfo_mip_mu_da_get(fwinfo)) {
+ int locality_off;
+
+ if (NFP_CPP_ID_TARGET_of(*cpp_id) != NFP_CPP_TARGET_MU)
+ return 0;
+
+ locality_off = nfp_mip_mu_locality_lsb(state->cpp);
+ if (locality_off < 0)
+ return locality_off;
+
+ *off &= ~(NFP_MU_ADDR_ACCESS_TYPE_MASK << locality_off);
+ *off |= NFP_MU_ADDR_ACCESS_TYPE_DIRECT << locality_off;
+ }
+
+ return 0;
+}
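Taken together, open/mip_first/close give the lookup sequence used when locating a firmware's MIP. A minimal sketch (the wrapper name and the -ENODEV fallback are illustrative; the error code otherwise is whatever the helpers return):

static int example_find_mip(struct nfp_cpp *cpp, uint32_t *cpp_id, uint64_t *off)
{
	struct nfp_nffw_info *nffw;
	int err;

	nffw = nfp_nffw_info_open(cpp);		/* takes the nfp.nffw resource lock */
	if (!nffw)
		return -ENODEV;

	err = nfp_nffw_info_mip_first(nffw, cpp_id, off);

	nfp_nffw_info_close(nffw);		/* drops the lock */
	return err;
}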
diff --git a/drivers/net/nfp/nfpcore/nfp_nffw.h b/drivers/net/nfp/nfpcore/nfp_nffw.h
new file mode 100644
index 00000000..3bbdf1c1
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_nffw.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_NFFW_H__
+#define __NFP_NFFW_H__
+
+#include "nfp-common/nfp_platform.h"
+#include "nfp_cpp.h"
+
+/*
+ * Init-CSR owner IDs for firmware map to firmware IDs which start at 4.
+ * Lower IDs are reserved for target and loader IDs.
+ */
+#define NFFW_FWID_EXT 3 /* For active MEs that we didn't load. */
+#define NFFW_FWID_BASE 4
+
+#define NFFW_FWID_ALL 255
+
+/**
+ * NFFW_INFO_VERSION history:
+ * 0: This was never actually used (before versioning), but it refers to
+ * the previous struct which had FWINFO_CNT = MEINFO_CNT = 120 that later
+ * changed to 200.
+ * 1: First versioned struct, with
+ * FWINFO_CNT = 120
+ * MEINFO_CNT = 120
+ * 2: FWINFO_CNT = 200
+ * MEINFO_CNT = 200
+ */
+#define NFFW_INFO_VERSION_CURRENT 2
+
+/* Enough for all current chip families */
+#define NFFW_MEINFO_CNT_V1 120
+#define NFFW_FWINFO_CNT_V1 120
+#define NFFW_MEINFO_CNT_V2 200
+#define NFFW_FWINFO_CNT_V2 200
+
+struct nffw_meinfo {
+ uint32_t ctxmask__fwid__meid;
+};
+
+struct nffw_fwinfo {
+ uint32_t loaded__mu_da__mip_off_hi;
+ uint32_t mip_cppid; /* 0 means no MIP */
+ uint32_t mip_offset_lo;
+};
+
+struct nfp_nffw_info_v1 {
+ struct nffw_meinfo meinfo[NFFW_MEINFO_CNT_V1];
+ struct nffw_fwinfo fwinfo[NFFW_FWINFO_CNT_V1];
+};
+
+struct nfp_nffw_info_v2 {
+ struct nffw_meinfo meinfo[NFFW_MEINFO_CNT_V2];
+ struct nffw_fwinfo fwinfo[NFFW_FWINFO_CNT_V2];
+};
+
+struct nfp_nffw_info_data {
+ uint32_t flags[2];
+ union {
+ struct nfp_nffw_info_v1 v1;
+ struct nfp_nffw_info_v2 v2;
+ } info;
+};
+
+struct nfp_nffw_info {
+ struct nfp_cpp *cpp;
+ struct nfp_resource *res;
+
+ struct nfp_nffw_info_data fwinf;
+};
+
+struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp);
+void nfp_nffw_info_close(struct nfp_nffw_info *state);
+
+#endif
diff --git a/drivers/net/nfp/nfpcore/nfp_nsp.c b/drivers/net/nfp/nfpcore/nfp_nsp.c
new file mode 100644
index 00000000..876a4017
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_nsp.c
@@ -0,0 +1,427 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#define NFP_SUBSYS "nfp_nsp"
+
+#include <stdio.h>
+#include <time.h>
+
+#include <rte_common.h>
+
+#include "nfp_cpp.h"
+#include "nfp_nsp.h"
+#include "nfp_resource.h"
+
+int
+nfp_nsp_config_modified(struct nfp_nsp *state)
+{
+ return state->modified;
+}
+
+void
+nfp_nsp_config_set_modified(struct nfp_nsp *state, int modified)
+{
+ state->modified = modified;
+}
+
+void *
+nfp_nsp_config_entries(struct nfp_nsp *state)
+{
+ return state->entries;
+}
+
+unsigned int
+nfp_nsp_config_idx(struct nfp_nsp *state)
+{
+ return state->idx;
+}
+
+void
+nfp_nsp_config_set_state(struct nfp_nsp *state, void *entries, unsigned int idx)
+{
+ state->entries = entries;
+ state->idx = idx;
+}
+
+void
+nfp_nsp_config_clear_state(struct nfp_nsp *state)
+{
+ state->entries = NULL;
+ state->idx = 0;
+}
+
+static void
+nfp_nsp_print_extended_error(uint32_t ret_val)
+{
+ int i;
+
+ if (!ret_val)
+ return;
+
+ for (i = 0; i < (int)ARRAY_SIZE(nsp_errors); i++)
+ if (ret_val == (uint32_t)nsp_errors[i].code)
+ printf("err msg: %s\n", nsp_errors[i].msg);
+}
+
+static int
+nfp_nsp_check(struct nfp_nsp *state)
+{
+ struct nfp_cpp *cpp = state->cpp;
+ uint64_t nsp_status, reg;
+ uint32_t nsp_cpp;
+ int err;
+
+ nsp_cpp = nfp_resource_cpp_id(state->res);
+ nsp_status = nfp_resource_address(state->res) + NSP_STATUS;
+
+ err = nfp_cpp_readq(cpp, nsp_cpp, nsp_status, &reg);
+ if (err < 0)
+ return err;
+
+ if (FIELD_GET(NSP_STATUS_MAGIC, reg) != NSP_MAGIC) {
+ printf("Cannot detect NFP Service Processor\n");
+ return -ENODEV;
+ }
+
+ state->ver.major = FIELD_GET(NSP_STATUS_MAJOR, reg);
+ state->ver.minor = FIELD_GET(NSP_STATUS_MINOR, reg);
+
+ if (state->ver.major != NSP_MAJOR || state->ver.minor < NSP_MINOR) {
+ printf("Unsupported ABI %hu.%hu\n", state->ver.major,
+ state->ver.minor);
+ return -EINVAL;
+ }
+
+ if (reg & NSP_STATUS_BUSY) {
+ printf("Service processor busy!\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/*
+ * nfp_nsp_open() - Prepare for communication and lock the NSP resource.
+ * @cpp: NFP CPP Handle
+ */
+struct nfp_nsp *
+nfp_nsp_open(struct nfp_cpp *cpp)
+{
+ struct nfp_resource *res;
+ struct nfp_nsp *state;
+ int err;
+
+ res = nfp_resource_acquire(cpp, NFP_RESOURCE_NSP);
+ if (!res)
+ return NULL;
+
+ state = malloc(sizeof(*state));
+ if (!state) {
+ nfp_resource_release(res);
+ return NULL;
+ }
+ memset(state, 0, sizeof(*state));
+ state->cpp = cpp;
+ state->res = res;
+
+ err = nfp_nsp_check(state);
+ if (err) {
+ nfp_nsp_close(state);
+ return NULL;
+ }
+
+ return state;
+}
+
+/*
+ * nfp_nsp_close() - Clean up and unlock the NSP resource.
+ * @state: NFP SP state
+ */
+void
+nfp_nsp_close(struct nfp_nsp *state)
+{
+ nfp_resource_release(state->res);
+ free(state);
+}
+
+uint16_t
+nfp_nsp_get_abi_ver_major(struct nfp_nsp *state)
+{
+ return state->ver.major;
+}
+
+uint16_t
+nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state)
+{
+ return state->ver.minor;
+}
+
+static int
+nfp_nsp_wait_reg(struct nfp_cpp *cpp, uint64_t *reg, uint32_t nsp_cpp,
+ uint64_t addr, uint64_t mask, uint64_t val)
+{
+ struct timespec wait;
+ int count;
+ int err;
+
+ wait.tv_sec = 0;
+ wait.tv_nsec = 25000000;
+ count = 0;
+
+ for (;;) {
+ err = nfp_cpp_readq(cpp, nsp_cpp, addr, reg);
+ if (err < 0)
+ return err;
+
+ if ((*reg & mask) == val)
+ return 0;
+
+ nanosleep(&wait, 0);
+ if (count++ > 1000)
+ return -ETIMEDOUT;
+ }
+}
+
+/*
+ * nfp_nsp_command() - Execute a command on the NFP Service Processor
+ * @state: NFP SP state
+ * @code: NFP SP Command Code
+ * @option: NFP SP Command Argument
+ * @buff_cpp: NFP SP Buffer CPP Address info
+ * @buff_addr: NFP SP Buffer Host address
+ *
+ * Return: 0 for success with no result
+ *
+ * positive value for NSP completion with a result code
+ *
+ * -EAGAIN if the NSP is not yet present
+ * -ENODEV if the NSP is not a supported model
+ * -EBUSY if the NSP is stuck
+ * -EINTR if interrupted while waiting for completion
+ * -ETIMEDOUT if the NSP took longer than 30 seconds to complete
+ */
+static int
+nfp_nsp_command(struct nfp_nsp *state, uint16_t code, uint32_t option,
+ uint32_t buff_cpp, uint64_t buff_addr)
+{
+ uint64_t reg, ret_val, nsp_base, nsp_buffer, nsp_status, nsp_command;
+ struct nfp_cpp *cpp = state->cpp;
+ uint32_t nsp_cpp;
+ int err;
+
+ nsp_cpp = nfp_resource_cpp_id(state->res);
+ nsp_base = nfp_resource_address(state->res);
+ nsp_status = nsp_base + NSP_STATUS;
+ nsp_command = nsp_base + NSP_COMMAND;
+ nsp_buffer = nsp_base + NSP_BUFFER;
+
+ err = nfp_nsp_check(state);
+ if (err)
+ return err;
+
+ if (!FIELD_FIT(NSP_BUFFER_CPP, buff_cpp >> 8) ||
+ !FIELD_FIT(NSP_BUFFER_ADDRESS, buff_addr)) {
+ printf("Host buffer out of reach %08x %" PRIx64 "\n",
+ buff_cpp, buff_addr);
+ return -EINVAL;
+ }
+
+ err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_buffer,
+ FIELD_PREP(NSP_BUFFER_CPP, buff_cpp >> 8) |
+ FIELD_PREP(NSP_BUFFER_ADDRESS, buff_addr));
+ if (err < 0)
+ return err;
+
+ err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_command,
+ FIELD_PREP(NSP_COMMAND_OPTION, option) |
+ FIELD_PREP(NSP_COMMAND_CODE, code) |
+ FIELD_PREP(NSP_COMMAND_START, 1));
+ if (err < 0)
+ return err;
+
+ /* Wait for NSP_COMMAND_START to go to 0 */
+ err = nfp_nsp_wait_reg(cpp, &reg, nsp_cpp, nsp_command,
+ NSP_COMMAND_START, 0);
+ if (err) {
+ printf("Error %d waiting for code 0x%04x to start\n",
+ err, code);
+ return err;
+ }
+
+ /* Wait for NSP_STATUS_BUSY to go to 0 */
+ err = nfp_nsp_wait_reg(cpp, &reg, nsp_cpp, nsp_status, NSP_STATUS_BUSY,
+ 0);
+ if (err) {
+ printf("Error %d waiting for code 0x%04x to complete\n",
+ err, code);
+ return err;
+ }
+
+ err = nfp_cpp_readq(cpp, nsp_cpp, nsp_command, &ret_val);
+ if (err < 0)
+ return err;
+ ret_val = FIELD_GET(NSP_COMMAND_OPTION, ret_val);
+
+ err = FIELD_GET(NSP_STATUS_RESULT, reg);
+ if (err) {
+ printf("Result (error) code set: %d (%d) command: %d\n",
+ -err, (int)ret_val, code);
+ nfp_nsp_print_extended_error(ret_val);
+ return -err;
+ }
+
+ return ret_val;
+}
+
+#define SZ_1M 0x00100000
+
+static int
+nfp_nsp_command_buf(struct nfp_nsp *nsp, uint16_t code, uint32_t option,
+ const void *in_buf, unsigned int in_size, void *out_buf,
+ unsigned int out_size)
+{
+ struct nfp_cpp *cpp = nsp->cpp;
+ unsigned int max_size;
+ uint64_t reg, cpp_buf;
+ int ret, err;
+ uint32_t cpp_id;
+
+ if (nsp->ver.minor < 13) {
+ printf("NSP: Code 0x%04x with buffer not supported\n", code);
+ printf("\t(ABI %hu.%hu)\n", nsp->ver.major, nsp->ver.minor);
+ return -EOPNOTSUPP;
+ }
+
+ err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res),
+ nfp_resource_address(nsp->res) +
+ NSP_DFLT_BUFFER_CONFIG,
+ &reg);
+ if (err < 0)
+ return err;
+
+ max_size = RTE_MAX(in_size, out_size);
+ if (FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M < max_size) {
+ printf("NSP: default buffer too small for command 0x%04x\n",
+ code);
+ printf("\t(%llu < %u)\n",
+ FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M,
+ max_size);
+ return -EINVAL;
+ }
+
+ err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res),
+ nfp_resource_address(nsp->res) +
+ NSP_DFLT_BUFFER,
+ &reg);
+ if (err < 0)
+ return err;
+
+ cpp_id = FIELD_GET(NSP_BUFFER_CPP, reg) << 8;
+ cpp_buf = FIELD_GET(NSP_BUFFER_ADDRESS, reg);
+
+ if (in_buf && in_size) {
+ err = nfp_cpp_write(cpp, cpp_id, cpp_buf, in_buf, in_size);
+ if (err < 0)
+ return err;
+ }
+ /* Zero out remaining part of the buffer */
+ if (out_buf && out_size && out_size > in_size) {
+ memset(out_buf, 0, out_size - in_size);
+ err = nfp_cpp_write(cpp, cpp_id, cpp_buf + in_size, out_buf,
+ out_size - in_size);
+ if (err < 0)
+ return err;
+ }
+
+ ret = nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf);
+ if (ret < 0)
+ return ret;
+
+ if (out_buf && out_size) {
+ err = nfp_cpp_read(cpp, cpp_id, cpp_buf, out_buf, out_size);
+ if (err < 0)
+ return err;
+ }
+
+ return ret;
+}
+
+int
+nfp_nsp_wait(struct nfp_nsp *state)
+{
+ struct timespec wait;
+ int count;
+ int err;
+
+ wait.tv_sec = 0;
+ wait.tv_nsec = 25000000;
+ count = 0;
+
+ for (;;) {
+ err = nfp_nsp_command(state, SPCODE_NOOP, 0, 0, 0);
+ if (err != -EAGAIN)
+ break;
+
+ nanosleep(&wait, 0);
+
+ if (count++ > 1000) {
+ err = -ETIMEDOUT;
+ break;
+ }
+ }
+ if (err)
+ printf("NSP failed to respond %d\n", err);
+
+ return err;
+}
+
+int
+nfp_nsp_device_soft_reset(struct nfp_nsp *state)
+{
+ return nfp_nsp_command(state, SPCODE_SOFT_RESET, 0, 0, 0);
+}
+
+int
+nfp_nsp_mac_reinit(struct nfp_nsp *state)
+{
+ return nfp_nsp_command(state, SPCODE_MAC_INIT, 0, 0, 0);
+}
+
+int
+nfp_nsp_load_fw(struct nfp_nsp *state, void *buf, unsigned int size)
+{
+ return nfp_nsp_command_buf(state, SPCODE_FW_LOAD, size, buf, size,
+ NULL, 0);
+}
+
+int
+nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size)
+{
+ return nfp_nsp_command_buf(state, SPCODE_ETH_RESCAN, size, NULL, 0,
+ buf, size);
+}
+
+int
+nfp_nsp_write_eth_table(struct nfp_nsp *state, const void *buf,
+ unsigned int size)
+{
+ return nfp_nsp_command_buf(state, SPCODE_ETH_CONTROL, size, buf, size,
+ NULL, 0);
+}
+
+int
+nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size)
+{
+ return nfp_nsp_command_buf(state, SPCODE_NSP_IDENTIFY, size, NULL, 0,
+ buf, size);
+}
+
+int
+nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask, void *buf,
+ unsigned int size)
+{
+ return nfp_nsp_command_buf(state, SPCODE_NSP_SENSORS, sensor_mask, NULL,
+ 0, buf, size);
+}
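The exported wrappers above all follow the same pattern: acquire the NSP, optionally wait for it to respond, issue the command, release it. A hedged sketch of a firmware load (the wrapper name and the -EIO fallback are illustrative):

static int example_nsp_load_fw(struct nfp_cpp *cpp, void *fw, unsigned int fw_size)
{
	struct nfp_nsp *nsp;
	int err;

	nsp = nfp_nsp_open(cpp);	/* locks the nfp.sp resource, checks the ABI */
	if (!nsp)
		return -EIO;

	err = nfp_nsp_wait(nsp);	/* NOOP command until the NSP answers */
	if (!err)
		err = nfp_nsp_load_fw(nsp, fw, fw_size);

	nfp_nsp_close(nsp);		/* unlocks the resource */
	return err;
}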
diff --git a/drivers/net/nfp/nfpcore/nfp_nsp.h b/drivers/net/nfp/nfpcore/nfp_nsp.h
new file mode 100644
index 00000000..c9c7b0d0
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_nsp.h
@@ -0,0 +1,304 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef NSP_NSP_H
+#define NSP_NSP_H 1
+
+#include "nfp_cpp.h"
+#include "nfp_nsp.h"
+
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) - (1ULL << (l)) + 1) & \
+ (~0ULL >> (64 - 1 - (h))))
+
+#define __bf_shf(x) (__builtin_ffsll(x) - 1)
+
+#define FIELD_GET(_mask, _reg) \
+ (__extension__ ({ \
+ typeof(_mask) _x = (_mask); \
+ (typeof(_x))(((_reg) & (_x)) >> __bf_shf(_x)); \
+ }))
+
+#define FIELD_FIT(_mask, _val) \
+ (__extension__ ({ \
+ typeof(_mask) _x = (_mask); \
+ !((((typeof(_x))_val) << __bf_shf(_x)) & ~(_x)); \
+ }))
+
+#define FIELD_PREP(_mask, _val) \
+ (__extension__ ({ \
+ typeof(_mask) _x = (_mask); \
+ ((typeof(_x))(_val) << __bf_shf(_x)) & (_x); \
+ }))
+
+/* Offsets relative to the CSR base */
+#define NSP_STATUS 0x00
+#define NSP_STATUS_MAGIC GENMASK_ULL(63, 48)
+#define NSP_STATUS_MAJOR GENMASK_ULL(47, 44)
+#define NSP_STATUS_MINOR GENMASK_ULL(43, 32)
+#define NSP_STATUS_CODE GENMASK_ULL(31, 16)
+#define NSP_STATUS_RESULT GENMASK_ULL(15, 8)
+#define NSP_STATUS_BUSY BIT_ULL(0)
+
+#define NSP_COMMAND 0x08
+#define NSP_COMMAND_OPTION GENMASK_ULL(63, 32)
+#define NSP_COMMAND_CODE GENMASK_ULL(31, 16)
+#define NSP_COMMAND_START BIT_ULL(0)
+
+/* CPP address to retrieve the data from */
+#define NSP_BUFFER 0x10
+#define NSP_BUFFER_CPP GENMASK_ULL(63, 40)
+#define NSP_BUFFER_PCIE GENMASK_ULL(39, 38)
+#define NSP_BUFFER_ADDRESS GENMASK_ULL(37, 0)
+
+#define NSP_DFLT_BUFFER 0x18
+
+#define NSP_DFLT_BUFFER_CONFIG 0x20
+#define NSP_DFLT_BUFFER_SIZE_MB GENMASK_ULL(7, 0)
+
+#define NSP_MAGIC 0xab10
+#define NSP_MAJOR 0
+#define NSP_MINOR 8
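/*
 * Illustrative example (the register value below is chosen only for this
 * comment): with reg = 0xab10000800000001ULL read back from NSP_STATUS,
 *   FIELD_GET(NSP_STATUS_MAGIC, reg) == 0xab10 (bits 63:48, matches NSP_MAGIC),
 *   FIELD_GET(NSP_STATUS_MAJOR, reg) == 0x0    (bits 47:44, matches NSP_MAJOR),
 *   FIELD_GET(NSP_STATUS_MINOR, reg) == 0x008  (bits 43:32, matches NSP_MINOR),
 *   reg & NSP_STATUS_BUSY is non-zero          (bit 0 set, NSP busy).
 * Conversely, FIELD_PREP(NSP_COMMAND_CODE, SPCODE_NOOP) shifts the command
 * code into bits 31:16 of the command word.
 */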
+
+#define NSP_CODE_MAJOR GENMASK(15, 12)
+#define NSP_CODE_MINOR GENMASK(11, 0)
+
+enum nfp_nsp_cmd {
+ SPCODE_NOOP = 0, /* No operation */
+ SPCODE_SOFT_RESET = 1, /* Soft reset the NFP */
+ SPCODE_FW_DEFAULT = 2, /* Load default (UNDI) FW */
+ SPCODE_PHY_INIT = 3, /* Initialize the PHY */
+ SPCODE_MAC_INIT = 4, /* Initialize the MAC */
+ SPCODE_PHY_RXADAPT = 5, /* Re-run PHY RX Adaptation */
+ SPCODE_FW_LOAD = 6, /* Load fw from buffer, len in option */
+ SPCODE_ETH_RESCAN = 7, /* Rescan ETHs, write ETH_TABLE to buf */
+ SPCODE_ETH_CONTROL = 8, /* Update media config from buffer */
+ SPCODE_NSP_SENSORS = 12, /* Read NSP sensor(s) */
+ SPCODE_NSP_IDENTIFY = 13, /* Read NSP version */
+};
+
+static const struct {
+ int code;
+ const char *msg;
+} nsp_errors[] = {
+ { 6010, "could not map to phy for port" },
+ { 6011, "not an allowed rate/lanes for port" },
+ { 6012, "not an allowed rate/lanes for port" },
+ { 6013, "high/low error, change other port first" },
+ { 6014, "config not found in flash" },
+};
+
+struct nfp_nsp {
+ struct nfp_cpp *cpp;
+ struct nfp_resource *res;
+ struct {
+ uint16_t major;
+ uint16_t minor;
+ } ver;
+
+ /* Eth table config state */
+ int modified;
+ unsigned int idx;
+ void *entries;
+};
+
+struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp);
+void nfp_nsp_close(struct nfp_nsp *state);
+uint16_t nfp_nsp_get_abi_ver_major(struct nfp_nsp *state);
+uint16_t nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state);
+int nfp_nsp_wait(struct nfp_nsp *state);
+int nfp_nsp_device_soft_reset(struct nfp_nsp *state);
+int nfp_nsp_load_fw(struct nfp_nsp *state, void *buf, unsigned int size);
+int nfp_nsp_mac_reinit(struct nfp_nsp *state);
+int nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size);
+int nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask,
+ void *buf, unsigned int size);
+
+static inline int nfp_nsp_has_mac_reinit(struct nfp_nsp *state)
+{
+ return nfp_nsp_get_abi_ver_minor(state) > 20;
+}
+
+enum nfp_eth_interface {
+ NFP_INTERFACE_NONE = 0,
+ NFP_INTERFACE_SFP = 1,
+ NFP_INTERFACE_SFPP = 10,
+ NFP_INTERFACE_SFP28 = 28,
+ NFP_INTERFACE_QSFP = 40,
+ NFP_INTERFACE_CXP = 100,
+ NFP_INTERFACE_QSFP28 = 112,
+};
+
+enum nfp_eth_media {
+ NFP_MEDIA_DAC_PASSIVE = 0,
+ NFP_MEDIA_DAC_ACTIVE,
+ NFP_MEDIA_FIBRE,
+};
+
+enum nfp_eth_aneg {
+ NFP_ANEG_AUTO = 0,
+ NFP_ANEG_SEARCH,
+ NFP_ANEG_25G_CONSORTIUM,
+ NFP_ANEG_25G_IEEE,
+ NFP_ANEG_DISABLED,
+};
+
+enum nfp_eth_fec {
+ NFP_FEC_AUTO_BIT = 0,
+ NFP_FEC_BASER_BIT,
+ NFP_FEC_REED_SOLOMON_BIT,
+ NFP_FEC_DISABLED_BIT,
+};
+
+#define NFP_FEC_AUTO BIT(NFP_FEC_AUTO_BIT)
+#define NFP_FEC_BASER BIT(NFP_FEC_BASER_BIT)
+#define NFP_FEC_REED_SOLOMON BIT(NFP_FEC_REED_SOLOMON_BIT)
+#define NFP_FEC_DISABLED BIT(NFP_FEC_DISABLED_BIT)
+
+#define ETH_ALEN 6
+
+/**
+ * struct nfp_eth_table - ETH table information
+ * @count: number of table entries
+ * @max_index: max of @index fields of all @ports
+ * @ports: table of ports
+ *
+ * @eth_index: port index according to legacy ethX numbering
+ * @index: chip-wide first channel index
+ * @nbi: NBI index
+ * @base: first channel index (within NBI)
+ * @lanes: number of channels
+ * @speed: interface speed (in Mbps)
+ * @interface: interface (module) plugged in
+ * @media: media type of the @interface
+ * @fec: forward error correction mode
+ * @aneg: auto negotiation mode
+ * @mac_addr: interface MAC address
+ * @label_port: port id
+ * @label_subport: id of interface within port (for split ports)
+ * @enabled: is enabled?
+ * @tx_enabled: is TX enabled?
+ * @rx_enabled: is RX enabled?
+ * @override_changed: is media reconfig pending?
+ *
+ * @port_type: one of %PORT_* defines for ethtool
+ * @port_lanes: total number of lanes on the port (sum of lanes of all subports)
+ * @is_split: is interface part of a split port
+ * @fec_modes_supported: bitmap of FEC modes supported
+ */
+struct nfp_eth_table {
+ unsigned int count;
+ unsigned int max_index;
+ struct nfp_eth_table_port {
+ unsigned int eth_index;
+ unsigned int index;
+ unsigned int nbi;
+ unsigned int base;
+ unsigned int lanes;
+ unsigned int speed;
+
+ unsigned int interface;
+ enum nfp_eth_media media;
+
+ enum nfp_eth_fec fec;
+ enum nfp_eth_aneg aneg;
+
+ uint8_t mac_addr[ETH_ALEN];
+
+ uint8_t label_port;
+ uint8_t label_subport;
+
+ int enabled;
+ int tx_enabled;
+ int rx_enabled;
+
+ int override_changed;
+
+ /* Computed fields */
+ uint8_t port_type;
+
+ unsigned int port_lanes;
+
+ int is_split;
+
+ unsigned int fec_modes_supported;
+ } ports[0];
+};
+
+struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp);
+
+int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, int enable);
+int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx,
+ int configed);
+int nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx,
+		    enum nfp_eth_fec mode);
+
+int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size);
+int nfp_nsp_write_eth_table(struct nfp_nsp *state, const void *buf,
+ unsigned int size);
+void nfp_nsp_config_set_state(struct nfp_nsp *state, void *entries,
+ unsigned int idx);
+void nfp_nsp_config_clear_state(struct nfp_nsp *state);
+void nfp_nsp_config_set_modified(struct nfp_nsp *state, int modified);
+void *nfp_nsp_config_entries(struct nfp_nsp *state);
+int nfp_nsp_config_modified(struct nfp_nsp *state);
+unsigned int nfp_nsp_config_idx(struct nfp_nsp *state);
+
+static inline int nfp_eth_can_support_fec(struct nfp_eth_table_port *eth_port)
+{
+ return !!eth_port->fec_modes_supported;
+}
+
+static inline unsigned int
+nfp_eth_supported_fec_modes(struct nfp_eth_table_port *eth_port)
+{
+ return eth_port->fec_modes_supported;
+}
+
+struct nfp_nsp *nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx);
+int nfp_eth_config_commit_end(struct nfp_nsp *nsp);
+void nfp_eth_config_cleanup_end(struct nfp_nsp *nsp);
+
+int __nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode);
+int __nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed);
+int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes);
+
+/**
+ * struct nfp_nsp_identify - NSP static information
+ * @version: opaque version string
+ * @flags: version flags
+ * @br_primary: branch id of primary bootloader
+ * @br_secondary: branch id of secondary bootloader
+ * @br_nsp: branch id of NSP
+ * @primary: version id of primary bootloader
+ * @secondary: version id of secondary bootloader
+ * @nsp: version id of NSP
+ * @sensor_mask: mask of present sensors available on NIC
+ */
+struct nfp_nsp_identify {
+ char version[40];
+ uint8_t flags;
+ uint8_t br_primary;
+ uint8_t br_secondary;
+ uint8_t br_nsp;
+ uint16_t primary;
+ uint16_t secondary;
+ uint16_t nsp;
+ uint64_t sensor_mask;
+};
+
+struct nfp_nsp_identify *__nfp_nsp_identify(struct nfp_nsp *nsp);
+
+enum nfp_nsp_sensor_id {
+ NFP_SENSOR_CHIP_TEMPERATURE,
+ NFP_SENSOR_ASSEMBLY_POWER,
+ NFP_SENSOR_ASSEMBLY_12V_POWER,
+ NFP_SENSOR_ASSEMBLY_3V3_POWER,
+};
+
+int nfp_hwmon_read_sensor(struct nfp_cpp *cpp, enum nfp_nsp_sensor_id id,
+ long *val);
+
+#endif
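A hedged sketch of how a consumer walks the table returned by nfp_eth_read_ports(); it assumes <stdio.h> and <stdlib.h> are available, and the function name is invented for this example.

static void example_dump_eth_ports(struct nfp_cpp *cpp)
{
	struct nfp_eth_table *tbl;
	unsigned int i;

	tbl = nfp_eth_read_ports(cpp);		/* allocated with malloc() */
	if (!tbl)
		return;

	for (i = 0; i < tbl->count; i++)
		printf("eth%u: %u Mbps, %u lane(s), enabled=%d\n",
		       tbl->ports[i].eth_index, tbl->ports[i].speed,
		       tbl->ports[i].lanes, tbl->ports[i].enabled);

	free(tbl);				/* caller owns the table */
}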
diff --git a/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c b/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c
new file mode 100644
index 00000000..bfd1eddb
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <stdio.h>
+#include <rte_byteorder.h>
+#include "nfp_cpp.h"
+#include "nfp_nsp.h"
+#include "nfp_nffw.h"
+
+struct nsp_identify {
+ uint8_t version[40];
+ uint8_t flags;
+ uint8_t br_primary;
+ uint8_t br_secondary;
+ uint8_t br_nsp;
+ uint16_t primary;
+ uint16_t secondary;
+ uint16_t nsp;
+ uint8_t reserved[6];
+ uint64_t sensor_mask;
+};
+
+struct nfp_nsp_identify *
+__nfp_nsp_identify(struct nfp_nsp *nsp)
+{
+ struct nfp_nsp_identify *nspi = NULL;
+ struct nsp_identify *ni;
+ int ret;
+
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 15)
+ return NULL;
+
+ ni = malloc(sizeof(*ni));
+ if (!ni)
+ return NULL;
+
+ memset(ni, 0, sizeof(*ni));
+ ret = nfp_nsp_read_identify(nsp, ni, sizeof(*ni));
+ if (ret < 0) {
+ printf("reading bsp version failed %d\n",
+ ret);
+ goto exit_free;
+ }
+
+ nspi = malloc(sizeof(*nspi));
+ if (!nspi)
+ goto exit_free;
+
+ memset(nspi, 0, sizeof(*nspi));
+ memcpy(nspi->version, ni->version, sizeof(nspi->version));
+ nspi->version[sizeof(nspi->version) - 1] = '\0';
+ nspi->flags = ni->flags;
+ nspi->br_primary = ni->br_primary;
+ nspi->br_secondary = ni->br_secondary;
+ nspi->br_nsp = ni->br_nsp;
+ nspi->primary = rte_le_to_cpu_16(ni->primary);
+ nspi->secondary = rte_le_to_cpu_16(ni->secondary);
+ nspi->nsp = rte_le_to_cpu_16(ni->nsp);
+ nspi->sensor_mask = rte_le_to_cpu_64(ni->sensor_mask);
+
+exit_free:
+ free(ni);
+ return nspi;
+}
+
+struct nfp_sensors {
+ uint32_t chip_temp;
+ uint32_t assembly_power;
+ uint32_t assembly_12v_power;
+ uint32_t assembly_3v3_power;
+};
+
+int
+nfp_hwmon_read_sensor(struct nfp_cpp *cpp, enum nfp_nsp_sensor_id id, long *val)
+{
+ struct nfp_sensors s;
+ struct nfp_nsp *nsp;
+ int ret;
+
+ nsp = nfp_nsp_open(cpp);
+ if (!nsp)
+ return -EIO;
+
+ ret = nfp_nsp_read_sensors(nsp, BIT(id), &s, sizeof(s));
+ nfp_nsp_close(nsp);
+
+ if (ret < 0)
+ return ret;
+
+ switch (id) {
+ case NFP_SENSOR_CHIP_TEMPERATURE:
+ *val = rte_le_to_cpu_32(s.chip_temp);
+ break;
+ case NFP_SENSOR_ASSEMBLY_POWER:
+ *val = rte_le_to_cpu_32(s.assembly_power);
+ break;
+ case NFP_SENSOR_ASSEMBLY_12V_POWER:
+ *val = rte_le_to_cpu_32(s.assembly_12v_power);
+ break;
+ case NFP_SENSOR_ASSEMBLY_3V3_POWER:
+ *val = rte_le_to_cpu_32(s.assembly_3v3_power);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
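A minimal sketch of reading one sensor through the helper above; the wrapper name is illustrative, and the value is printed as the raw number reported by the NSP (the header does not define its unit).

static int example_read_chip_temp(struct nfp_cpp *cpp)
{
	long temp;
	int err;

	err = nfp_hwmon_read_sensor(cpp, NFP_SENSOR_CHIP_TEMPERATURE, &temp);
	if (err)
		return err;

	printf("chip temperature (raw): %ld\n", temp);
	return 0;
}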
diff --git a/drivers/net/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/nfp/nfpcore/nfp_nsp_eth.c
new file mode 100644
index 00000000..67946891
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_nsp_eth.c
@@ -0,0 +1,665 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <stdio.h>
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include "nfp_cpp.h"
+#include "nfp_nsp.h"
+#include "nfp6000/nfp6000.h"
+
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) - (1ULL << (l)) + 1) & \
+ (~0ULL >> (64 - 1 - (h))))
+
+#define __bf_shf(x) (__builtin_ffsll(x) - 1)
+
+#define FIELD_GET(_mask, _reg) \
+ (__extension__ ({ \
+ typeof(_mask) _x = (_mask); \
+ (typeof(_x))(((_reg) & (_x)) >> __bf_shf(_x)); \
+ }))
+
+#define FIELD_FIT(_mask, _val) \
+ (__extension__ ({ \
+ typeof(_mask) _x = (_mask); \
+ !((((typeof(_x))_val) << __bf_shf(_x)) & ~(_x)); \
+ }))
+
+#define FIELD_PREP(_mask, _val) \
+ (__extension__ ({ \
+ typeof(_mask) _x = (_mask); \
+ ((typeof(_x))(_val) << __bf_shf(_x)) & (_x); \
+ }))
+
+#define NSP_ETH_NBI_PORT_COUNT 24
+#define NSP_ETH_MAX_COUNT (2 * NSP_ETH_NBI_PORT_COUNT)
+#define NSP_ETH_TABLE_SIZE (NSP_ETH_MAX_COUNT * \
+ sizeof(union eth_table_entry))
+
+#define NSP_ETH_PORT_LANES GENMASK_ULL(3, 0)
+#define NSP_ETH_PORT_INDEX GENMASK_ULL(15, 8)
+#define NSP_ETH_PORT_LABEL GENMASK_ULL(53, 48)
+#define NSP_ETH_PORT_PHYLABEL GENMASK_ULL(59, 54)
+#define NSP_ETH_PORT_FEC_SUPP_BASER BIT_ULL(60)
+#define NSP_ETH_PORT_FEC_SUPP_RS BIT_ULL(61)
+
+#define NSP_ETH_PORT_LANES_MASK rte_cpu_to_le_64(NSP_ETH_PORT_LANES)
+
+#define NSP_ETH_STATE_CONFIGURED BIT_ULL(0)
+#define NSP_ETH_STATE_ENABLED BIT_ULL(1)
+#define NSP_ETH_STATE_TX_ENABLED BIT_ULL(2)
+#define NSP_ETH_STATE_RX_ENABLED BIT_ULL(3)
+#define NSP_ETH_STATE_RATE GENMASK_ULL(11, 8)
+#define NSP_ETH_STATE_INTERFACE GENMASK_ULL(19, 12)
+#define NSP_ETH_STATE_MEDIA GENMASK_ULL(21, 20)
+#define NSP_ETH_STATE_OVRD_CHNG BIT_ULL(22)
+#define NSP_ETH_STATE_ANEG GENMASK_ULL(25, 23)
+#define NSP_ETH_STATE_FEC GENMASK_ULL(27, 26)
+
+#define NSP_ETH_CTRL_CONFIGURED BIT_ULL(0)
+#define NSP_ETH_CTRL_ENABLED BIT_ULL(1)
+#define NSP_ETH_CTRL_TX_ENABLED BIT_ULL(2)
+#define NSP_ETH_CTRL_RX_ENABLED BIT_ULL(3)
+#define NSP_ETH_CTRL_SET_RATE BIT_ULL(4)
+#define NSP_ETH_CTRL_SET_LANES BIT_ULL(5)
+#define NSP_ETH_CTRL_SET_ANEG BIT_ULL(6)
+#define NSP_ETH_CTRL_SET_FEC BIT_ULL(7)
+
+/* Which connector port. */
+#define PORT_TP 0x00
+#define PORT_AUI 0x01
+#define PORT_MII 0x02
+#define PORT_FIBRE 0x03
+#define PORT_BNC 0x04
+#define PORT_DA 0x05
+#define PORT_NONE 0xef
+#define PORT_OTHER 0xff
+
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define SPEED_2500 2500
+#define SPEED_5000 5000
+#define SPEED_10000 10000
+#define SPEED_14000 14000
+#define SPEED_20000 20000
+#define SPEED_25000 25000
+#define SPEED_40000 40000
+#define SPEED_50000 50000
+#define SPEED_56000 56000
+#define SPEED_100000 100000
+
+enum nfp_eth_raw {
+ NSP_ETH_RAW_PORT = 0,
+ NSP_ETH_RAW_STATE,
+ NSP_ETH_RAW_MAC,
+ NSP_ETH_RAW_CONTROL,
+
+ NSP_ETH_NUM_RAW
+};
+
+enum nfp_eth_rate {
+ RATE_INVALID = 0,
+ RATE_10M,
+ RATE_100M,
+ RATE_1G,
+ RATE_10G,
+ RATE_25G,
+};
+
+union eth_table_entry {
+ struct {
+ uint64_t port;
+ uint64_t state;
+ uint8_t mac_addr[6];
+ uint8_t resv[2];
+ uint64_t control;
+ };
+ uint64_t raw[NSP_ETH_NUM_RAW];
+};
+
+static const struct {
+ enum nfp_eth_rate rate;
+ unsigned int speed;
+} nsp_eth_rate_tbl[] = {
+ { RATE_INVALID, 0, },
+ { RATE_10M, SPEED_10, },
+ { RATE_100M, SPEED_100, },
+ { RATE_1G, SPEED_1000, },
+ { RATE_10G, SPEED_10000, },
+ { RATE_25G, SPEED_25000, },
+};
+
+static unsigned int
+nfp_eth_rate2speed(enum nfp_eth_rate rate)
+{
+ int i;
+
+ for (i = 0; i < (int)ARRAY_SIZE(nsp_eth_rate_tbl); i++)
+ if (nsp_eth_rate_tbl[i].rate == rate)
+ return nsp_eth_rate_tbl[i].speed;
+
+ return 0;
+}
+
+static unsigned int
+nfp_eth_speed2rate(unsigned int speed)
+{
+ int i;
+
+ for (i = 0; i < (int)ARRAY_SIZE(nsp_eth_rate_tbl); i++)
+ if (nsp_eth_rate_tbl[i].speed == speed)
+ return nsp_eth_rate_tbl[i].rate;
+
+ return RATE_INVALID;
+}
+
+static void
+nfp_eth_copy_mac_reverse(uint8_t *dst, const uint8_t *src)
+{
+ int i;
+
+ for (i = 0; i < (int)ETH_ALEN; i++)
+ dst[ETH_ALEN - i - 1] = src[i];
+}
+
+static void
+nfp_eth_port_translate(struct nfp_nsp *nsp, const union eth_table_entry *src,
+ unsigned int index, struct nfp_eth_table_port *dst)
+{
+ unsigned int rate;
+ unsigned int fec;
+ uint64_t port, state;
+
+ port = rte_le_to_cpu_64(src->port);
+ state = rte_le_to_cpu_64(src->state);
+
+ dst->eth_index = FIELD_GET(NSP_ETH_PORT_INDEX, port);
+ dst->index = index;
+ dst->nbi = index / NSP_ETH_NBI_PORT_COUNT;
+ dst->base = index % NSP_ETH_NBI_PORT_COUNT;
+ dst->lanes = FIELD_GET(NSP_ETH_PORT_LANES, port);
+
+ dst->enabled = FIELD_GET(NSP_ETH_STATE_ENABLED, state);
+ dst->tx_enabled = FIELD_GET(NSP_ETH_STATE_TX_ENABLED, state);
+ dst->rx_enabled = FIELD_GET(NSP_ETH_STATE_RX_ENABLED, state);
+
+ rate = nfp_eth_rate2speed(FIELD_GET(NSP_ETH_STATE_RATE, state));
+ dst->speed = dst->lanes * rate;
+
+ dst->interface = FIELD_GET(NSP_ETH_STATE_INTERFACE, state);
+ dst->media = FIELD_GET(NSP_ETH_STATE_MEDIA, state);
+
+ nfp_eth_copy_mac_reverse(dst->mac_addr, src->mac_addr);
+
+ dst->label_port = FIELD_GET(NSP_ETH_PORT_PHYLABEL, port);
+ dst->label_subport = FIELD_GET(NSP_ETH_PORT_LABEL, port);
+
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 17)
+ return;
+
+ dst->override_changed = FIELD_GET(NSP_ETH_STATE_OVRD_CHNG, state);
+ dst->aneg = FIELD_GET(NSP_ETH_STATE_ANEG, state);
+
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 22)
+ return;
+
+ fec = FIELD_GET(NSP_ETH_PORT_FEC_SUPP_BASER, port);
+ dst->fec_modes_supported |= fec << NFP_FEC_BASER_BIT;
+ fec = FIELD_GET(NSP_ETH_PORT_FEC_SUPP_RS, port);
+ dst->fec_modes_supported |= fec << NFP_FEC_REED_SOLOMON_BIT;
+ if (dst->fec_modes_supported)
+ dst->fec_modes_supported |= NFP_FEC_AUTO | NFP_FEC_DISABLED;
+
+ dst->fec = 1 << FIELD_GET(NSP_ETH_STATE_FEC, state);
+}
+
+static void
+nfp_eth_calc_port_geometry(struct nfp_eth_table *table)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < table->count; i++) {
+ table->max_index = RTE_MAX(table->max_index,
+ table->ports[i].index);
+
+ for (j = 0; j < table->count; j++) {
+ if (table->ports[i].label_port !=
+ table->ports[j].label_port)
+ continue;
+ table->ports[i].port_lanes += table->ports[j].lanes;
+
+ if (i == j)
+ continue;
+ if (table->ports[i].label_subport ==
+ table->ports[j].label_subport)
+ printf("Port %d subport %d is a duplicate\n",
+ table->ports[i].label_port,
+ table->ports[i].label_subport);
+
+ table->ports[i].is_split = 1;
+ }
+ }
+}
+
+static void
+nfp_eth_calc_port_type(struct nfp_eth_table_port *entry)
+{
+ if (entry->interface == NFP_INTERFACE_NONE) {
+ entry->port_type = PORT_NONE;
+ return;
+ }
+
+ if (entry->media == NFP_MEDIA_FIBRE)
+ entry->port_type = PORT_FIBRE;
+ else
+ entry->port_type = PORT_DA;
+}
+
+static struct nfp_eth_table *
+__nfp_eth_read_ports(struct nfp_nsp *nsp)
+{
+ union eth_table_entry *entries;
+ struct nfp_eth_table *table;
+ uint32_t table_sz;
+ int i, j, ret, cnt = 0;
+
+ entries = malloc(NSP_ETH_TABLE_SIZE);
+ if (!entries)
+ return NULL;
+
+ memset(entries, 0, NSP_ETH_TABLE_SIZE);
+ ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
+ if (ret < 0) {
+ printf("reading port table failed %d\n", ret);
+ goto err;
+ }
+
+ for (i = 0; i < NSP_ETH_MAX_COUNT; i++)
+ if (entries[i].port & NSP_ETH_PORT_LANES_MASK)
+ cnt++;
+
+ /* Some versions of flash will give us 0 instead of port count. For
+ * those that give a port count, verify it against the value calculated
+ * above.
+ */
+ if (ret && ret != cnt) {
+ printf("table entry count (%d) unmatch entries present (%d)\n",
+ ret, cnt);
+ goto err;
+ }
+
+ table_sz = sizeof(*table) + sizeof(struct nfp_eth_table_port) * cnt;
+ table = malloc(table_sz);
+ if (!table)
+ goto err;
+
+ memset(table, 0, table_sz);
+ table->count = cnt;
+ for (i = 0, j = 0; i < NSP_ETH_MAX_COUNT; i++)
+ if (entries[i].port & NSP_ETH_PORT_LANES_MASK)
+ nfp_eth_port_translate(nsp, &entries[i], i,
+ &table->ports[j++]);
+
+ nfp_eth_calc_port_geometry(table);
+ for (i = 0; i < (int)table->count; i++)
+ nfp_eth_calc_port_type(&table->ports[i]);
+
+ free(entries);
+
+ return table;
+
+err:
+ free(entries);
+ return NULL;
+}
+
+/*
+ * nfp_eth_read_ports() - retrieve port information
+ * @cpp: NFP CPP handle
+ *
+ * Read the port information from the device. The returned structure should
+ * be freed with free() once no longer needed.
+ *
+ * Return: populated ETH table or NULL on error.
+ */
+struct nfp_eth_table *
+nfp_eth_read_ports(struct nfp_cpp *cpp)
+{
+ struct nfp_eth_table *ret;
+ struct nfp_nsp *nsp;
+
+ nsp = nfp_nsp_open(cpp);
+ if (!nsp)
+ return NULL;
+
+ ret = __nfp_eth_read_ports(nsp);
+ nfp_nsp_close(nsp);
+
+ return ret;
+}
+
+struct nfp_nsp *
+nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx)
+{
+ union eth_table_entry *entries;
+ struct nfp_nsp *nsp;
+ int ret;
+
+ entries = malloc(NSP_ETH_TABLE_SIZE);
+ if (!entries)
+ return NULL;
+
+ memset(entries, 0, NSP_ETH_TABLE_SIZE);
+ nsp = nfp_nsp_open(cpp);
+ if (!nsp) {
+ free(entries);
+ return nsp;
+ }
+
+ ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
+ if (ret < 0) {
+ printf("reading port table failed %d\n", ret);
+ goto err;
+ }
+
+ if (!(entries[idx].port & NSP_ETH_PORT_LANES_MASK)) {
+ printf("trying to set port state on disabled port %d\n", idx);
+ goto err;
+ }
+
+ nfp_nsp_config_set_state(nsp, entries, idx);
+ return nsp;
+
+err:
+ nfp_nsp_close(nsp);
+ free(entries);
+ return NULL;
+}
+
+void
+nfp_eth_config_cleanup_end(struct nfp_nsp *nsp)
+{
+ union eth_table_entry *entries = nfp_nsp_config_entries(nsp);
+
+ nfp_nsp_config_set_modified(nsp, 0);
+ nfp_nsp_config_clear_state(nsp);
+ nfp_nsp_close(nsp);
+ free(entries);
+}
+
+/*
+ * nfp_eth_config_commit_end() - perform recorded configuration changes
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ *
+ * Perform the configuration which was requested with __nfp_eth_set_*()
+ * helpers and recorded in @nsp state. If device was already configured
+ * as requested or no __nfp_eth_set_*() operations were made no NSP command
+ * will be performed.
+ *
+ * Return:
+ * 0 - configuration successful;
+ * 1 - no changes were needed;
+ * -ERRNO - configuration failed.
+ */
+int
+nfp_eth_config_commit_end(struct nfp_nsp *nsp)
+{
+ union eth_table_entry *entries = nfp_nsp_config_entries(nsp);
+ int ret = 1;
+
+ if (nfp_nsp_config_modified(nsp)) {
+ ret = nfp_nsp_write_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
+ ret = ret < 0 ? ret : 0;
+ }
+
+ nfp_eth_config_cleanup_end(nsp);
+
+ return ret;
+}
+
+/*
+ * nfp_eth_set_mod_enable() - set PHY module enable control bit
+ * @cpp: NFP CPP handle
+ * @idx: NFP chip-wide port index
+ * @enable: Desired state
+ *
+ * Enable or disable PHY module (this usually means setting the TX lanes
+ * disable bits).
+ *
+ * Return:
+ * 0 - configuration successful;
+ * 1 - no changes were needed;
+ * -ERRNO - configuration failed.
+ */
+int
+nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, int enable)
+{
+ union eth_table_entry *entries;
+ struct nfp_nsp *nsp;
+ uint64_t reg;
+
+ nsp = nfp_eth_config_start(cpp, idx);
+ if (!nsp)
+ return -1;
+
+ entries = nfp_nsp_config_entries(nsp);
+
+ /* Check if we are already in requested state */
+ reg = rte_le_to_cpu_64(entries[idx].state);
+ if (enable != (int)FIELD_GET(NSP_ETH_CTRL_ENABLED, reg)) {
+ reg = rte_le_to_cpu_64(entries[idx].control);
+ reg &= ~NSP_ETH_CTRL_ENABLED;
+ reg |= FIELD_PREP(NSP_ETH_CTRL_ENABLED, enable);
+ entries[idx].control = rte_cpu_to_le_64(reg);
+
+ nfp_nsp_config_set_modified(nsp, 1);
+ }
+
+ return nfp_eth_config_commit_end(nsp);
+}
+
+/*
+ * nfp_eth_set_configured() - set PHY module configured control bit
+ * @cpp: NFP CPP handle
+ * @idx: NFP chip-wide port index
+ * @configed: Desired state
+ *
+ * Set the ifup/ifdown state on the PHY.
+ *
+ * Return:
+ * 0 - configuration successful;
+ * 1 - no changes were needed;
+ * -ERRNO - configuration failed.
+ */
+int
+nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, int configed)
+{
+ union eth_table_entry *entries;
+ struct nfp_nsp *nsp;
+ uint64_t reg;
+
+ nsp = nfp_eth_config_start(cpp, idx);
+ if (!nsp)
+ return -EIO;
+
+ /*
+ * Older ABI versions did support this feature; however, it has only
+ * been reliable since ABI 20.
+ */
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 20) {
+ nfp_eth_config_cleanup_end(nsp);
+ return -EOPNOTSUPP;
+ }
+
+ entries = nfp_nsp_config_entries(nsp);
+
+ /* Check if we are already in requested state */
+ reg = rte_le_to_cpu_64(entries[idx].state);
+ if (configed != (int)FIELD_GET(NSP_ETH_STATE_CONFIGURED, reg)) {
+ reg = rte_le_to_cpu_64(entries[idx].control);
+ reg &= ~NSP_ETH_CTRL_CONFIGURED;
+ reg |= FIELD_PREP(NSP_ETH_CTRL_CONFIGURED, configed);
+ entries[idx].control = rte_cpu_to_le_64(reg);
+
+ nfp_nsp_config_set_modified(nsp, 1);
+ }
+
+ return nfp_eth_config_commit_end(nsp);
+}
+
+static int
+nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx,
+ const uint64_t mask, const unsigned int shift,
+ unsigned int val, const uint64_t ctrl_bit)
+{
+ union eth_table_entry *entries = nfp_nsp_config_entries(nsp);
+ unsigned int idx = nfp_nsp_config_idx(nsp);
+ uint64_t reg;
+
+ /*
+ * Note: set features were added in ABI 0.14 but the error
+ * codes were initially not populated correctly.
+ */
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 17) {
+ printf("set operations not supported, please update flash\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Check if we are already in requested state */
+ reg = rte_le_to_cpu_64(entries[idx].raw[raw_idx]);
+ if (val == (reg & mask) >> shift)
+ return 0;
+
+ reg &= ~mask;
+ reg |= (val << shift) & mask;
+ entries[idx].raw[raw_idx] = rte_cpu_to_le_64(reg);
+
+ entries[idx].control |= rte_cpu_to_le_64(ctrl_bit);
+
+ nfp_nsp_config_set_modified(nsp, 1);
+
+ return 0;
+}
+
+#define NFP_ETH_SET_BIT_CONFIG(nsp, raw_idx, mask, val, ctrl_bit) \
+ (__extension__ ({ \
+ typeof(mask) _x = (mask); \
+ nfp_eth_set_bit_config(nsp, raw_idx, _x, __bf_shf(_x), \
+ val, ctrl_bit); \
+ }))
+
+/*
+ * __nfp_eth_set_aneg() - set PHY autonegotiation control bit
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ * @mode: Desired autonegotiation mode
+ *
+ * Allow/disallow PHY module to advertise/perform autonegotiation.
+ * Will write to hwinfo overrides in the flash (persistent config).
+ *
+ * Return: 0 or -ERRNO.
+ */
+int
+__nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode)
+{
+ return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE,
+ NSP_ETH_STATE_ANEG, mode,
+ NSP_ETH_CTRL_SET_ANEG);
+}
+
+/*
+ * __nfp_eth_set_fec() - set PHY forward error correction control bit
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ * @mode: Desired fec mode
+ *
+ * Set the PHY module forward error correction mode.
+ * Will write to hwinfo overrides in the flash (persistent config).
+ *
+ * Return: 0 or -ERRNO.
+ */
+static int
+__nfp_eth_set_fec(struct nfp_nsp *nsp, enum nfp_eth_fec mode)
+{
+ return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE,
+ NSP_ETH_STATE_FEC, mode,
+ NSP_ETH_CTRL_SET_FEC);
+}
+
+/*
+ * nfp_eth_set_fec() - set PHY forward error correction control mode
+ * @cpp: NFP CPP handle
+ * @idx: NFP chip-wide port index
+ * @mode: Desired fec mode
+ *
+ * Return:
+ * 0 - configuration successful;
+ * 1 - no changes were needed;
+ * -ERRNO - configuration failed.
+ */
+int
+nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode)
+{
+ struct nfp_nsp *nsp;
+ int err;
+
+ nsp = nfp_eth_config_start(cpp, idx);
+ if (!nsp)
+ return -EIO;
+
+ err = __nfp_eth_set_fec(nsp, mode);
+ if (err) {
+ nfp_eth_config_cleanup_end(nsp);
+ return err;
+ }
+
+ return nfp_eth_config_commit_end(nsp);
+}
+
+/*
+ * __nfp_eth_set_speed() - set interface speed/rate
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ * @speed: Desired speed (per lane)
+ *
+ * Set lane speed. Provided @speed value should be subport speed divided
+ * by number of lanes this subport is spanning (i.e. 10000 for 40G, 25000 for
+ * 50G, etc.)
+ * Will write to hwinfo overrides in the flash (persistent config).
+ *
+ * Return: 0 or -ERRNO.
+ */
+int
+__nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed)
+{
+ enum nfp_eth_rate rate;
+
+ rate = nfp_eth_speed2rate(speed);
+ if (rate == RATE_INVALID) {
+ printf("could not find matching lane rate for speed %u\n",
+ speed);
+ return -EINVAL;
+ }
+
+ return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE,
+ NSP_ETH_STATE_RATE, rate,
+ NSP_ETH_CTRL_SET_RATE);
+}
+
+/*
+ * __nfp_eth_set_split() - set interface lane split
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ * @lanes: Desired lanes per port
+ *
+ * Set number of lanes in the port.
+ * Will write to hwinfo overrides in the flash (persistent config).
+ *
+ * Return: 0 or -ERRNO.
+ */
+int
+__nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes)
+{
+ return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_PORT, NSP_ETH_PORT_LANES,
+ lanes, NSP_ETH_CTRL_SET_LANES);
+}
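The __nfp_eth_set_*() helpers only record changes in the snapshot taken by nfp_eth_config_start(); nothing reaches the NSP until nfp_eth_config_commit_end(). A hedged sketch of the full sequence (the wrapper name and the -EIO fallback are illustrative):

static int example_set_port_speed(struct nfp_cpp *cpp, unsigned int idx,
				  unsigned int lane_speed)
{
	struct nfp_nsp *nsp;
	int err;

	nsp = nfp_eth_config_start(cpp, idx);	/* locks NSP, snapshots ETH table */
	if (!nsp)
		return -EIO;

	err = __nfp_eth_set_speed(nsp, lane_speed);
	if (err) {
		nfp_eth_config_cleanup_end(nsp);	/* discard changes, unlock */
		return err;
	}

	/* 0 on success, 1 if nothing needed changing, -ERRNO on failure */
	return nfp_eth_config_commit_end(nsp);
}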
diff --git a/drivers/net/nfp/nfpcore/nfp_resource.c b/drivers/net/nfp/nfpcore/nfp_resource.c
new file mode 100644
index 00000000..dd41fa4d
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_resource.c
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <stdio.h>
+#include <time.h>
+#include <endian.h>
+
+#include <rte_string_fns.h>
+
+#include "nfp_cpp.h"
+#include "nfp6000/nfp6000.h"
+#include "nfp_resource.h"
+#include "nfp_crc.h"
+
+#define NFP_RESOURCE_TBL_TARGET NFP_CPP_TARGET_MU
+#define NFP_RESOURCE_TBL_BASE 0x8100000000ULL
+
+/* NFP Resource Table self-identifier */
+#define NFP_RESOURCE_TBL_NAME "nfp.res"
+#define NFP_RESOURCE_TBL_KEY 0x00000000 /* Special key for entry 0 */
+
+#define NFP_RESOURCE_ENTRY_NAME_SZ 8
+
+/*
+ * struct nfp_resource_entry - Resource table entry
+ * @owner: NFP CPP Lock, interface owner
+ * @key: NFP CPP Lock, posix_crc32(name, 8)
+ * @region: Memory region descriptor
+ * @name: ASCII, zero padded name
+ * @reserved
+ * @cpp_action: CPP Action
+ * @cpp_token: CPP Token
+ * @cpp_target: CPP Target ID
+ * @page_offset: 256-byte page offset into target's CPP address
+ * @page_size: size, in 256-byte pages
+ */
+struct nfp_resource_entry {
+ struct nfp_resource_entry_mutex {
+ uint32_t owner;
+ uint32_t key;
+ } mutex;
+ struct nfp_resource_entry_region {
+ uint8_t name[NFP_RESOURCE_ENTRY_NAME_SZ];
+ uint8_t reserved[5];
+ uint8_t cpp_action;
+ uint8_t cpp_token;
+ uint8_t cpp_target;
+ uint32_t page_offset;
+ uint32_t page_size;
+ } region;
+};
+
+#define NFP_RESOURCE_TBL_SIZE 4096
+#define NFP_RESOURCE_TBL_ENTRIES (int)(NFP_RESOURCE_TBL_SIZE / \
+ sizeof(struct nfp_resource_entry))
+
+struct nfp_resource {
+ char name[NFP_RESOURCE_ENTRY_NAME_SZ + 1];
+ uint32_t cpp_id;
+ uint64_t addr;
+ uint64_t size;
+ struct nfp_cpp_mutex *mutex;
+};
+
+static int
+nfp_cpp_resource_find(struct nfp_cpp *cpp, struct nfp_resource *res)
+{
+ char name_pad[NFP_RESOURCE_ENTRY_NAME_SZ + 2];
+ struct nfp_resource_entry entry;
+ uint32_t cpp_id, key;
+ int ret, i;
+
+ cpp_id = NFP_CPP_ID(NFP_RESOURCE_TBL_TARGET, 3, 0); /* Atomic read */
+
+ memset(name_pad, 0, sizeof(name_pad));
+ strlcpy(name_pad, res->name, sizeof(name_pad));
+
+ /* Search for a matching entry */
+ if (!memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8)) {
+ printf("Grabbing device lock not supported\n");
+ return -EOPNOTSUPP;
+ }
+ key = nfp_crc32_posix(name_pad, NFP_RESOURCE_ENTRY_NAME_SZ);
+
+ for (i = 0; i < NFP_RESOURCE_TBL_ENTRIES; i++) {
+ uint64_t addr = NFP_RESOURCE_TBL_BASE +
+ sizeof(struct nfp_resource_entry) * i;
+
+ ret = nfp_cpp_read(cpp, cpp_id, addr, &entry, sizeof(entry));
+ if (ret != sizeof(entry))
+ return -EIO;
+
+ if (entry.mutex.key != key)
+ continue;
+
+ /* Found key! */
+ res->mutex =
+ nfp_cpp_mutex_alloc(cpp,
+ NFP_RESOURCE_TBL_TARGET, addr, key);
+ res->cpp_id = NFP_CPP_ID(entry.region.cpp_target,
+ entry.region.cpp_action,
+ entry.region.cpp_token);
+ res->addr = ((uint64_t)entry.region.page_offset) << 8;
+ res->size = (uint64_t)entry.region.page_size << 8;
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int
+nfp_resource_try_acquire(struct nfp_cpp *cpp, struct nfp_resource *res,
+ struct nfp_cpp_mutex *dev_mutex)
+{
+ int err;
+
+ if (nfp_cpp_mutex_lock(dev_mutex))
+ return -EINVAL;
+
+ err = nfp_cpp_resource_find(cpp, res);
+ if (err)
+ goto err_unlock_dev;
+
+ err = nfp_cpp_mutex_trylock(res->mutex);
+ if (err)
+ goto err_res_mutex_free;
+
+ nfp_cpp_mutex_unlock(dev_mutex);
+
+ return 0;
+
+err_res_mutex_free:
+ nfp_cpp_mutex_free(res->mutex);
+err_unlock_dev:
+ nfp_cpp_mutex_unlock(dev_mutex);
+
+ return err;
+}
+
+/*
+ * nfp_resource_acquire() - Acquire a resource handle
+ * @cpp: NFP CPP handle
+ * @name: Name of the resource
+ *
+ * NOTE: This function locks the acquired resource
+ *
+ * Return: NFP Resource handle, or ERR_PTR()
+ */
+struct nfp_resource *
+nfp_resource_acquire(struct nfp_cpp *cpp, const char *name)
+{
+ struct nfp_cpp_mutex *dev_mutex;
+ struct nfp_resource *res;
+ int err;
+ struct timespec wait;
+ int count;
+
+ res = malloc(sizeof(*res));
+ if (!res)
+ return NULL;
+
+ memset(res, 0, sizeof(*res));
+
+ strncpy(res->name, name, NFP_RESOURCE_ENTRY_NAME_SZ);
+
+ dev_mutex = nfp_cpp_mutex_alloc(cpp, NFP_RESOURCE_TBL_TARGET,
+ NFP_RESOURCE_TBL_BASE,
+ NFP_RESOURCE_TBL_KEY);
+ if (!dev_mutex) {
+ free(res);
+ return NULL;
+ }
+
+ wait.tv_sec = 0;
+ wait.tv_nsec = 1000000;
+ count = 0;
+
+ for (;;) {
+ err = nfp_resource_try_acquire(cpp, res, dev_mutex);
+ if (!err)
+ break;
+ if (err != -EBUSY)
+ goto err_free;
+
+ if (count++ > 1000) {
+ printf("Error: resource %s timed out\n", name);
+ err = -EBUSY;
+ goto err_free;
+ }
+
+ nanosleep(&wait, NULL);
+ }
+
+ nfp_cpp_mutex_free(dev_mutex);
+
+ return res;
+
+err_free:
+ nfp_cpp_mutex_free(dev_mutex);
+ free(res);
+ return NULL;
+}
+
+/*
+ * nfp_resource_release() - Release a NFP Resource handle
+ * @res: NFP Resource handle
+ *
+ * NOTE: This function implicitly unlocks the resource handle
+ */
+void
+nfp_resource_release(struct nfp_resource *res)
+{
+ nfp_cpp_mutex_unlock(res->mutex);
+ nfp_cpp_mutex_free(res->mutex);
+ free(res);
+}
+
+/*
+ * nfp_resource_cpp_id() - Return the cpp_id of a resource handle
+ * @res: NFP Resource handle
+ *
+ * Return: NFP CPP ID
+ */
+uint32_t
+nfp_resource_cpp_id(const struct nfp_resource *res)
+{
+ return res->cpp_id;
+}
+
+/*
+ * nfp_resource_name() - Return the name of a resource handle
+ * @res: NFP Resource handle
+ *
+ * Return: const char pointer to the name of the resource
+ */
+const char *
+nfp_resource_name(const struct nfp_resource *res)
+{
+ return res->name;
+}
+
+/*
+ * nfp_resource_address() - Return the address of a resource handle
+ * @res: NFP Resource handle
+ *
+ * Return: Address of the resource
+ */
+uint64_t
+nfp_resource_address(const struct nfp_resource *res)
+{
+ return res->addr;
+}
+
+/*
+ * nfp_resource_size() - Return the size in bytes of a resource handle
+ * @res: NFP Resource handle
+ *
+ * Return: Size of the resource in bytes
+ */
+uint64_t
+nfp_resource_size(const struct nfp_resource *res)
+{
+ return res->size;
+}
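A hedged usage sketch of the resource API above; the function name is invented for this example, and the PRIx64/PRIu64 specifiers assume <inttypes.h> is available.

static int example_dump_resource(struct nfp_cpp *cpp, const char *name)
{
	struct nfp_resource *res;

	res = nfp_resource_acquire(cpp, name);	/* locks the table entry */
	if (!res)
		return -ENOENT;

	printf("%s: cpp_id=0x%08x addr=0x%" PRIx64 " size=%" PRIu64 "\n",
	       nfp_resource_name(res), nfp_resource_cpp_id(res),
	       nfp_resource_address(res), nfp_resource_size(res));

	nfp_resource_release(res);		/* unlocks and frees the handle */
	return 0;
}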
diff --git a/drivers/net/nfp/nfpcore/nfp_resource.h b/drivers/net/nfp/nfpcore/nfp_resource.h
new file mode 100644
index 00000000..06cc6f74
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_resource.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef NFP_RESOURCE_H
+#define NFP_RESOURCE_H
+
+#include "nfp_cpp.h"
+
+#define NFP_RESOURCE_NFP_NFFW "nfp.nffw"
+#define NFP_RESOURCE_NFP_HWINFO "nfp.info"
+#define NFP_RESOURCE_NSP "nfp.sp"
+
+/**
+ * Opaque handle to a NFP Resource
+ */
+struct nfp_resource;
+
+struct nfp_resource *nfp_resource_acquire(struct nfp_cpp *cpp,
+ const char *name);
+
+/**
+ * Release a NFP Resource, and free the handle
+ * @param[in] res NFP Resource handle
+ */
+void nfp_resource_release(struct nfp_resource *res);
+
+/**
+ * Return the CPP ID of a NFP Resource
+ * @param[in] res NFP Resource handle
+ * @return CPP ID of the NFP Resource
+ */
+uint32_t nfp_resource_cpp_id(const struct nfp_resource *res);
+
+/**
+ * Return the name of a NFP Resource
+ * @param[in] res NFP Resource handle
+ * @return Name of the NFP Resource
+ */
+const char *nfp_resource_name(const struct nfp_resource *res);
+
+/**
+ * Return the target address of a NFP Resource
+ * @param[in] res NFP Resource handle
+ * @return Address of the NFP Resource
+ */
+uint64_t nfp_resource_address(const struct nfp_resource *res);
+
+uint64_t nfp_resource_size(const struct nfp_resource *res);
+
+#endif /* NFP_RESOURCE_H */
diff --git a/drivers/net/nfp/nfpcore/nfp_rtsym.c b/drivers/net/nfp/nfpcore/nfp_rtsym.c
new file mode 100644
index 00000000..cb7d83db
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_rtsym.c
@@ -0,0 +1,327 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+/*
+ * nfp_rtsym.c
+ * Interface for accessing run-time symbol table
+ */
+
+#include <stdio.h>
+#include <rte_byteorder.h>
+#include "nfp_cpp.h"
+#include "nfp_mip.h"
+#include "nfp_rtsym.h"
+#include "nfp6000/nfp6000.h"
+
+/* These need to match the linker */
+#define SYM_TGT_LMEM 0
+#define SYM_TGT_EMU_CACHE 0x17
+
+struct nfp_rtsym_entry {
+ uint8_t type;
+ uint8_t target;
+ uint8_t island;
+ uint8_t addr_hi;
+ uint32_t addr_lo;
+ uint16_t name;
+ uint8_t menum;
+ uint8_t size_hi;
+ uint32_t size_lo;
+};
+
+struct nfp_rtsym_table {
+ struct nfp_cpp *cpp;
+ int num;
+ char *strtab;
+ struct nfp_rtsym symtab[];
+};
+
+static int
+nfp_meid(uint8_t island_id, uint8_t menum)
+{
+ return (island_id & 0x3F) == island_id && menum < 12 ?
+ (island_id << 4) | (menum + 4) : -1;
+}
+
+static void
+nfp_rtsym_sw_entry_init(struct nfp_rtsym_table *cache, uint32_t strtab_size,
+ struct nfp_rtsym *sw, struct nfp_rtsym_entry *fw)
+{
+ sw->type = fw->type;
+ sw->name = cache->strtab + rte_le_to_cpu_16(fw->name) % strtab_size;
+ sw->addr = ((uint64_t)fw->addr_hi << 32) |
+ rte_le_to_cpu_32(fw->addr_lo);
+ sw->size = ((uint64_t)fw->size_hi << 32) |
+ rte_le_to_cpu_32(fw->size_lo);
+
+#ifdef DEBUG
+ printf("rtsym_entry_init\n");
+ printf("\tname=%s, addr=%" PRIx64 ", size=%" PRIu64 ",target=%d\n",
+ sw->name, sw->addr, sw->size, sw->target);
+#endif
+ switch (fw->target) {
+ case SYM_TGT_LMEM:
+ sw->target = NFP_RTSYM_TARGET_LMEM;
+ break;
+ case SYM_TGT_EMU_CACHE:
+ sw->target = NFP_RTSYM_TARGET_EMU_CACHE;
+ break;
+ default:
+ sw->target = fw->target;
+ break;
+ }
+
+ if (fw->menum != 0xff)
+ sw->domain = nfp_meid(fw->island, fw->menum);
+ else if (fw->island != 0xff)
+ sw->domain = fw->island;
+ else
+ sw->domain = -1;
+}
+
+struct nfp_rtsym_table *
+nfp_rtsym_table_read(struct nfp_cpp *cpp)
+{
+ struct nfp_rtsym_table *rtbl;
+ struct nfp_mip *mip;
+
+ mip = nfp_mip_open(cpp);
+ rtbl = __nfp_rtsym_table_read(cpp, mip);
+ nfp_mip_close(mip);
+
+ return rtbl;
+}
+
+/*
+ * This looks more complex than it should be. But we need to get the type for
+ * the ~ right in round_down (it needs to be as wide as the result!), and we
+ * want to evaluate the macro arguments just once each.
+ */
+#define __round_mask(x, y) ((__typeof__(x))((y) - 1))
+
+#define round_up(x, y) \
+ (__extension__ ({ \
+ typeof(x) _x = (x); \
+ ((((_x) - 1) | __round_mask(_x, y)) + 1); \
+ }))
+
+#define round_down(x, y) \
+ (__extension__ ({ \
+ typeof(x) _x = (x); \
+ ((_x) & ~__round_mask(_x, y)); \
+ }))
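For reference (editorial note, not part of the patch), with y a power of two these behave like the Linux helpers of the same name; a worked example:

/* round_up(13, 8)   -> ((13 - 1) | 7) + 1 == 16 */
/* round_down(13, 8) ->  13 & ~7           ==  8 */
/* so round_up(symtab_size, 8) pads the tables to a 64-bit boundary */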
+
+struct nfp_rtsym_table *
+__nfp_rtsym_table_read(struct nfp_cpp *cpp, const struct nfp_mip *mip)
+{
+ uint32_t strtab_addr, symtab_addr, strtab_size, symtab_size;
+ struct nfp_rtsym_entry *rtsymtab;
+ struct nfp_rtsym_table *cache;
+ const uint32_t dram =
+ NFP_CPP_ID(NFP_CPP_TARGET_MU, NFP_CPP_ACTION_RW, 0) |
+ NFP_ISL_EMEM0;
+ int err, n, size;
+
+ if (!mip)
+ return NULL;
+
+ nfp_mip_strtab(mip, &strtab_addr, &strtab_size);
+ nfp_mip_symtab(mip, &symtab_addr, &symtab_size);
+
+ if (!symtab_size || !strtab_size || symtab_size % sizeof(*rtsymtab))
+ return NULL;
+
+ /* Align to 64 bits */
+ symtab_size = round_up(symtab_size, 8);
+ strtab_size = round_up(strtab_size, 8);
+
+ rtsymtab = malloc(symtab_size);
+ if (!rtsymtab)
+ return NULL;
+
+ size = sizeof(*cache);
+ size += symtab_size / sizeof(*rtsymtab) * sizeof(struct nfp_rtsym);
+ size += strtab_size + 1;
+ cache = malloc(size);
+ if (!cache)
+ goto exit_free_rtsym_raw;
+
+ cache->cpp = cpp;
+ cache->num = symtab_size / sizeof(*rtsymtab);
+ cache->strtab = (void *)&cache->symtab[cache->num];
+
+ err = nfp_cpp_read(cpp, dram, symtab_addr, rtsymtab, symtab_size);
+ if (err != (int)symtab_size)
+ goto exit_free_cache;
+
+ err = nfp_cpp_read(cpp, dram, strtab_addr, cache->strtab, strtab_size);
+ if (err != (int)strtab_size)
+ goto exit_free_cache;
+ cache->strtab[strtab_size] = '\0';
+
+ for (n = 0; n < cache->num; n++)
+ nfp_rtsym_sw_entry_init(cache, strtab_size,
+ &cache->symtab[n], &rtsymtab[n]);
+
+ free(rtsymtab);
+
+ return cache;
+
+exit_free_cache:
+ free(cache);
+exit_free_rtsym_raw:
+ free(rtsymtab);
+ return NULL;
+}
+
+/*
+ * nfp_rtsym_count() - Get the number of RTSYM descriptors
+ * @rtbl: NFP RTsym table
+ *
+ * Return: Number of RTSYM descriptors
+ */
+int
+nfp_rtsym_count(struct nfp_rtsym_table *rtbl)
+{
+ if (!rtbl)
+ return -EINVAL;
+
+ return rtbl->num;
+}
+
+/*
+ * nfp_rtsym_get() - Get the Nth RTSYM descriptor
+ * @rtbl: NFP RTsym table
+ * @idx: Index (0-based) of the RTSYM descriptor
+ *
+ * Return: const pointer to a struct nfp_rtsym descriptor, or NULL
+ */
+const struct nfp_rtsym *
+nfp_rtsym_get(struct nfp_rtsym_table *rtbl, int idx)
+{
+ if (!rtbl)
+ return NULL;
+
+ if (idx >= rtbl->num)
+ return NULL;
+
+ return &rtbl->symtab[idx];
+}
+
+/*
+ * nfp_rtsym_lookup() - Return the RTSYM descriptor for a symbol name
+ * @rtbl: NFP RTsym table
+ * @name: Symbol name
+ *
+ * Return: const pointer to a struct nfp_rtsym descriptor, or NULL
+ */
+const struct nfp_rtsym *
+nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name)
+{
+ int n;
+
+ if (!rtbl)
+ return NULL;
+
+ for (n = 0; n < rtbl->num; n++)
+ if (strcmp(name, rtbl->symtab[n].name) == 0)
+ return &rtbl->symtab[n];
+
+ return NULL;
+}
+
+/*
+ * nfp_rtsym_read_le() - Read a simple unsigned scalar value from symbol
+ * @rtbl: NFP RTsym table
+ * @name: Symbol name
+ * @error: Pointer to error code (optional)
+ *
+ * Look up a symbol, map it, read it and return its value. The value of the
+ * symbol is interpreted as a simple little-endian unsigned value. The symbol
+ * can be 4 or 8 bytes in size.
+ *
+ * Return: value read; on error, sets the error code and returns ~0ULL.
+ */
+uint64_t
+nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, int *error)
+{
+ const struct nfp_rtsym *sym;
+ uint32_t val32, id;
+ uint64_t val;
+ int err;
+
+ sym = nfp_rtsym_lookup(rtbl, name);
+ if (!sym) {
+ err = -ENOENT;
+ goto exit;
+ }
+
+ id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain);
+
+#ifdef DEBUG
+ printf("Reading symbol %s with size %" PRIu64 " at %" PRIx64 "\n",
+ name, sym->size, sym->addr);
+#endif
+ switch (sym->size) {
+ case 4:
+ err = nfp_cpp_readl(rtbl->cpp, id, sym->addr, &val32);
+ val = val32;
+ break;
+ case 8:
+ err = nfp_cpp_readq(rtbl->cpp, id, sym->addr, &val);
+ break;
+ default:
+ printf("rtsym '%s' unsupported size: %" PRId64 "\n",
+ name, sym->size);
+ err = -EINVAL;
+ break;
+ }
+
+ if (err)
+ err = -EIO;
+exit:
+ if (error)
+ *error = err;
+
+ if (err)
+ return ~0ULL;
+
+ return val;
+}
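A minimal usage sketch (illustration only, not part of this patch), assuming a valid nfp_cpp handle; the symbol name is a placeholder and headers are omitted:

static uint64_t
example_read_symbol(struct nfp_cpp *cpp)
{
	struct nfp_rtsym_table *rtbl;
	uint64_t val;
	int err;

	rtbl = nfp_rtsym_table_read(cpp);
	if (!rtbl)
		return 0;

	/* "_example_sym" stands in for a real firmware symbol name */
	val = nfp_rtsym_read_le(rtbl, "_example_sym", &err);
	if (err)
		val = 0;

	/* table was allocated with malloc() in __nfp_rtsym_table_read() */
	free(rtbl);
	return val;
}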
+
+uint8_t *
+nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name,
+ unsigned int min_size, struct nfp_cpp_area **area)
+{
+ const struct nfp_rtsym *sym;
+ uint8_t *mem;
+
+#ifdef DEBUG
+ printf("mapping symbol %s\n", name);
+#endif
+ sym = nfp_rtsym_lookup(rtbl, name);
+ if (!sym) {
+ printf("symbol lookup fails for %s\n", name);
+ return NULL;
+ }
+
+ if (sym->size < min_size) {
+ printf("Symbol %s too small (%" PRIu64 " < %u)\n", name,
+ sym->size, min_size);
+ return NULL;
+ }
+
+ mem = nfp_cpp_map_area(rtbl->cpp, sym->domain, sym->target, sym->addr,
+ sym->size, area);
+ if (!mem) {
+ printf("Failed to map symbol %s\n", name);
+ return NULL;
+ }
+#ifdef DEBUG
+ printf("symbol %s with address %p\n", name, mem);
+#endif
+
+ return mem;
+}
diff --git a/drivers/net/nfp/nfpcore/nfp_rtsym.h b/drivers/net/nfp/nfpcore/nfp_rtsym.h
new file mode 100644
index 00000000..8b494211
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_rtsym.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_RTSYM_H__
+#define __NFP_RTSYM_H__
+
+#define NFP_RTSYM_TYPE_NONE 0
+#define NFP_RTSYM_TYPE_OBJECT 1
+#define NFP_RTSYM_TYPE_FUNCTION 2
+#define NFP_RTSYM_TYPE_ABS 3
+
+#define NFP_RTSYM_TARGET_NONE 0
+#define NFP_RTSYM_TARGET_LMEM -1
+#define NFP_RTSYM_TARGET_EMU_CACHE -7
+
+/*
+ * Structure describing a run-time NFP symbol.
+ *
+ * The memory target of the symbol is generally the CPP target number and can be
+ * used directly by the nfp_cpp API calls. However, in some cases (i.e., for
+ * local memory or control store) the target is encoded using a negative number.
+ *
+ * When the target type cannot be used to fully describe the location of a
+ * symbol the domain field is used to further specify the location (i.e., the
+ * specific ME or island number).
+ *
+ * For ME target resources, 'domain' is an MEID.
+ * For Island target resources, 'domain' is an island ID, with the one exception
+ * of "sram" symbols for backward compatibility, which are viewed as global.
+ */
+struct nfp_rtsym {
+ const char *name;
+ uint64_t addr;
+ uint64_t size;
+ int type;
+ int target;
+ int domain;
+};
+
+struct nfp_rtsym_table;
+
+struct nfp_rtsym_table *nfp_rtsym_table_read(struct nfp_cpp *cpp);
+
+struct nfp_rtsym_table *
+__nfp_rtsym_table_read(struct nfp_cpp *cpp, const struct nfp_mip *mip);
+
+int nfp_rtsym_count(struct nfp_rtsym_table *rtbl);
+
+const struct nfp_rtsym *nfp_rtsym_get(struct nfp_rtsym_table *rtbl, int idx);
+
+const struct nfp_rtsym *
+nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name);
+
+uint64_t nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name,
+ int *error);
+uint8_t *
+nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name,
+ unsigned int min_size, struct nfp_cpp_area **area);
+#endif
diff --git a/drivers/net/nfp/nfpcore/nfp_target.h b/drivers/net/nfp/nfpcore/nfp_target.h
new file mode 100644
index 00000000..2884a003
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_target.h
@@ -0,0 +1,579 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef NFP_TARGET_H
+#define NFP_TARGET_H
+
+#include "nfp-common/nfp_resid.h"
+#include "nfp-common/nfp_cppat.h"
+#include "nfp-common/nfp_platform.h"
+#include "nfp_cpp.h"
+
+#define P32 1
+#define P64 2
+
+#define PUSHPULL(_pull, _push) (((_pull) << 4) | ((_push) << 0))
+
+#ifndef NFP_ERRNO
+#include <errno.h>
+#define NFP_ERRNO(x) (errno = (x), -1)
+#endif
+
+static inline int
+pushpull_width(int pp)
+{
+ pp &= 0xf;
+
+ if (pp == 0)
+ return NFP_ERRNO(EINVAL);
+ return (2 << pp);
+}
+
+#define PUSH_WIDTH(_pushpull) pushpull_width((_pushpull) >> 0)
+#define PULL_WIDTH(_pushpull) pushpull_width((_pushpull) >> 4)
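A worked example of the encoding (editorial note, not part of the patch):

/* PUSHPULL(P64, P32) == (2 << 4) | 1 == 0x21         */
/* PUSH_WIDTH(0x21)   == 2 << (0x21 & 0xf) == 4 bytes */
/* PULL_WIDTH(0x21)   == 2 << (0x2  & 0xf) == 8 bytes */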
+
+static inline int
+target_rw(uint32_t cpp_id, int pp, int start, int len)
+{
+ int island = NFP_CPP_ID_ISLAND_of(cpp_id);
+
+ if (island && (island < start || island > (start + len)))
+ return NFP_ERRNO(EINVAL);
+
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 0, 0):
+ return PUSHPULL(0, pp);
+ case NFP_CPP_ID(0, 1, 0):
+ return PUSHPULL(pp, 0);
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0):
+ return PUSHPULL(pp, pp);
+ default:
+ return NFP_ERRNO(EINVAL);
+ }
+}
+
+static inline int
+nfp6000_nbi_dma(uint32_t cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 0, 0): /* ReadNbiDma */
+ return PUSHPULL(0, P64);
+ case NFP_CPP_ID(0, 1, 0): /* WriteNbiDma */
+ return PUSHPULL(P64, 0);
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0):
+ return PUSHPULL(P64, P64);
+ default:
+ return NFP_ERRNO(EINVAL);
+ }
+}
+
+static inline int
+nfp6000_nbi_stats(uint32_t cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 0, 0): /* ReadNbiStats */
+ return PUSHPULL(0, P64);
+ case NFP_CPP_ID(0, 1, 0): /* WriteNbiStats */
+ return PUSHPULL(P64, 0);
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0):
+ return PUSHPULL(P64, P64);
+ default:
+ return NFP_ERRNO(EINVAL);
+ }
+}
+
+static inline int
+nfp6000_nbi_tm(uint32_t cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 0, 0): /* ReadNbiTM */
+ return PUSHPULL(0, P64);
+ case NFP_CPP_ID(0, 1, 0): /* WriteNbiTM */
+ return PUSHPULL(P64, 0);
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0):
+ return PUSHPULL(P64, P64);
+ default:
+ return NFP_ERRNO(EINVAL);
+ }
+}
+
+static inline int
+nfp6000_nbi_ppc(uint32_t cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 0, 0): /* ReadNbiPreclassifier */
+ return PUSHPULL(0, P64);
+ case NFP_CPP_ID(0, 1, 0): /* WriteNbiPreclassifier */
+ return PUSHPULL(P64, 0);
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0):
+ return PUSHPULL(P64, P64);
+ default:
+ return NFP_ERRNO(EINVAL);
+ }
+}
+
+static inline int
+nfp6000_nbi(uint32_t cpp_id, uint64_t address)
+{
+ int island = NFP_CPP_ID_ISLAND_of(cpp_id);
+ uint64_t rel_addr = address & 0x3fFFFF;
+
+ if (island && (island < 8 || island > 9))
+ return NFP_ERRNO(EINVAL);
+
+ if (rel_addr < (1 << 20))
+ return nfp6000_nbi_dma(cpp_id);
+ if (rel_addr < (2 << 20))
+ return nfp6000_nbi_stats(cpp_id);
+ if (rel_addr < (3 << 20))
+ return nfp6000_nbi_tm(cpp_id);
+ return nfp6000_nbi_ppc(cpp_id);
+}
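For orientation (editorial note, not part of the patch), the dispatch above selects one of four 1 MB windows from the low 22 bits of the address:

/* rel_addr < 1 MB  -> NBI DMA registers        */
/* rel_addr < 2 MB  -> NBI statistics           */
/* rel_addr < 3 MB  -> NBI traffic manager (TM) */
/* otherwise        -> NBI preclassifier (PPC)  */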
+
+/*
+ * This table ONLY includes actions that can be done with a read or write of
+ * 32-bit or 64-bit words. All others are not listed.
+ */
+static inline int
+nfp6000_mu_common(uint32_t cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0): /* read_be/write_be */
+ return PUSHPULL(P64, P64);
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 1): /* read_le/write_le */
+ return PUSHPULL(P64, P64);
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 2): /* {read/write}_swap_be */
+ return PUSHPULL(P64, P64);
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 3): /* {read/write}_swap_le */
+ return PUSHPULL(P64, P64);
+ case NFP_CPP_ID(0, 0, 0): /* read_be */
+ return PUSHPULL(0, P64);
+ case NFP_CPP_ID(0, 0, 1): /* read_le */
+ return PUSHPULL(0, P64);
+ case NFP_CPP_ID(0, 0, 2): /* read_swap_be */
+ return PUSHPULL(0, P64);
+ case NFP_CPP_ID(0, 0, 3): /* read_swap_le */
+ return PUSHPULL(0, P64);
+ case NFP_CPP_ID(0, 1, 0): /* write_be */
+ return PUSHPULL(P64, 0);
+ case NFP_CPP_ID(0, 1, 1): /* write_le */
+ return PUSHPULL(P64, 0);
+ case NFP_CPP_ID(0, 1, 2): /* write_swap_be */
+ return PUSHPULL(P64, 0);
+ case NFP_CPP_ID(0, 1, 3): /* write_swap_le */
+ return PUSHPULL(P64, 0);
+ case NFP_CPP_ID(0, 3, 0): /* atomic_read */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 3, 2): /* mask_compare_write */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 4, 0): /* atomic_write */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 4, 2): /* atomic_write_imm */
+ return PUSHPULL(0, 0);
+ case NFP_CPP_ID(0, 4, 3): /* swap_imm */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 5, 0): /* set */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 5, 3): /* test_set_imm */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 6, 0): /* clr */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 6, 3): /* test_clr_imm */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 7, 0): /* add */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 7, 3): /* test_add_imm */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 8, 0): /* addsat */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 8, 3): /* test_subsat_imm */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 9, 0): /* sub */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 9, 3): /* test_sub_imm */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 10, 0): /* subsat */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 10, 3): /* test_subsat_imm */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 13, 0): /* microq128_get */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 13, 1): /* microq128_pop */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 13, 2): /* microq128_put */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 15, 0): /* xor */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 15, 3): /* test_xor_imm */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 28, 0): /* read32_be */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 28, 1): /* read32_le */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 28, 2): /* read32_swap_be */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 28, 3): /* read32_swap_le */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 31, 0): /* write32_be */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 31, 1): /* write32_le */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 31, 2): /* write32_swap_be */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 31, 3): /* write32_swap_le */
+ return PUSHPULL(P32, 0);
+ default:
+ return NFP_ERRNO(EINVAL);
+ }
+}
+
+static inline int
+nfp6000_mu_ctm(uint32_t cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 16, 1): /* packet_read_packet_status */
+ return PUSHPULL(0, P32);
+ default:
+ return nfp6000_mu_common(cpp_id);
+ }
+}
+
+static inline int
+nfp6000_mu_emu(uint32_t cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 18, 0): /* read_queue */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 18, 1): /* read_queue_ring */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 18, 2): /* write_queue */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 18, 3): /* write_queue_ring */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 20, 2): /* journal */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 21, 0): /* get */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 21, 1): /* get_eop */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 21, 2): /* get_freely */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 22, 0): /* pop */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 22, 1): /* pop_eop */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 22, 2): /* pop_freely */
+ return PUSHPULL(0, P32);
+ default:
+ return nfp6000_mu_common(cpp_id);
+ }
+}
+
+static inline int
+nfp6000_mu_imu(uint32_t cpp_id)
+{
+ return nfp6000_mu_common(cpp_id);
+}
+
+static inline int
+nfp6000_mu(uint32_t cpp_id, uint64_t address)
+{
+ int pp;
+ int island = NFP_CPP_ID_ISLAND_of(cpp_id);
+
+ if (island == 0) {
+ if (address < 0x2000000000ULL)
+ pp = nfp6000_mu_ctm(cpp_id);
+ else if (address < 0x8000000000ULL)
+ pp = nfp6000_mu_emu(cpp_id);
+ else if (address < 0x9800000000ULL)
+ pp = nfp6000_mu_ctm(cpp_id);
+ else if (address < 0x9C00000000ULL)
+ pp = nfp6000_mu_emu(cpp_id);
+ else if (address < 0xA000000000ULL)
+ pp = nfp6000_mu_imu(cpp_id);
+ else
+ pp = nfp6000_mu_ctm(cpp_id);
+ } else if (island >= 24 && island <= 27) {
+ pp = nfp6000_mu_emu(cpp_id);
+ } else if (island >= 28 && island <= 31) {
+ pp = nfp6000_mu_imu(cpp_id);
+ } else if (island == 1 ||
+ (island >= 4 && island <= 7) ||
+ (island >= 12 && island <= 13) ||
+ (island >= 32 && island <= 47) ||
+ (island >= 48 && island <= 51)) {
+ pp = nfp6000_mu_ctm(cpp_id);
+ } else {
+ pp = NFP_ERRNO(EINVAL);
+ }
+
+ return pp;
+}
+
+static inline int
+nfp6000_ila(uint32_t cpp_id)
+{
+ int island = NFP_CPP_ID_ISLAND_of(cpp_id);
+
+ if (island && (island < 48 || island > 51))
+ return NFP_ERRNO(EINVAL);
+
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 0, 1): /* read_check_error */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 2, 0): /* read_int */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 3, 0): /* write_int */
+ return PUSHPULL(P32, 0);
+ default:
+ return target_rw(cpp_id, P32, 48, 4);
+ }
+}
+
+static inline int
+nfp6000_pci(uint32_t cpp_id)
+{
+ int island = NFP_CPP_ID_ISLAND_of(cpp_id);
+
+ if (island && (island < 4 || island > 7))
+ return NFP_ERRNO(EINVAL);
+
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 2, 0):
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 3, 0):
+ return PUSHPULL(P32, 0);
+ default:
+ return target_rw(cpp_id, P32, 4, 4);
+ }
+}
+
+static inline int
+nfp6000_crypto(uint32_t cpp_id)
+{
+ int island = NFP_CPP_ID_ISLAND_of(cpp_id);
+
+ if (island && (island < 12 || island > 15))
+ return NFP_ERRNO(EINVAL);
+
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 2, 0):
+ return PUSHPULL(P64, 0);
+ default:
+ return target_rw(cpp_id, P64, 12, 4);
+ }
+}
+
+static inline int
+nfp6000_cap_xpb(uint32_t cpp_id)
+{
+ int island = NFP_CPP_ID_ISLAND_of(cpp_id);
+
+ if (island && (island < 1 || island > 63))
+ return NFP_ERRNO(EINVAL);
+
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 0, 1): /* RingGet */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 0, 2): /* Interthread Signal */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 1, 1): /* RingPut */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 1, 2): /* CTNNWr */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 2, 0): /* ReflectRd, signal none */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 2, 1): /* ReflectRd, signal self */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 2, 2): /* ReflectRd, signal remote */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 2, 3): /* ReflectRd, signal both */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 3, 0): /* ReflectWr, signal none */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 3, 1): /* ReflectWr, signal self */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 3, 2): /* ReflectWr, signal remote */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 3, 3): /* ReflectWr, signal both */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 1):
+ return PUSHPULL(P32, P32);
+ default:
+ return target_rw(cpp_id, P32, 1, 63);
+ }
+}
+
+static inline int
+nfp6000_cls(uint32_t cpp_id)
+{
+ int island = NFP_CPP_ID_ISLAND_of(cpp_id);
+
+ if (island && (island < 1 || island > 63))
+ return NFP_ERRNO(EINVAL);
+
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 0, 3): /* xor */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 2, 0): /* set */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 2, 1): /* clr */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 4, 0): /* add */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 4, 1): /* add64 */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 6, 0): /* sub */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 6, 1): /* sub64 */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 6, 2): /* subsat */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 8, 2): /* hash_mask */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 8, 3): /* hash_clear */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 9, 0): /* ring_get */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 9, 1): /* ring_pop */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 9, 2): /* ring_get_freely */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 9, 3): /* ring_pop_freely */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 10, 0): /* ring_put */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 10, 2): /* ring_journal */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 14, 0): /* reflect_write_sig_local */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 15, 1): /* reflect_read_sig_local */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 17, 2): /* statistic */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 24, 0): /* ring_read */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 24, 1): /* ring_write */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 25, 0): /* ring_workq_add_thread */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 25, 1): /* ring_workq_add_work */
+ return PUSHPULL(P32, 0);
+ default:
+ return target_rw(cpp_id, P32, 0, 64);
+ }
+}
+
+static inline int
+nfp6000_target_pushpull(uint32_t cpp_id, uint64_t address)
+{
+ switch (NFP_CPP_ID_TARGET_of(cpp_id)) {
+ case NFP6000_CPPTGT_NBI:
+ return nfp6000_nbi(cpp_id, address);
+ case NFP6000_CPPTGT_VQDR:
+ return target_rw(cpp_id, P32, 24, 4);
+ case NFP6000_CPPTGT_ILA:
+ return nfp6000_ila(cpp_id);
+ case NFP6000_CPPTGT_MU:
+ return nfp6000_mu(cpp_id, address);
+ case NFP6000_CPPTGT_PCIE:
+ return nfp6000_pci(cpp_id);
+ case NFP6000_CPPTGT_ARM:
+ if (address < 0x10000)
+ return target_rw(cpp_id, P64, 1, 1);
+ else
+ return target_rw(cpp_id, P32, 1, 1);
+ case NFP6000_CPPTGT_CRYPTO:
+ return nfp6000_crypto(cpp_id);
+ case NFP6000_CPPTGT_CTXPB:
+ return nfp6000_cap_xpb(cpp_id);
+ case NFP6000_CPPTGT_CLS:
+ return nfp6000_cls(cpp_id);
+ case 0:
+ return target_rw(cpp_id, P32, 4, 4);
+ default:
+ return NFP_ERRNO(EINVAL);
+ }
+}
+
+static inline int
+nfp_target_pushpull_width(int pp, int write_not_read)
+{
+ if (pp < 0)
+ return pp;
+
+ if (write_not_read)
+ return PULL_WIDTH(pp);
+ else
+ return PUSH_WIDTH(pp);
+}
+
+static inline int
+nfp6000_target_action_width(uint32_t cpp_id, uint64_t address,
+ int write_not_read)
+{
+ int pp;
+
+ pp = nfp6000_target_pushpull(cpp_id, address);
+
+ return nfp_target_pushpull_width(pp, write_not_read);
+}
+
+static inline int
+nfp_target_action_width(uint32_t model, uint32_t cpp_id, uint64_t address,
+ int write_not_read)
+{
+ if (NFP_CPP_MODEL_IS_6000(model)) {
+ return nfp6000_target_action_width(cpp_id, address,
+ write_not_read);
+ } else {
+ return NFP_ERRNO(EINVAL);
+ }
+}
+
+static inline int
+nfp_target_cpp(uint32_t cpp_island_id, uint64_t cpp_island_address,
+ uint32_t *cpp_target_id, uint64_t *cpp_target_address,
+ const uint32_t *imb_table)
+{
+ int err;
+ int island = NFP_CPP_ID_ISLAND_of(cpp_island_id);
+ int target = NFP_CPP_ID_TARGET_of(cpp_island_id);
+ uint32_t imb;
+
+ if (target < 0 || target >= 16)
+ return NFP_ERRNO(EINVAL);
+
+ if (island == 0) {
+ /* Already translated */
+ *cpp_target_id = cpp_island_id;
+ *cpp_target_address = cpp_island_address;
+ return 0;
+ }
+
+ if (!imb_table) {
+ /* CPP + Island only allowed on systems with IMB tables */
+ return NFP_ERRNO(EINVAL);
+ }
+
+ imb = imb_table[target];
+
+ *cpp_target_address = cpp_island_address;
+ err = _nfp6000_cppat_addr_encode(cpp_target_address, island, target,
+ ((imb >> 13) & 7),
+ ((imb >> 12) & 1),
+ ((imb >> 6) & 0x3f),
+ ((imb >> 0) & 0x3f));
+ if (err == 0) {
+ *cpp_target_id =
+ NFP_CPP_ID(target, NFP_CPP_ID_ACTION_of(cpp_island_id),
+ NFP_CPP_ID_TOKEN_of(cpp_island_id));
+ }
+
+ return err;
+}
+
+#endif /* NFP_TARGET_H */
diff --git a/drivers/net/null/meson.build b/drivers/net/null/meson.build
index 68ac0d2a..60e2ce6c 100644
--- a/drivers/net/null/meson.build
+++ b/drivers/net/null/meson.build
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
+version = 2
sources = files('rte_eth_null.c')
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index d003b283..1d2e6b9e 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -73,6 +73,7 @@ struct pmd_internals {
struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];
+ struct ether_addr eth_addr;
/** Bit mask of RSS offloads, the bit offset also means flow type */
uint64_t flow_type_rss_offloads;
@@ -84,16 +85,19 @@ struct pmd_internals {
uint8_t rss_key[40]; /**< 40-byte hash key. */
};
-
-
-static struct ether_addr eth_addr = { .addr_bytes = {0} };
static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
.link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_AUTONEG,
+ .link_autoneg = ETH_LINK_FIXED,
};
+static int eth_null_logtype;
+
+#define PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, eth_null_logtype, \
+ "%s(): " fmt "\n", __func__, ##args)
+
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
@@ -105,10 +109,10 @@ eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
return 0;
packet_size = h->internals->packet_size;
+ if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
+ return 0;
+
for (i = 0; i < nb_bufs; i++) {
- bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
- if (!bufs[i])
- break;
bufs[i]->data_len = (uint16_t)packet_size;
bufs[i]->pkt_len = packet_size;
bufs[i]->port = h->internals->port_id;
@@ -130,10 +134,10 @@ eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
return 0;
packet_size = h->internals->packet_size;
+ if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
+ return 0;
+
for (i = 0; i < nb_bufs; i++) {
- bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
- if (!bufs[i])
- break;
rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
packet_size);
bufs[i]->data_len = (uint16_t)packet_size;
@@ -461,10 +465,11 @@ eth_rss_hash_conf_get(struct rte_eth_dev *dev,
return 0;
}
-static void
+static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
__rte_unused struct ether_addr *addr)
{
+ return 0;
}
static const struct eth_dev_ops ops = {
@@ -496,7 +501,7 @@ eth_dev_null_create(struct rte_vdev_device *dev,
{
const unsigned nb_rx_queues = 1;
const unsigned nb_tx_queues = 1;
- struct rte_eth_dev_data *data = NULL;
+ struct rte_eth_dev_data *data;
struct pmd_internals *internals = NULL;
struct rte_eth_dev *eth_dev = NULL;
@@ -510,22 +515,12 @@ eth_dev_null_create(struct rte_vdev_device *dev,
if (dev->device.numa_node == SOCKET_ID_ANY)
dev->device.numa_node = rte_socket_id();
- RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
+ PMD_LOG(INFO, "Creating null ethdev on numa socket %u",
dev->device.numa_node);
- /* now do all data allocation - for eth_dev structure, dummy pci driver
- * and internal (private) data
- */
- data = rte_zmalloc_socket(rte_vdev_device_name(dev), sizeof(*data), 0,
- dev->device.numa_node);
- if (!data)
- return -ENOMEM;
-
eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
- if (!eth_dev) {
- rte_free(data);
+ if (!eth_dev)
return -ENOMEM;
- }
/* now put it all together
* - store queue data in internals,
@@ -540,19 +535,19 @@ eth_dev_null_create(struct rte_vdev_device *dev,
internals->packet_size = packet_size;
internals->packet_copy = packet_copy;
internals->port_id = eth_dev->data->port_id;
+ eth_random_addr(internals->eth_addr.addr_bytes);
internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;
rte_memcpy(internals->rss_key, default_rss_key, 40);
- rte_memcpy(data, eth_dev->data, sizeof(*data));
+ data = eth_dev->data;
data->nb_rx_queues = (uint16_t)nb_rx_queues;
data->nb_tx_queues = (uint16_t)nb_tx_queues;
data->dev_link = pmd_link;
- data->mac_addrs = &eth_addr;
+ data->mac_addrs = &internals->eth_addr;
- eth_dev->data = data;
eth_dev->dev_ops = &ops;
/* finally assign rx and tx ops */
@@ -564,6 +559,7 @@ eth_dev_null_create(struct rte_vdev_device *dev,
eth_dev->tx_pkt_burst = eth_null_tx;
}
+ rte_eth_dev_probing_finish(eth_dev);
return 0;
}
@@ -608,6 +604,7 @@ rte_pmd_null_probe(struct rte_vdev_device *dev)
unsigned packet_size = default_packet_size;
unsigned packet_copy = default_packet_copy;
struct rte_kvargs *kvlist = NULL;
+ struct rte_eth_dev *eth_dev;
int ret;
if (!dev)
@@ -615,7 +612,20 @@ rte_pmd_null_probe(struct rte_vdev_device *dev)
name = rte_vdev_device_name(dev);
params = rte_vdev_device_args(dev);
- RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);
+ PMD_LOG(INFO, "Initializing pmd_null for %s", name);
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(params) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ PMD_LOG(ERR, "Failed to probe %s", name);
+ return -1;
+ }
+ /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->dev_ops = &ops;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
if (params != NULL) {
kvlist = rte_kvargs_parse(params, valid_arguments);
@@ -641,8 +651,8 @@ rte_pmd_null_probe(struct rte_vdev_device *dev)
}
}
- RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %d, "
- "packet copy is %s\n", packet_size,
+ PMD_LOG(INFO, "Configure pmd_null: packet size is %d, "
+ "packet copy is %s", packet_size,
packet_copy ? "enabled" : "disabled");
ret = eth_dev_null_create(dev, packet_size, packet_copy);
@@ -661,7 +671,7 @@ rte_pmd_null_remove(struct rte_vdev_device *dev)
if (!dev)
return -EINVAL;
- RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
+ PMD_LOG(INFO, "Closing null ethdev on numa socket %u",
rte_socket_id());
/* find the ethdev entry */
@@ -670,7 +680,6 @@ rte_pmd_null_remove(struct rte_vdev_device *dev)
return -1;
rte_free(eth_dev->data->dev_private);
- rte_free(eth_dev->data);
rte_eth_dev_release_port(eth_dev);
@@ -687,3 +696,12 @@ RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
"size=<int> "
"copy=<int>");
+
+RTE_INIT(eth_null_init_log);
+static void
+eth_null_init_log(void)
+{
+ eth_null_logtype = rte_log_register("pmd.net.null");
+ if (eth_null_logtype >= 0)
+ rte_log_set_level(eth_null_logtype, RTE_LOG_NOTICE);
+}
diff --git a/drivers/net/octeontx/Makefile b/drivers/net/octeontx/Makefile
index 3e4a1066..885f1768 100644
--- a/drivers/net/octeontx/Makefile
+++ b/drivers/net/octeontx/Makefile
@@ -10,6 +10,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_pmd_octeontx.a
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx/
CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx/
EXPORT_MAP := rte_pmd_octeontx_version.map
@@ -46,7 +47,7 @@ endif
CFLAGS_octeontx_ethdev.o += -DALLOW_EXPERIMENTAL_API
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
-LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_common_octeontx
LDLIBS += -lrte_mempool_octeontx
LDLIBS += -lrte_eventdev
LDLIBS += -lrte_bus_pci
diff --git a/drivers/net/octeontx/base/octeontx_bgx.c b/drivers/net/octeontx/base/octeontx_bgx.c
index 8576d8ed..0e238826 100644
--- a/drivers/net/octeontx/base/octeontx_bgx.c
+++ b/drivers/net/octeontx/base/octeontx_bgx.c
@@ -19,7 +19,7 @@ octeontx_bgx_port_open(int port, octeontx_mbox_bgx_port_conf_t *conf)
hdr.msg = MBOX_BGX_PORT_OPEN;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, &bgx_conf, len);
+ res = octeontx_mbox_send(&hdr, NULL, 0, &bgx_conf, len);
if (res < 0)
return -EACCES;
@@ -49,7 +49,7 @@ octeontx_bgx_port_close(int port)
hdr.msg = MBOX_BGX_PORT_CLOSE;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0);
+ res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
if (res < 0)
return -EACCES;
@@ -66,7 +66,7 @@ octeontx_bgx_port_start(int port)
hdr.msg = MBOX_BGX_PORT_START;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0);
+ res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
if (res < 0)
return -EACCES;
@@ -83,7 +83,7 @@ octeontx_bgx_port_stop(int port)
hdr.msg = MBOX_BGX_PORT_STOP;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0);
+ res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
if (res < 0)
return -EACCES;
@@ -103,7 +103,7 @@ octeontx_bgx_port_get_config(int port, octeontx_mbox_bgx_port_conf_t *conf)
hdr.vfid = port;
memset(&bgx_conf, 0, sizeof(octeontx_mbox_bgx_port_conf_t));
- res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, &bgx_conf, len);
+ res = octeontx_mbox_send(&hdr, NULL, 0, &bgx_conf, len);
if (res < 0)
return -EACCES;
@@ -135,7 +135,7 @@ octeontx_bgx_port_status(int port, octeontx_mbox_bgx_port_status_t *stat)
hdr.msg = MBOX_BGX_PORT_GET_STATUS;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, &bgx_stat, len);
+ res = octeontx_mbox_send(&hdr, NULL, 0, &bgx_stat, len);
if (res < 0)
return -EACCES;
@@ -156,7 +156,7 @@ octeontx_bgx_port_stats(int port, octeontx_mbox_bgx_port_stats_t *stats)
hdr.msg = MBOX_BGX_PORT_GET_STATS;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, &bgx_stats, len);
+ res = octeontx_mbox_send(&hdr, NULL, 0, &bgx_stats, len);
if (res < 0)
return -EACCES;
@@ -181,7 +181,7 @@ octeontx_bgx_port_stats_clr(int port)
hdr.msg = MBOX_BGX_PORT_CLR_STATS;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0);
+ res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
if (res < 0)
return -EACCES;
@@ -200,7 +200,7 @@ octeontx_bgx_port_link_status(int port)
hdr.msg = MBOX_BGX_PORT_GET_LINK_STATUS;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, &link, len);
+ res = octeontx_mbox_send(&hdr, NULL, 0, &link, len);
if (res < 0)
return -EACCES;
@@ -219,7 +219,7 @@ octeontx_bgx_port_promisc_set(int port, int en)
hdr.vfid = port;
prom = en ? 1 : 0;
- res = octeontx_ssovf_mbox_send(&hdr, &prom, sizeof(prom), NULL, 0);
+ res = octeontx_mbox_send(&hdr, &prom, sizeof(prom), NULL, 0);
if (res < 0)
return -EACCES;
@@ -237,7 +237,7 @@ octeontx_bgx_port_mac_set(int port, uint8_t *mac_addr)
hdr.msg = MBOX_BGX_PORT_SET_MACADDR;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, mac_addr, len, NULL, 0);
+ res = octeontx_mbox_send(&hdr, mac_addr, len, NULL, 0);
if (res < 0)
return -EACCES;
diff --git a/drivers/net/octeontx/base/octeontx_pkivf.c b/drivers/net/octeontx/base/octeontx_pkivf.c
index 58a7f110..1babea0e 100644
--- a/drivers/net/octeontx/base/octeontx_pkivf.c
+++ b/drivers/net/octeontx/base/octeontx_pkivf.c
@@ -19,7 +19,7 @@ octeontx_pki_port_open(int port)
hdr.msg = MBOX_PKI_PORT_OPEN;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0);
+ res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
if (res < 0)
return -EACCES;
return res;
@@ -38,7 +38,7 @@ octeontx_pki_port_hash_config(int port, pki_hash_cfg_t *hash_cfg)
hdr.msg = MBOX_PKI_PORT_HASH_CONFIG;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, &h_cfg, len, NULL, 0);
+ res = octeontx_mbox_send(&hdr, &h_cfg, len, NULL, 0);
if (res < 0)
return -EACCES;
@@ -58,7 +58,7 @@ octeontx_pki_port_pktbuf_config(int port, pki_pktbuf_cfg_t *buf_cfg)
hdr.msg = MBOX_PKI_PORT_PKTBUF_CONFIG;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, &b_cfg, len, NULL, 0);
+ res = octeontx_mbox_send(&hdr, &b_cfg, len, NULL, 0);
if (res < 0)
return -EACCES;
return res;
@@ -77,7 +77,7 @@ octeontx_pki_port_create_qos(int port, pki_qos_cfg_t *qos_cfg)
hdr.msg = MBOX_PKI_PORT_CREATE_QOS;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, &q_cfg, len, NULL, 0);
+ res = octeontx_mbox_send(&hdr, &q_cfg, len, NULL, 0);
if (res < 0)
return -EACCES;
@@ -99,7 +99,7 @@ octeontx_pki_port_errchk_config(int port, pki_errchk_cfg_t *cfg)
hdr.msg = MBOX_PKI_PORT_ERRCHK_CONFIG;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, &e_cfg, len, NULL, 0);
+ res = octeontx_mbox_send(&hdr, &e_cfg, len, NULL, 0);
if (res < 0)
return -EACCES;
diff --git a/drivers/net/octeontx/base/octeontx_pkivf.h b/drivers/net/octeontx/base/octeontx_pkivf.h
index d036054c..764aff53 100644
--- a/drivers/net/octeontx/base/octeontx_pkivf.h
+++ b/drivers/net/octeontx/base/octeontx_pkivf.h
@@ -422,7 +422,7 @@ octeontx_pki_port_modify_qos(int port, pki_mod_qos_t *qos_cfg)
hdr.msg = MBOX_PKI_PORT_MODIFY_QOS;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, &q_cfg, len, NULL, 0);
+ res = octeontx_mbox_send(&hdr, &q_cfg, len, NULL, 0);
if (res < 0)
return -EACCES;
@@ -442,7 +442,7 @@ octeontx_pki_port_delete_qos(int port, pki_del_qos_t *qos_cfg)
hdr.msg = MBOX_PKI_PORT_DELETE_QOS;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, &q_cfg, len, NULL, 0);
+ res = octeontx_mbox_send(&hdr, &q_cfg, len, NULL, 0);
if (res < 0)
return -EACCES;
@@ -464,7 +464,7 @@ octeontx_pki_port_close(int port)
hdr.msg = MBOX_PKI_PORT_CLOSE;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
+ res = octeontx_mbox_send(&hdr, &ptype, len, NULL, 0);
if (res < 0)
return -EACCES;
@@ -486,7 +486,7 @@ octeontx_pki_port_start(int port)
hdr.msg = MBOX_PKI_PORT_START;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
+ res = octeontx_mbox_send(&hdr, &ptype, len, NULL, 0);
if (res < 0)
return -EACCES;
@@ -508,7 +508,7 @@ octeontx_pki_port_stop(int port)
hdr.msg = MBOX_PKI_PORT_STOP;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
+ res = octeontx_mbox_send(&hdr, &ptype, len, NULL, 0);
if (res < 0)
return -EACCES;
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index b739c0b3..1eb453b2 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -122,7 +122,7 @@ octeontx_port_open(struct octeontx_nic *nic)
int res;
res = 0;
-
+ memset(&bgx_port_conf, 0x0, sizeof(bgx_port_conf));
PMD_INIT_FUNC_TRACE();
res = octeontx_bgx_port_open(nic->port_id, &bgx_port_conf);
@@ -283,34 +283,14 @@ octeontx_dev_configure(struct rte_eth_dev *dev)
return -EINVAL;
}
- if (!rxmode->hw_strip_crc) {
+ if (!(rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
PMD_INIT_LOG(NOTICE, "can't disable hw crc strip");
- rxmode->hw_strip_crc = 1;
- }
-
- if (rxmode->hw_ip_checksum) {
- PMD_INIT_LOG(NOTICE, "rxcksum not supported");
- rxmode->hw_ip_checksum = 0;
- }
-
- if (rxmode->split_hdr_size) {
- octeontx_log_err("rxmode does not support split header");
- return -EINVAL;
+ rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
- if (rxmode->hw_vlan_filter) {
- octeontx_log_err("VLAN filter not supported");
- return -EINVAL;
- }
-
- if (rxmode->hw_vlan_extend) {
- octeontx_log_err("VLAN extended not supported");
- return -EINVAL;
- }
-
- if (rxmode->enable_lro) {
- octeontx_log_err("LRO not supported");
- return -EINVAL;
+ if (!(txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
+ PMD_INIT_LOG(NOTICE, "cant disable lockfree tx");
+ txmode->offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
}
if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
@@ -488,20 +468,6 @@ octeontx_dev_promisc_disable(struct rte_eth_dev *dev)
octeontx_port_promisc_set(nic, 0);
}
-static inline int
-octeontx_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &dev->data->dev_link;
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
static int
octeontx_port_link_status(struct octeontx_nic *nic)
{
@@ -532,7 +498,6 @@ octeontx_dev_link_update(struct rte_eth_dev *dev,
struct rte_eth_link link;
int res;
- res = 0;
PMD_INIT_FUNC_TRACE();
res = octeontx_port_link_status(nic);
@@ -566,6 +531,7 @@ octeontx_dev_link_update(struct rte_eth_dev *dev,
case OCTEONTX_LINK_SPEED_RESERVE1:
case OCTEONTX_LINK_SPEED_RESERVE2:
default:
+ link.link_speed = ETH_SPEED_NUM_NONE;
octeontx_log_err("incorrect link speed %d", nic->speed);
break;
}
@@ -573,7 +539,7 @@ octeontx_dev_link_update(struct rte_eth_dev *dev,
link.link_duplex = ETH_LINK_FULL_DUPLEX;
link.link_autoneg = ETH_LINK_AUTONEG;
- return octeontx_atomic_write_link_status(dev, &link);
+ return rte_eth_linkstatus_set(dev, &link);
}
static int
@@ -594,7 +560,7 @@ octeontx_dev_stats_reset(struct rte_eth_dev *dev)
octeontx_port_stats_clr(nic);
}
-static void
+static int
octeontx_dev_default_mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *addr)
{
@@ -605,6 +571,8 @@ octeontx_dev_default_mac_addr_set(struct rte_eth_dev *dev,
if (ret != 0)
octeontx_log_err("failed to set MAC address on port %d",
nic->port_id);
+
+ return ret;
}
static void
@@ -619,28 +587,25 @@ octeontx_dev_info(struct rte_eth_dev *dev,
ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
ETH_LINK_SPEED_40G;
- dev_info->driver_name = RTE_STR(rte_octeontx_pmd);
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = PKI_MAX_PKTLEN;
dev_info->max_rx_queues = 1;
dev_info->max_tx_queues = PKO_MAX_NUM_DQ;
dev_info->min_rx_bufsize = 0;
- dev_info->pci_dev = NULL;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_free_thresh = 0,
.rx_drop_en = 0,
+ .offloads = OCTEONTX_RX_OFFLOADS,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
.tx_free_thresh = 0,
- .txq_flags =
- ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS |
- ETH_TXQ_FLAGS_NOXSUMS,
+ .offloads = OCTEONTX_TX_OFFLOADS,
};
- dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MT_LOCKFREE;
+ dev_info->rx_offload_capa = OCTEONTX_RX_OFFLOADS;
+ dev_info->tx_offload_capa = OCTEONTX_TX_OFFLOADS;
}
static void
@@ -744,7 +709,7 @@ octeontx_dev_tx_queue_release(void *tx_queue)
static int
octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
uint16_t nb_desc, unsigned int socket_id,
- const struct rte_eth_txconf *tx_conf)
+ const struct rte_eth_txconf *tx_conf __rte_unused)
{
struct octeontx_nic *nic = octeontx_pmd_priv(dev);
struct octeontx_txq *txq = NULL;
@@ -753,7 +718,6 @@ octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
RTE_SET_USED(nb_desc);
RTE_SET_USED(socket_id);
- RTE_SET_USED(tx_conf);
dq_num = (nic->port_id * PKO_VF_NUM_DQ) + qidx;
@@ -1039,7 +1003,7 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
char octtx_name[OCTEONTX_MAX_NAME_LEN];
struct octeontx_nic *nic = NULL;
struct rte_eth_dev *eth_dev = NULL;
- struct rte_eth_dev_data *data = NULL;
+ struct rte_eth_dev_data *data;
const char *name = rte_vdev_device_name(dev);
PMD_INIT_FUNC_TRACE();
@@ -1052,16 +1016,10 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
eth_dev->tx_pkt_burst = octeontx_xmit_pkts;
eth_dev->rx_pkt_burst = octeontx_recv_pkts;
+ rte_eth_dev_probing_finish(eth_dev);
return 0;
}
- data = rte_zmalloc_socket(octtx_name, sizeof(*data), 0, socket_id);
- if (data == NULL) {
- octeontx_log_err("failed to allocate devdata");
- res = -ENOMEM;
- goto err;
- }
-
nic = rte_zmalloc_socket(octtx_name, sizeof(*nic), 0, socket_id);
if (nic == NULL) {
octeontx_log_err("failed to allocate nic structure");
@@ -1097,11 +1055,9 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
eth_dev->data->kdrv = RTE_KDRV_NONE;
eth_dev->data->numa_node = dev->device.numa_node;
- rte_memcpy(data, (eth_dev)->data, sizeof(*data));
+ data = eth_dev->data;
data->dev_private = nic;
-
data->port_id = eth_dev->data->port_id;
- snprintf(data->name, sizeof(data->name), "%s", eth_dev->data->name);
nic->ev_queues = 1;
nic->ev_ports = 1;
@@ -1120,7 +1076,6 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
goto err;
}
- eth_dev->data = data;
eth_dev->dev_ops = &octeontx_dev_ops;
/* Finally save ethdev pointer to the NIC structure */
@@ -1146,10 +1101,11 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
rte_octeontx_pchan_map[(nic->base_ochan >> 8) & 0x7]
[(nic->base_ochan >> 4) & 0xF] = data->port_id;
+ rte_eth_dev_probing_finish(eth_dev);
return data->port_id;
err:
- if (port)
+ if (nic)
octeontx_port_close(nic);
if (eth_dev != NULL) {
@@ -1188,7 +1144,6 @@ octeontx_remove(struct rte_vdev_device *dev)
rte_free(eth_dev->data->mac_addrs);
rte_free(eth_dev->data->dev_private);
- rte_free(eth_dev->data);
rte_eth_dev_release_port(eth_dev);
rte_event_dev_close(nic->evdev);
}
@@ -1210,12 +1165,27 @@ octeontx_probe(struct rte_vdev_device *dev)
struct rte_event_dev_config dev_conf;
const char *eventdev_name = "event_octeontx";
struct rte_event_dev_info info;
+ struct rte_eth_dev *eth_dev;
struct octeontx_vdev_init_params init_params = {
OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT
};
dev_name = rte_vdev_device_name(dev);
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(rte_vdev_device_args(dev)) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(dev_name);
+ if (!eth_dev) {
+ RTE_LOG(ERR, PMD, "Failed to probe %s\n", dev_name);
+ return -1;
+ }
+ /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->dev_ops = &octeontx_dev_ops;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
+
res = octeontx_parse_vdev_init_params(&init_params, dev);
if (res < 0)
return -EINVAL;
diff --git a/drivers/net/octeontx/octeontx_ethdev.h b/drivers/net/octeontx/octeontx_ethdev.h
index 10e42e14..14f16969 100644
--- a/drivers/net/octeontx/octeontx_ethdev.h
+++ b/drivers/net/octeontx/octeontx_ethdev.h
@@ -28,6 +28,10 @@
#define OCTEONTX_MAX_BGX_PORTS 4
#define OCTEONTX_MAX_LMAC_PER_BGX 4
+#define OCTEONTX_RX_OFFLOADS (DEV_RX_OFFLOAD_CRC_STRIP \
+ | DEV_RX_OFFLOAD_CHECKSUM)
+#define OCTEONTX_TX_OFFLOADS DEV_TX_OFFLOAD_MT_LOCKFREE
+
static inline struct octeontx_nic *
octeontx_pmd_priv(struct rte_eth_dev *dev)
{
diff --git a/drivers/net/pcap/meson.build b/drivers/net/pcap/meson.build
index 8b81214e..0c4e0201 100644
--- a/drivers/net/pcap/meson.build
+++ b/drivers/net/pcap/meson.build
@@ -1,22 +1,12 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-if meson.is_cross_build()
- pcap_dep = cc.find_library('pcap', required: false)
- if pcap_dep.found()
- ext_deps += pcap_dep
- else
- build = false
- endif
+pcap_dep = cc.find_library('pcap', required: false)
+if pcap_dep.found() and cc.has_header('pcap.h', dependencies: pcap_dep)
+ build = true
else
- pcap_dep = dependency('pcap', required: false)
- if pcap_dep.found() == true
- ext_deps += pcap_dep
- elif find_program('pcap-config', required: false).found() == true
- ext_deps += cc.find_library('pcap')
- else
- build = false
- endif
+ build = false
endif
sources = files('rte_eth_pcap.c')
+ext_deps += pcap_dep
pkgconfig_extra_libs += '-lpcap'
diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index c1571e1f..6bd4a7d7 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -96,9 +96,15 @@ static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
.link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_AUTONEG,
+ .link_autoneg = ETH_LINK_FIXED,
};
+static int eth_pcap_logtype;
+
+#define PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, eth_pcap_logtype, \
+ "%s(): " fmt "\n", __func__, ##args)
+
static int
eth_pcap_rx_jumbo(struct rte_mempool *mb_pool, struct rte_mbuf *mbuf,
const u_char *data, uint16_t data_len)
@@ -256,8 +262,8 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
pcap_dump((u_char *)dumper_q->dumper, &header,
tx_pcap_data);
} else {
- RTE_LOG(ERR, PMD,
- "Dropping PCAP packet. Size (%d) > max jumbo size (%d).\n",
+ PMD_LOG(ERR,
+ "Dropping PCAP packet. Size (%d) > max jumbo size (%d).",
mbuf->pkt_len,
ETHER_MAX_JUMBO_FRAME_LEN);
@@ -313,8 +319,8 @@ eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
ret = pcap_sendpacket(tx_queue->pcap,
tx_pcap_data, mbuf->pkt_len);
} else {
- RTE_LOG(ERR, PMD,
- "Dropping PCAP packet. Size (%d) > max jumbo size (%d).\n",
+ PMD_LOG(ERR,
+ "Dropping PCAP packet. Size (%d) > max jumbo size (%d).",
mbuf->pkt_len,
ETHER_MAX_JUMBO_FRAME_LEN);
@@ -346,7 +352,7 @@ open_iface_live(const char *iface, pcap_t **pcap) {
RTE_ETH_PCAP_PROMISC, RTE_ETH_PCAP_TIMEOUT, errbuf);
if (*pcap == NULL) {
- RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", iface, errbuf);
+ PMD_LOG(ERR, "Couldn't open %s: %s", iface, errbuf);
return -1;
}
@@ -357,7 +363,7 @@ static int
open_single_iface(const char *iface, pcap_t **pcap)
{
if (open_iface_live(iface, pcap) < 0) {
- RTE_LOG(ERR, PMD, "Couldn't open interface %s\n", iface);
+ PMD_LOG(ERR, "Couldn't open interface %s", iface);
return -1;
}
@@ -376,7 +382,7 @@ open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper)
*/
tx_pcap = pcap_open_dead(DLT_EN10MB, RTE_ETH_PCAP_SNAPSHOT_LEN);
if (tx_pcap == NULL) {
- RTE_LOG(ERR, PMD, "Couldn't create dead pcap\n");
+ PMD_LOG(ERR, "Couldn't create dead pcap");
return -1;
}
@@ -384,7 +390,7 @@ open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper)
*dumper = pcap_dump_open(tx_pcap, pcap_filename);
if (*dumper == NULL) {
pcap_close(tx_pcap);
- RTE_LOG(ERR, PMD, "Couldn't open %s for writing.\n",
+ PMD_LOG(ERR, "Couldn't open %s for writing.",
pcap_filename);
return -1;
}
@@ -398,7 +404,7 @@ open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap)
{
*pcap = pcap_open_offline(pcap_filename, errbuf);
if (*pcap == NULL) {
- RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", pcap_filename,
+ PMD_LOG(ERR, "Couldn't open %s: %s", pcap_filename,
errbuf);
return -1;
}
@@ -773,27 +779,16 @@ pmd_init_internals(struct rte_vdev_device *vdev,
struct pmd_internals **internals,
struct rte_eth_dev **eth_dev)
{
- struct rte_eth_dev_data *data = NULL;
+ struct rte_eth_dev_data *data;
unsigned int numa_node = vdev->device.numa_node;
- const char *name;
- name = rte_vdev_device_name(vdev);
- RTE_LOG(INFO, PMD, "Creating pcap-backed ethdev on numa socket %d\n",
+ PMD_LOG(INFO, "Creating pcap-backed ethdev on numa socket %d",
numa_node);
- /* now do all data allocation - for eth_dev structure
- * and internal (private) data
- */
- data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
- if (data == NULL)
- return -1;
-
/* reserve an ethdev entry */
*eth_dev = rte_eth_vdev_allocate(vdev, sizeof(**internals));
- if (*eth_dev == NULL) {
- rte_free(data);
+ if (!(*eth_dev))
return -1;
- }
/* now put it all together
* - store queue data in internals,
@@ -802,7 +797,7 @@ pmd_init_internals(struct rte_vdev_device *vdev,
* - and point eth_dev structure to new eth_dev_data structure
*/
*internals = (*eth_dev)->data->dev_private;
- rte_memcpy(data, (*eth_dev)->data, sizeof(*data));
+ data = (*eth_dev)->data;
data->nb_rx_queues = (uint16_t)nb_rx_queues;
data->nb_tx_queues = (uint16_t)nb_tx_queues;
data->dev_link = pmd_link;
@@ -812,7 +807,6 @@ pmd_init_internals(struct rte_vdev_device *vdev,
* NOTE: we'll replace the data element, of originally allocated
* eth_dev so the rings are local per-process
*/
- (*eth_dev)->data = data;
(*eth_dev)->dev_ops = &ops;
return 0;
@@ -899,6 +893,7 @@ eth_from_pcaps(struct rte_vdev_device *vdev,
else
eth_dev->tx_pkt_burst = eth_pcap_tx;
+ rte_eth_dev_probing_finish(eth_dev);
return 0;
}
@@ -910,16 +905,30 @@ pmd_pcap_probe(struct rte_vdev_device *dev)
struct rte_kvargs *kvlist;
struct pmd_devargs pcaps = {0};
struct pmd_devargs dumpers = {0};
+ struct rte_eth_dev *eth_dev;
int single_iface = 0;
int ret;
name = rte_vdev_device_name(dev);
- RTE_LOG(INFO, PMD, "Initializing pmd_pcap for %s\n", name);
+ PMD_LOG(INFO, "Initializing pmd_pcap for %s", name);
gettimeofday(&start_time, NULL);
start_cycles = rte_get_timer_cycles();
hz = rte_get_timer_hz();
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(rte_vdev_device_args(dev)) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ PMD_LOG(ERR, "Failed to probe %s", name);
+ return -1;
+ }
+ /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->dev_ops = &ops;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
+
kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
if (kvlist == NULL)
return -1;
@@ -1008,7 +1017,7 @@ pmd_pcap_remove(struct rte_vdev_device *dev)
{
struct rte_eth_dev *eth_dev = NULL;
- RTE_LOG(INFO, PMD, "Closing pcap ethdev on numa socket %d\n",
+ PMD_LOG(INFO, "Closing pcap ethdev on numa socket %d",
rte_socket_id());
if (!dev)
@@ -1020,7 +1029,6 @@ pmd_pcap_remove(struct rte_vdev_device *dev)
return -1;
rte_free(eth_dev->data->dev_private);
- rte_free(eth_dev->data);
rte_eth_dev_release_port(eth_dev);
@@ -1040,3 +1048,12 @@ RTE_PMD_REGISTER_PARAM_STRING(net_pcap,
ETH_PCAP_RX_IFACE_ARG "=<ifc> "
ETH_PCAP_TX_IFACE_ARG "=<ifc> "
ETH_PCAP_IFACE_ARG "=<ifc>");
+
+RTE_INIT(eth_pcap_init_log);
+static void
+eth_pcap_init_log(void)
+{
+ eth_pcap_logtype = rte_log_register("pmd.net.pcap");
+ if (eth_pcap_logtype >= 0)
+ rte_log_set_level(eth_pcap_logtype, RTE_LOG_NOTICE);
+}
diff --git a/drivers/net/qede/LICENSE.qede_pmd b/drivers/net/qede/LICENSE.qede_pmd
index c7cbdccc..022085a1 100644
--- a/drivers/net/qede/LICENSE.qede_pmd
+++ b/drivers/net/qede/LICENSE.qede_pmd
@@ -1,28 +1,3 @@
-/*
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of QLogic Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
*/
diff --git a/drivers/net/qede/Makefile b/drivers/net/qede/Makefile
index ccbffa45..c30a867e 100644
--- a/drivers/net/qede/Makefile
+++ b/drivers/net/qede/Makefile
@@ -1,6 +1,6 @@
-# Copyright (c) 2016 QLogic Corporation.
+# Copyright (c) 2016 - 2018 Cavium Inc.
# All rights reserved.
-# www.qlogic.com
+# www.cavium.com
#
# See LICENSE.qede_pmd for copyright and licensing details.
@@ -73,8 +73,7 @@ ifeq ($(shell clang -Wno-pointer-bool-conversion -Werror -E - < /dev/null > /dev
CFLAGS_BASE_DRIVER += -Wno-pointer-bool-conversion
endif
else #ICC
-CFLAGS_BASE_DRIVER += -wd188 #188: enumerated type mixed with another type
-CFLAGS_qede_ethdev.o += -wd279 #279: controlling expression is constant
+CFLAGS_qede_ethdev.o += -diag-disable 279 #279: controlling expression is constant
endif
#
diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c
index fe42f325..ca1c2b11 100644
--- a/drivers/net/qede/base/bcm_osal.c
+++ b/drivers/net/qede/base/bcm_osal.c
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -133,10 +133,10 @@ void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
(unsigned long)rte_get_timer_cycles());
if (core_id == (unsigned int)LCORE_ID_ANY)
- core_id = 0;
+ core_id = rte_get_master_lcore();
socket_id = rte_lcore_to_socket_id(core_id);
- mz = rte_memzone_reserve_aligned(mz_name, size,
- socket_id, 0, RTE_CACHE_LINE_SIZE);
+ mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
if (!mz) {
DP_ERR(p_dev, "Unable to allocate DMA memory "
"of size %zu bytes - %s\n",
@@ -172,9 +172,10 @@ void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev,
snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
(unsigned long)rte_get_timer_cycles());
if (core_id == (unsigned int)LCORE_ID_ANY)
- core_id = 0;
+ core_id = rte_get_master_lcore();
socket_id = rte_lcore_to_socket_id(core_id);
- mz = rte_memzone_reserve_aligned(mz_name, size, socket_id, 0, align);
+ mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, align);
if (!mz) {
DP_ERR(p_dev, "Unable to allocate DMA memory "
"of size %zu bytes - %s\n",
diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h
index 52c2f0ec..27090c79 100644
--- a/drivers/net/qede/base/bcm_osal.h
+++ b/drivers/net/qede/base/bcm_osal.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
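
Two things change in the bcm_osal.c DMA allocators above: the fallback lcore is now the master lcore rather than core 0, and the memzone is reserved with RTE_MEMZONE_IOVA_CONTIG, which is needed for device DMA now that memzones are no longer guaranteed to be IOVA-contiguous by default. A minimal stand-alone sketch of the same call pattern (the helper name is illustrative, not part of the driver):

    #include <rte_common.h>
    #include <rte_lcore.h>
    #include <rte_memzone.h>

    /* Reserve a cache-line aligned, IOVA-contiguous zone on the socket of
     * the master lcore, mirroring the osal_dma_alloc_coherent() change.
     */
    static const struct rte_memzone *
    example_dma_zone(const char *name, size_t size)
    {
        unsigned int socket_id =
            rte_lcore_to_socket_id(rte_get_master_lcore());

        return rte_memzone_reserve_aligned(name, size, socket_id,
                                           RTE_MEMZONE_IOVA_CONTIG,
                                           RTE_CACHE_LINE_SIZE);
    }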
diff --git a/drivers/net/qede/base/common_hsi.h b/drivers/net/qede/base/common_hsi.h
index 9a6059ac..52507844 100644
--- a/drivers/net/qede/base/common_hsi.h
+++ b/drivers/net/qede/base/common_hsi.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -96,10 +96,10 @@
/****************************************************************************/
-#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 30
-#define FW_REVISION_VERSION 12
-#define FW_ENGINEERING_VERSION 0
+#define FW_MAJOR_VERSION 8
+#define FW_MINOR_VERSION 33
+#define FW_REVISION_VERSION 12
+#define FW_ENGINEERING_VERSION 0
/***********************/
/* COMMON HW CONSTANTS */
diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index ce5f3a90..57d6aa95 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -41,6 +41,9 @@
((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
(FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
+#define IS_ECORE_PACING(p_hwfn) \
+ (!!(p_hwfn->b_en_pacing))
+
#define MAX_HWFNS_PER_DEVICE 2
#define NAME_SIZE 128 /* @DPDK */
#define ECORE_WFQ_UNIT 100
@@ -432,8 +435,10 @@ struct ecore_hw_info {
#define DMAE_MAX_RW_SIZE 0x2000
struct ecore_dmae_info {
- /* Mutex for synchronizing access to functions */
- osal_mutex_t mutex;
+ /* Spinlock for synchronizing access to functions */
+ osal_spinlock_t lock;
+
+ bool b_mem_ready;
u8 channel;
@@ -534,6 +539,12 @@ enum ecore_mf_mode_bit {
ECORE_MF_UFP_SPECIFIC,
ECORE_MF_DISABLE_ARFS,
+
+ /* Use vlan for steering */
+ ECORE_MF_8021Q_TAGGING,
+
+ /* Use stag for steering */
+ ECORE_MF_8021AD_TAGGING,
};
enum ecore_ufp_mode {
@@ -672,6 +683,13 @@ struct ecore_hwfn {
/* Mechanism for recovering from doorbell drop */
struct ecore_db_recovery_info db_recovery_info;
+ /* Enable/disable pacing. If enabling is requested then the
+ * IOV and mcos configuration will be skipped. This actually
+ * reflects the value requested in struct ecore_hw_prepare_params
+ * by the ecore client.
+ */
+ bool b_en_pacing;
+
/* @DPDK */
struct ecore_ptt *p_arfs_ptt;
};
@@ -924,12 +942,16 @@ void ecore_set_fw_mac_addr(__le16 *fw_msb, __le16 *fw_mid, __le16 *fw_lsb,
#define PQ_FLAGS_ACK (1 << 4)
#define PQ_FLAGS_OFLD (1 << 5)
#define PQ_FLAGS_VFS (1 << 6)
+#define PQ_FLAGS_LLT (1 << 7)
/* physical queue index for cm context initialization */
u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags);
u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc);
u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf);
-u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 qpid);
+u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl);
+
+/* qm vport for rate limit configuration */
+u16 ecore_get_qm_vport_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl);
const char *ecore_hw_get_resc_name(enum ecore_resources res_id);
diff --git a/drivers/net/qede/base/ecore_attn_values.h b/drivers/net/qede/base/ecore_attn_values.h
index d8951bcc..d893e0a6 100644
--- a/drivers/net/qede/base/ecore_attn_values.h
+++ b/drivers/net/qede/base/ecore_attn_values.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/qede/base/ecore_chain.h b/drivers/net/qede/base/ecore_chain.h
index d8f69ad6..0b797460 100644
--- a/drivers/net/qede/base/ecore_chain.h
+++ b/drivers/net/qede/base/ecore_chain.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -526,7 +526,7 @@ static OSAL_INLINE void ecore_chain_reset(struct ecore_chain *p_chain)
p_chain->p_prod_elem = p_chain->p_virt_addr;
if (p_chain->mode == ECORE_CHAIN_MODE_PBL) {
- /* Use (page_cnt - 1) as a reset value for the prod/cons page's
+ /* Use "page_cnt-1" as a reset value for the prod/cons page's
* indices, to avoid unnecessary page advancing on the first
* call to ecore_chain_produce/consume. Instead, the indices
* will be advanced to page_cnt and then will be wrapped to 0.
@@ -726,6 +726,21 @@ out:
static OSAL_INLINE void ecore_chain_set_prod(struct ecore_chain *p_chain,
u32 prod_idx, void *p_prod_elem)
{
+ if (p_chain->mode == ECORE_CHAIN_MODE_PBL) {
+ /* Use "prod_idx-1" since ecore_chain_produce() advances the
+ * page index before the producer index when getting to
+ * "next_page_mask".
+ */
+ u32 elem_idx =
+ (prod_idx - 1 + p_chain->capacity) % p_chain->capacity;
+ u32 page_idx = elem_idx / p_chain->elem_per_page;
+
+ if (is_chain_u16(p_chain))
+ p_chain->pbl.c.u16.prod_page_idx = (u16)page_idx;
+ else
+ p_chain->pbl.c.u32.prod_page_idx = page_idx;
+ }
+
if (is_chain_u16(p_chain))
p_chain->u.chain16.prod_idx = (u16)prod_idx;
else
@@ -734,6 +749,38 @@ static OSAL_INLINE void ecore_chain_set_prod(struct ecore_chain *p_chain,
}
/**
+ * @brief ecore_chain_set_cons - sets the cons to the given value
+ *
+ * @param cons_idx
+ * @param p_cons_elem
+ */
+static OSAL_INLINE void ecore_chain_set_cons(struct ecore_chain *p_chain,
+ u32 cons_idx, void *p_cons_elem)
+{
+ if (p_chain->mode == ECORE_CHAIN_MODE_PBL) {
+ /* Use "cons_idx-1" since ecore_chain_consume() advances the
+ * page index before the consumer index when getting to
+ * "next_page_mask".
+ */
+ u32 elem_idx =
+ (cons_idx - 1 + p_chain->capacity) % p_chain->capacity;
+ u32 page_idx = elem_idx / p_chain->elem_per_page;
+
+ if (is_chain_u16(p_chain))
+ p_chain->pbl.c.u16.cons_page_idx = (u16)page_idx;
+ else
+ p_chain->pbl.c.u32.cons_page_idx = page_idx;
+ }
+
+ if (is_chain_u16(p_chain))
+ p_chain->u.chain16.cons_idx = (u16)cons_idx;
+ else
+ p_chain->u.chain32.cons_idx = cons_idx;
+
+ p_chain->p_cons_elem = p_cons_elem;
+}
+
+/**
* @brief ecore_chain_pbl_zero_mem - set chain memory to 0
*
* @param p_chain
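
A short worked example of the page-index arithmetic introduced in ecore_chain_set_prod()/ecore_chain_set_cons() (the numbers are illustrative, not from the driver): with capacity = 1024 elements and elem_per_page = 128, setting prod_idx = 256 gives elem_idx = (256 - 1 + 1024) % 1024 = 255 and page_idx = 255 / 128 = 1, so the producer page index is left on the page holding element 255; ecore_chain_produce() then advances the page index itself when the producer next crosses next_page_mask. Setting prod_idx = 0 yields page_idx = 1023 / 128 = 7, the last page, which wraps back to page 0 on the first produce, matching the "idx-1" reasoning in the comments above.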
diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c
index 50bd66da..a91b2ff3 100644
--- a/drivers/net/qede/base/ecore_cxt.c
+++ b/drivers/net/qede/base/ecore_cxt.c
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -834,7 +834,7 @@ static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
p_mngr->t2_num_pages *
sizeof(struct ecore_dma_mem));
if (!p_mngr->t2) {
- DP_NOTICE(p_hwfn, true, "Failed to allocate t2 table\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate t2 table\n");
rc = ECORE_NOMEM;
goto t2_fail;
}
@@ -919,6 +919,9 @@ static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn)
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 ilt_size, i;
+ if (p_mngr->ilt_shadow == OSAL_NULL)
+ return;
+
ilt_size = ecore_cxt_ilt_shadow_size(p_cli);
for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
@@ -931,6 +934,7 @@ static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn)
p_dma->p_virt = OSAL_NULL;
}
OSAL_FREE(p_hwfn->p_dev, p_mngr->ilt_shadow);
+ p_mngr->ilt_shadow = OSAL_NULL;
}
static enum _ecore_status_t
@@ -1000,8 +1004,7 @@ static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)
size * sizeof(struct ecore_dma_mem));
if (!p_mngr->ilt_shadow) {
- DP_NOTICE(p_hwfn, true,
- "Failed to allocate ilt shadow table\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate ilt shadow table\n");
rc = ECORE_NOMEM;
goto ilt_shadow_fail;
}
@@ -1044,12 +1047,14 @@ static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn)
for (type = 0; type < MAX_CONN_TYPES; type++) {
OSAL_FREE(p_hwfn->p_dev, p_mngr->acquired[type].cid_map);
+ p_mngr->acquired[type].cid_map = OSAL_NULL;
p_mngr->acquired[type].max_count = 0;
p_mngr->acquired[type].start_cid = 0;
for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
OSAL_FREE(p_hwfn->p_dev,
p_mngr->acquired_vf[type][vf].cid_map);
+ p_mngr->acquired_vf[type][vf].cid_map = OSAL_NULL;
p_mngr->acquired_vf[type][vf].max_count = 0;
p_mngr->acquired_vf[type][vf].start_cid = 0;
}
@@ -1126,8 +1131,7 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
p_mngr = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_mngr));
if (!p_mngr) {
- DP_NOTICE(p_hwfn, true,
- "Failed to allocate `struct ecore_cxt_mngr'\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_cxt_mngr'\n");
return ECORE_NOMEM;
}
@@ -1189,21 +1193,21 @@ enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn)
/* Allocate the ILT shadow table */
rc = ecore_ilt_shadow_alloc(p_hwfn);
if (rc) {
- DP_NOTICE(p_hwfn, true, "Failed to allocate ilt memory\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate ilt memory\n");
goto tables_alloc_fail;
}
/* Allocate the T2 table */
rc = ecore_cxt_src_t2_alloc(p_hwfn);
if (rc) {
- DP_NOTICE(p_hwfn, true, "Failed to allocate T2 memory\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate T2 memory\n");
goto tables_alloc_fail;
}
/* Allocate and initialize the acquired cids bitmaps */
rc = ecore_cid_map_alloc(p_hwfn);
if (rc) {
- DP_NOTICE(p_hwfn, true, "Failed to allocate cid maps\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate cid maps\n");
goto tables_alloc_fail;
}
@@ -1427,7 +1431,8 @@ static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn)
}
}
-void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ bool is_pf_loading)
{
struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
struct ecore_mcp_link_state *p_link;
@@ -1438,8 +1443,9 @@ void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
- ecore_qm_pf_rt_init(p_hwfn, p_ptt, p_hwfn->port_id,
- p_hwfn->rel_pf_id, qm_info->max_phys_tcs_per_port,
+ ecore_qm_pf_rt_init(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ qm_info->max_phys_tcs_per_port,
+ is_pf_loading,
iids.cids, iids.vf_cids, iids.tids,
qm_info->start_pq,
qm_info->num_pqs - qm_info->num_vf_pqs,
@@ -1797,7 +1803,7 @@ void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn)
void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
- ecore_qm_init_pf(p_hwfn, p_ptt);
+ ecore_qm_init_pf(p_hwfn, p_ptt, true);
ecore_cm_init_pf(p_hwfn);
ecore_dq_init_pf(p_hwfn);
ecore_cdu_init_pf(p_hwfn);
diff --git a/drivers/net/qede/base/ecore_cxt.h b/drivers/net/qede/base/ecore_cxt.h
index 54761e4e..3bcbe8f1 100644
--- a/drivers/net/qede/base/ecore_cxt.h
+++ b/drivers/net/qede/base/ecore_cxt.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -107,8 +107,10 @@ void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
*
* @param p_hwfn
* @param p_ptt
+ * @param is_pf_loading
*/
-void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ bool is_pf_loading);
/**
* @brief Reconfigures QM pf on the fly
diff --git a/drivers/net/qede/base/ecore_cxt_api.h b/drivers/net/qede/base/ecore_cxt_api.h
index 6d87620d..65509add 100644
--- a/drivers/net/qede/base/ecore_cxt_api.h
+++ b/drivers/net/qede/base/ecore_cxt_api.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c
index 21ddda92..9b8d39f6 100644
--- a/drivers/net/qede/base/ecore_dcbx.c
+++ b/drivers/net/qede/base/ecore_dcbx.c
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -149,6 +149,10 @@ ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
}
p_data->arr[type].update = UPDATE_DCB_DSCP;
+ /* Do not add vlan tag 0 when DCB is enabled and port is in UFP mode */
+ if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits))
+ p_data->arr[type].dont_add_vlan0 = true;
+
/* QM reconf data */
if (p_hwfn->hw_info.personality == personality)
p_hwfn->hw_info.offload_tc = tc;
@@ -910,7 +914,7 @@ enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn)
p_hwfn->p_dcbx_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
sizeof(*p_hwfn->p_dcbx_info));
if (!p_hwfn->p_dcbx_info) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed to allocate `struct ecore_dcbx_info'");
return ECORE_NOMEM;
}
@@ -935,6 +939,7 @@ static void ecore_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
p_data->dcb_tc = p_src->arr[type].tc;
p_data->dscp_enable_flag = p_src->arr[type].dscp_enable;
p_data->dscp_val = p_src->arr[type].dscp_val;
+ p_data->dcb_dont_add_vlan0 = p_src->arr[type].dont_add_vlan0;
}
/* Set pf update ramrod command params */
diff --git a/drivers/net/qede/base/ecore_dcbx.h b/drivers/net/qede/base/ecore_dcbx.h
index 469e42dd..49df62ce 100644
--- a/drivers/net/qede/base/ecore_dcbx.h
+++ b/drivers/net/qede/base/ecore_dcbx.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/qede/base/ecore_dcbx_api.h b/drivers/net/qede/base/ecore_dcbx_api.h
index 9ff4df4c..2ad1def4 100644
--- a/drivers/net/qede/base/ecore_dcbx_api.h
+++ b/drivers/net/qede/base/ecore_dcbx_api.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -29,6 +29,7 @@ struct ecore_dcbx_app_data {
u8 tc; /* Traffic Class */
bool dscp_enable; /* DSCP enabled */
u8 dscp_val; /* DSCP value */
+ bool dont_add_vlan0; /* Do not insert a vlan tag with id 0 */
};
#ifndef __EXTRACT__LINUX__
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 744d2043..4ebbedd6 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -39,7 +39,7 @@
* there's more than a single compiled ecore component in system].
*/
static osal_spinlock_t qm_lock;
-static bool qm_lock_init;
+static u32 qm_lock_ref_cnt;
/******************** Doorbell Recovery *******************/
/* The doorbell recovery mechanism consists of a list of entries which represent
@@ -227,7 +227,8 @@ enum _ecore_status_t ecore_db_recovery_setup(struct ecore_hwfn *p_hwfn)
OSAL_LIST_INIT(&p_hwfn->db_recovery_info.list);
#ifdef CONFIG_ECORE_LOCK_ALLOC
- OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->db_recovery_info.lock);
+ if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->db_recovery_info.lock))
+ return ECORE_NOMEM;
#endif
OSAL_SPIN_LOCK_INIT(&p_hwfn->db_recovery_info.lock);
p_hwfn->db_recovery_info.db_recovery_counter = 0;
@@ -411,7 +412,7 @@ void ecore_init_dp(struct ecore_dev *p_dev,
}
}
-void ecore_init_struct(struct ecore_dev *p_dev)
+enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev)
{
u8 i;
@@ -423,9 +424,10 @@ void ecore_init_struct(struct ecore_dev *p_dev)
p_hwfn->b_active = false;
#ifdef CONFIG_ECORE_LOCK_ALLOC
- OSAL_MUTEX_ALLOC(p_hwfn, &p_hwfn->dmae_info.mutex);
+ if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->dmae_info.lock))
+ goto handle_err;
#endif
- OSAL_MUTEX_INIT(&p_hwfn->dmae_info.mutex);
+ OSAL_SPIN_LOCK_INIT(&p_hwfn->dmae_info.lock);
}
/* hwfn 0 is always active */
@@ -433,6 +435,17 @@ void ecore_init_struct(struct ecore_dev *p_dev)
/* set the default cache alignment to 128 (may be overridden later) */
p_dev->cache_shift = 7;
+ return ECORE_SUCCESS;
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+handle_err:
+ while (--i) {
+ struct ecore_hwfn *p_hwfn = OSAL_NULL;
+
+ p_hwfn = &p_dev->hwfns[i];
+ OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->dmae_info.lock);
+ }
+ return ECORE_NOMEM;
+#endif
}
static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn)
@@ -500,11 +513,14 @@ static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn)
/* feature flags */
if (IS_ECORE_SRIOV(p_hwfn->p_dev))
flags |= PQ_FLAGS_VFS;
+ if (IS_ECORE_PACING(p_hwfn))
+ flags |= PQ_FLAGS_RLS;
/* protocol flags */
switch (p_hwfn->hw_info.personality) {
case ECORE_PCI_ETH:
- flags |= PQ_FLAGS_MCOS;
+ if (!IS_ECORE_PACING(p_hwfn))
+ flags |= PQ_FLAGS_MCOS;
break;
case ECORE_PCI_FCOE:
flags |= PQ_FLAGS_OFLD;
@@ -513,11 +529,14 @@ static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn)
flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
break;
case ECORE_PCI_ETH_ROCE:
- flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD;
+ flags |= PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
+ if (!IS_ECORE_PACING(p_hwfn))
+ flags |= PQ_FLAGS_MCOS;
break;
case ECORE_PCI_ETH_IWARP:
- flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO |
- PQ_FLAGS_OFLD;
+ flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
+ if (!IS_ECORE_PACING(p_hwfn))
+ flags |= PQ_FLAGS_MCOS;
break;
default:
DP_ERR(p_hwfn, "unknown personality %d\n",
@@ -721,6 +740,7 @@ static void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn,
"pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);
/* init pq params */
+ qm_info->qm_pq_params[pq_idx].port_id = p_hwfn->port_id;
qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport +
qm_info->num_vports;
qm_info->qm_pq_params[pq_idx].tc_id = tc;
@@ -823,7 +843,7 @@ u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf)
return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
}
-u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl)
+u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl)
{
u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn);
@@ -833,6 +853,23 @@ u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl)
return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl;
}
+u16 ecore_get_qm_vport_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl)
+{
+ u16 start_pq, pq, qm_pq_idx;
+
+ pq = ecore_get_cm_pq_idx_rl(p_hwfn, rl);
+ start_pq = p_hwfn->qm_info.start_pq;
+ qm_pq_idx = pq - start_pq - CM_TX_PQ_BASE;
+
+ if (qm_pq_idx > p_hwfn->qm_info.num_pqs) {
+ DP_ERR(p_hwfn,
+ "qm_pq_idx %d must be smaller than %d\n",
+ qm_pq_idx, p_hwfn->qm_info.num_pqs);
+ }
+
+ return p_hwfn->qm_info.qm_pq_params[qm_pq_idx].vport_id;
+}
+
/* Functions for creating specific types of pqs */
static void ecore_init_qm_lb_pq(struct ecore_hwfn *p_hwfn)
{
@@ -1025,10 +1062,9 @@ static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn)
for (i = 0; i < qm_info->num_pqs; i++) {
pq = &qm_info->qm_pq_params[i];
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "pq idx %d, vport_id %d, tc %d, wrr_grp %d,"
- " rl_valid %d\n",
- qm_info->start_pq + i, pq->vport_id, pq->tc_id,
- pq->wrr_group, pq->rl_valid);
+ "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n",
+ qm_info->start_pq + i, pq->port_id, pq->vport_id,
+ pq->tc_id, pq->wrr_group, pq->rl_valid);
}
}
@@ -1083,7 +1119,7 @@ enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
ecore_init_clear_rt_data(p_hwfn);
/* prepare QM portion of runtime array */
- ecore_qm_init_pf(p_hwfn, p_ptt);
+ ecore_qm_init_pf(p_hwfn, p_ptt, false);
/* activate init tool on runtime array */
rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
@@ -1289,16 +1325,14 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
/* DMA info initialization */
rc = ecore_dmae_info_alloc(p_hwfn);
if (rc) {
- DP_NOTICE(p_hwfn, true,
- "Failed to allocate memory for dmae_info"
- " structure\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate memory for dmae_info structure\n");
goto alloc_err;
}
/* DCBX initialization */
rc = ecore_dcbx_info_alloc(p_hwfn);
if (rc) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed to allocate memory for dcbx structure\n");
goto alloc_err;
}
@@ -1307,7 +1341,7 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL,
sizeof(*p_dev->reset_stats));
if (!p_dev->reset_stats) {
- DP_NOTICE(p_dev, true, "Failed to allocate reset statistics\n");
+ DP_NOTICE(p_dev, false, "Failed to allocate reset statistics\n");
goto alloc_no_mem;
}
@@ -1658,7 +1692,8 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
ecore_init_cache_line_size(p_hwfn, p_ptt);
- rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
+ rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ECORE_PATH_ID(p_hwfn),
+ hw_mode);
if (rc != ECORE_SUCCESS)
return rc;
@@ -2160,6 +2195,11 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
/* perform debug configuration when chip is out of reset */
OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id);
+ /* Sanity check before the PF init sequence that uses DMAE */
+ rc = ecore_dmae_sanity(p_hwfn, p_ptt, "pf_phase");
+ if (rc)
+ return rc;
+
/* PF Init sequence */
rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
if (rc)
@@ -2205,42 +2245,43 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, true,
"Function start ramrod failed\n");
} else {
- prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
- DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
-
- if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) {
- ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1,
- (1 << 2));
- ecore_wr(p_hwfn, p_ptt,
- PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST,
- 0x100);
- }
- DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH registers after start PFn\n");
- prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP);
- DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH_TCP: %x\n", prs_reg);
- prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP);
- DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH_UDP: %x\n", prs_reg);
- prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE);
- DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH_FCOE: %x\n", prs_reg);
- prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE);
- DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH_ROCE: %x\n", prs_reg);
- prs_reg = ecore_rd(p_hwfn, p_ptt,
- PRS_REG_SEARCH_TCP_FIRST_FRAG);
- DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n",
- prs_reg);
- prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
- DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
+ return rc;
+ }
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
+
+ if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) {
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1,
+ (1 << 2));
+ ecore_wr(p_hwfn, p_ptt,
+ PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST,
+ 0x100);
}
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH registers after start PFn\n");
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_TCP: %x\n", prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_UDP: %x\n", prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_FCOE: %x\n", prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_ROCE: %x\n", prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt,
+ PRS_REG_SEARCH_TCP_FIRST_FRAG);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n",
+ prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
}
- return rc;
+ return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
@@ -2283,14 +2324,15 @@ static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn,
}
static void ecore_pglueb_clear_err(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+ struct ecore_ptt *p_ptt)
{
ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
1 << p_hwfn->abs_pf_id);
}
-static void
-ecore_fill_load_req_params(struct ecore_load_req_params *p_load_req,
+static enum _ecore_status_t
+ecore_fill_load_req_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_load_req_params *p_load_req,
struct ecore_drv_load_params *p_drv_load)
{
/* Make sure that if ecore-client didn't provide inputs, all the
@@ -2302,15 +2344,51 @@ ecore_fill_load_req_params(struct ecore_load_req_params *p_load_req,
OSAL_MEM_ZERO(p_load_req, sizeof(*p_load_req));
- if (p_drv_load != OSAL_NULL) {
- p_load_req->drv_role = p_drv_load->is_crash_kernel ?
- ECORE_DRV_ROLE_KDUMP :
- ECORE_DRV_ROLE_OS;
+ if (p_drv_load == OSAL_NULL)
+ goto out;
+
+ p_load_req->drv_role = p_drv_load->is_crash_kernel ?
+ ECORE_DRV_ROLE_KDUMP :
+ ECORE_DRV_ROLE_OS;
+ p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset;
+ p_load_req->override_force_load = p_drv_load->override_force_load;
+
+ /* Old MFW versions don't support timeout values other than default and
+ * none, so these values are replaced according to the fall-back action.
+ */
+
+ if (p_drv_load->mfw_timeout_val == ECORE_LOAD_REQ_LOCK_TO_DEFAULT ||
+ p_drv_load->mfw_timeout_val == ECORE_LOAD_REQ_LOCK_TO_NONE ||
+ (p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO)) {
p_load_req->timeout_val = p_drv_load->mfw_timeout_val;
- p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset;
- p_load_req->override_force_load =
- p_drv_load->override_force_load;
+ goto out;
}
+
+ switch (p_drv_load->mfw_timeout_fallback) {
+ case ECORE_TO_FALLBACK_TO_NONE:
+ p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_NONE;
+ break;
+ case ECORE_TO_FALLBACK_TO_DEFAULT:
+ p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
+ break;
+ case ECORE_TO_FALLBACK_FAIL_LOAD:
+ DP_NOTICE(p_hwfn, false,
+ "Received %d as a value for MFW timeout while the MFW supports only default [%d] or none [%d]. Abort.\n",
+ p_drv_load->mfw_timeout_val,
+ ECORE_LOAD_REQ_LOCK_TO_DEFAULT,
+ ECORE_LOAD_REQ_LOCK_TO_NONE);
+ return ECORE_ABORTED;
+ }
+
+ DP_INFO(p_hwfn,
+ "Modified the MFW timeout value from %d to %s [%d] due to lack of MFW support\n",
+ p_drv_load->mfw_timeout_val,
+ (p_load_req->timeout_val == ECORE_LOAD_REQ_LOCK_TO_DEFAULT) ?
+ "default" : "none",
+ p_load_req->timeout_val);
+out:
+ return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn,
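
To make the fallback added in ecore_fill_load_req_params() concrete, using only the constants from this patch: if the client requests a non-standard mfw_timeout_val and the MFW does not advertise FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO, then ECORE_TO_FALLBACK_TO_DEFAULT rewrites the request to ECORE_LOAD_REQ_LOCK_TO_DEFAULT (0), ECORE_TO_FALLBACK_TO_NONE rewrites it to ECORE_LOAD_REQ_LOCK_TO_NONE (255), and ECORE_TO_FALLBACK_FAIL_LOAD aborts the load with ECORE_ABORTED; the first two cases are reported via DP_INFO.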
@@ -2366,12 +2444,17 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
if (rc != ECORE_SUCCESS)
return rc;
- ecore_fill_load_req_params(&load_req_params,
- p_params->p_drv_load_params);
+ ecore_set_spq_block_timeout(p_hwfn, p_params->spq_timeout_ms);
+
+ rc = ecore_fill_load_req_params(p_hwfn, &load_req_params,
+ p_params->p_drv_load_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
&load_req_params);
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed sending a LOAD_REQ command\n");
return rc;
}
@@ -2404,10 +2487,17 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
p_hwfn->first_on_engine = (load_code ==
FW_MSG_CODE_DRV_LOAD_ENGINE);
- if (!qm_lock_init) {
+ if (!qm_lock_ref_cnt) {
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ rc = OSAL_SPIN_LOCK_ALLOC(p_hwfn, &qm_lock);
+ if (rc) {
+ DP_ERR(p_hwfn, "qm_lock allocation failed\n");
+ goto qm_lock_fail;
+ }
+#endif
OSAL_SPIN_LOCK_INIT(&qm_lock);
- qm_lock_init = true;
}
+ ++qm_lock_ref_cnt;
/* Clean up chip from previous driver if such remains exist.
* This is not needed when the PF is the first one on the
@@ -2424,7 +2514,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
}
/* Log and clean previous pglue_b errors if such exist */
- ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt);
+ ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt, true);
ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
/* Enable the PF's internal FID_enable in the PXP */
@@ -2462,15 +2552,23 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
}
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"init phase failed for loadcode 0x%x (rc %d)\n",
load_code, rc);
goto load_err;
}
rc = ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
- if (rc != ECORE_SUCCESS)
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Sending load done failed, rc = %d\n", rc);
+ if (rc == ECORE_NOMEM) {
+ DP_NOTICE(p_hwfn, false,
+ "Sending load done failed due to a memory allocation failure\n");
+ goto load_err;
+ }
return rc;
+ }
/* send DCBX attention request command */
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
@@ -2480,7 +2578,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
1 << DRV_MB_PARAM_DCBX_NOTIFY_OFFSET, &resp,
&param);
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed to send DCBX attention request\n");
return rc;
}
@@ -2513,6 +2611,12 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
return rc;
load_err:
+ --qm_lock_ref_cnt;
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ if (!qm_lock_ref_cnt)
+ OSAL_SPIN_LOCK_DEALLOC(&qm_lock);
+qm_lock_fail:
+#endif
/* The MFW load lock should be released regardless of success or failure
* of initialization.
* TODO: replace this with an attempt to send cancel_load.
@@ -2547,8 +2651,8 @@ static void ecore_hw_timers_stop(struct ecore_dev *p_dev,
if (i < ECORE_HW_STOP_RETRY_LIMIT)
return;
- DP_NOTICE(p_hwfn, true, "Timers linear scans are not over"
- " [Connection %02x Tasks %02x]\n",
+ DP_NOTICE(p_hwfn, false,
+ "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
(u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
(u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
}
@@ -2613,7 +2717,7 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
if (!p_dev->recov_in_prog) {
rc = ecore_mcp_unload_req(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed sending a UNLOAD_REQ command. rc = %d.\n",
rc);
rc2 = ECORE_UNKNOWN_ERROR;
@@ -2628,7 +2732,7 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
rc = ecore_sp_pf_stop(p_hwfn);
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n",
rc);
rc2 = ECORE_UNKNOWN_ERROR;
@@ -2682,10 +2786,21 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
ecore_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);
+ --qm_lock_ref_cnt;
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ if (!qm_lock_ref_cnt)
+ OSAL_SPIN_LOCK_DEALLOC(&qm_lock);
+#endif
+
if (!p_dev->recov_in_prog) {
- ecore_mcp_unload_done(p_hwfn, p_ptt);
+ rc = ecore_mcp_unload_done(p_hwfn, p_ptt);
+ if (rc == ECORE_NOMEM) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed sending an UNLOAD_DONE command due to a memory allocation failure. Resending.\n");
+ rc = ecore_mcp_unload_done(p_hwfn, p_ptt);
+ }
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed sending a UNLOAD_DONE command. rc = %d.\n",
rc);
rc2 = ECORE_UNKNOWN_ERROR;
@@ -2936,7 +3051,7 @@ __ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
rc = ecore_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id,
resc_max_val, p_mcp_resp);
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"MFW response failure for a max value setting of resource %d [%s]\n",
res_id, ecore_hw_get_resc_name(res_id));
return rc;
@@ -3496,9 +3611,14 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
break;
case NVM_CFG1_GLOB_MF_MODE_UFP:
p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS |
- 1 << ECORE_MF_UFP_SPECIFIC;
+ 1 << ECORE_MF_UFP_SPECIFIC |
+ 1 << ECORE_MF_8021Q_TAGGING;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_BD:
+ p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS |
+ 1 << ECORE_MF_LLH_PROTO_CLSS |
+ 1 << ECORE_MF_8021AD_TAGGING;
break;
-
case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
1 << ECORE_MF_LLH_PROTO_CLSS |
@@ -3527,6 +3647,7 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
*/
switch (mf_mode) {
case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
+ case NVM_CFG1_GLOB_MF_MODE_BD:
p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN;
break;
case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
@@ -3780,8 +3901,13 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
bool drv_resc_alloc = p_params->drv_resc_alloc;
enum _ecore_status_t rc;
+ if (IS_ECORE_PACING(p_hwfn)) {
+ DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_IOV,
+ "Skipping IOV as packet pacing is requested\n");
+ }
+
/* Since all information is common, only first hwfns should do this */
- if (IS_LEAD_HWFN(p_hwfn)) {
+ if (IS_LEAD_HWFN(p_hwfn) && !IS_ECORE_PACING(p_hwfn)) {
rc = ecore_iov_hw_info(p_hwfn);
if (rc != ECORE_SUCCESS) {
if (p_params->b_relaxed_probe)
@@ -3866,7 +3992,10 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
* that can result in performance penalty in some cases. 4
* represents a good tradeoff between performance and flexibility.
*/
- p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
+ if (IS_ECORE_PACING(p_hwfn))
+ p_hwfn->hw_info.num_hw_tc = 1;
+ else
+ p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
/* start out with a single active tc. This can be increased either
* by dcbx negotiation or by upper layer driver
@@ -4037,7 +4166,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
/* Allocate PTT pool */
rc = ecore_ptt_pool_alloc(p_hwfn);
if (rc) {
- DP_NOTICE(p_hwfn, true, "Failed to prepare hwfn's hw\n");
+ DP_NOTICE(p_hwfn, false, "Failed to prepare hwfn's hw\n");
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
goto err0;
@@ -4062,7 +4191,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
/* Initialize MCP structure */
rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
if (rc) {
- DP_NOTICE(p_hwfn, true, "Failed initializing mcp command\n");
+ DP_NOTICE(p_hwfn, false, "Failed initializing mcp command\n");
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
goto err1;
@@ -4072,7 +4201,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt,
p_params->personality, p_params);
if (rc) {
- DP_NOTICE(p_hwfn, true, "Failed to get HW information\n");
+ DP_NOTICE(p_hwfn, false, "Failed to get HW information\n");
goto err2;
}
@@ -4115,7 +4244,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
/* Allocate the init RT array and initialize the init-ops engine */
rc = ecore_init_alloc(p_hwfn);
if (rc) {
- DP_NOTICE(p_hwfn, true, "Failed to allocate the init array\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate the init array\n");
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
goto err2;
@@ -4153,6 +4282,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
p_dev->chk_reg_fifo = p_params->chk_reg_fifo;
p_dev->allow_mdump = p_params->allow_mdump;
+ p_hwfn->b_en_pacing = p_params->b_en_pacing;
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS;
@@ -4188,6 +4318,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
BAR_ID_1) / 2;
p_doorbell = (void OSAL_IOMEM *)addr;
+ p_dev->hwfns[1].b_en_pacing = p_params->b_en_pacing;
/* prepare second hw function */
rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview,
p_doorbell, p_params);
@@ -4205,8 +4336,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
ecore_mcp_free(p_hwfn);
ecore_hw_hwfn_free(p_hwfn);
} else {
- DP_NOTICE(p_dev, true,
- "What do we need to free when VF hwfn1 init fails\n");
+ DP_NOTICE(p_dev, false, "What do we need to free when VF hwfn1 init fails\n");
}
return rc;
}
@@ -4237,7 +4367,7 @@ void ecore_hw_remove(struct ecore_dev *p_dev)
ecore_mcp_free(p_hwfn);
#ifdef CONFIG_ECORE_LOCK_ALLOC
- OSAL_MUTEX_DEALLOC(&p_hwfn->dmae_info.mutex);
+ OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->dmae_info.lock);
#endif
}
@@ -4368,7 +4498,7 @@ ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
ECORE_CHAIN_PAGE_SIZE);
if (!p_virt) {
- DP_NOTICE(p_dev, true,
+ DP_NOTICE(p_dev, false,
"Failed to allocate chain memory\n");
return ECORE_NOMEM;
}
@@ -4401,7 +4531,7 @@ ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE);
if (!p_virt) {
- DP_NOTICE(p_dev, true, "Failed to allocate chain memory\n");
+ DP_NOTICE(p_dev, false, "Failed to allocate chain memory\n");
return ECORE_NOMEM;
}
@@ -4425,7 +4555,7 @@ ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
size = page_cnt * sizeof(*pp_virt_addr_tbl);
pp_virt_addr_tbl = (void **)OSAL_VZALLOC(p_dev, size);
if (!pp_virt_addr_tbl) {
- DP_NOTICE(p_dev, true,
+ DP_NOTICE(p_dev, false,
"Failed to allocate memory for the chain virtual addresses table\n");
return ECORE_NOMEM;
}
@@ -4449,7 +4579,7 @@ ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
pp_virt_addr_tbl);
if (!p_pbl_virt) {
- DP_NOTICE(p_dev, true, "Failed to allocate chain pbl memory\n");
+ DP_NOTICE(p_dev, false, "Failed to allocate chain pbl memory\n");
return ECORE_NOMEM;
}
@@ -4457,7 +4587,7 @@ ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
ECORE_CHAIN_PAGE_SIZE);
if (!p_virt) {
- DP_NOTICE(p_dev, true,
+ DP_NOTICE(p_dev, false,
"Failed to allocate chain memory\n");
return ECORE_NOMEM;
}
@@ -4497,7 +4627,7 @@ enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
rc = ecore_chain_alloc_sanity_check(p_dev, cnt_type, elem_size,
page_cnt);
if (rc) {
- DP_NOTICE(p_dev, true,
+ DP_NOTICE(p_dev, false,
"Cannot allocate a chain with the given arguments:\n"
"[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
intended_use, mode, cnt_type, num_elems, elem_size);
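
The ecore_dev.c changes above also replace the one-shot qm_lock_init flag with qm_lock_ref_cnt, so that when CONFIG_ECORE_LOCK_ALLOC is defined the global qm_lock is allocated by the first ecore_hw_init() and released by the last ecore_hw_stop() (or on the load_err path). A minimal sketch of the same reference-counted global-lock pattern in isolation, with illustrative names and a plain pthread spinlock standing in for the OSAL wrappers:

    #include <pthread.h>

    static pthread_spinlock_t global_lock;
    static unsigned int global_lock_ref_cnt;

    /* First user allocates/initializes the lock; later users only bump
     * the reference count.
     */
    static int global_lock_get(void)
    {
        if (global_lock_ref_cnt == 0 &&
            pthread_spin_init(&global_lock, PTHREAD_PROCESS_PRIVATE) != 0)
            return -1;
        ++global_lock_ref_cnt;
        return 0;
    }

    /* Last user tears the lock down again. */
    static void global_lock_put(void)
    {
        if (--global_lock_ref_cnt == 0)
            pthread_spin_destroy(&global_lock);
    }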
diff --git a/drivers/net/qede/base/ecore_dev_api.h b/drivers/net/qede/base/ecore_dev_api.h
index 98bcabe8..0dd78d60 100644
--- a/drivers/net/qede/base/ecore_dev_api.h
+++ b/drivers/net/qede/base/ecore_dev_api.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -32,7 +32,7 @@ void ecore_init_dp(struct ecore_dev *p_dev,
*
* @param p_dev
*/
-void ecore_init_struct(struct ecore_dev *p_dev);
+enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev);
/**
* @brief ecore_resc_free -
@@ -57,6 +57,12 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev);
*/
void ecore_resc_setup(struct ecore_dev *p_dev);
+enum ecore_mfw_timeout_fallback {
+ ECORE_TO_FALLBACK_TO_NONE,
+ ECORE_TO_FALLBACK_TO_DEFAULT,
+ ECORE_TO_FALLBACK_FAIL_LOAD,
+};
+
enum ecore_override_force_load {
ECORE_OVERRIDE_FORCE_LOAD_NONE,
ECORE_OVERRIDE_FORCE_LOAD_ALWAYS,
@@ -79,6 +85,11 @@ struct ecore_drv_load_params {
#define ECORE_LOAD_REQ_LOCK_TO_DEFAULT 0
#define ECORE_LOAD_REQ_LOCK_TO_NONE 255
+ /* Action to take in case the MFW doesn't support timeout values other
+ * than default and none.
+ */
+ enum ecore_mfw_timeout_fallback mfw_timeout_fallback;
+
/* Avoid engine reset when first PF loads on it */
bool avoid_eng_reset;
@@ -104,6 +115,9 @@ struct ecore_hw_init_params {
/* Driver load parameters */
struct ecore_drv_load_params *p_drv_load_params;
+
+ /* SPQ block timeout in msec */
+ u32 spq_timeout_ms;
};
/**
@@ -256,6 +270,9 @@ struct ecore_hw_prepare_params {
*/
bool b_relaxed_probe;
enum ecore_hw_prepare_result p_relaxed_res;
+
+ /* Enable/disable request by ecore client for pacing */
+ bool b_en_pacing;
};
/**
@@ -363,6 +380,7 @@ struct ecore_eth_stats_common {
u64 tx_mac_mc_packets;
u64 tx_mac_bc_packets;
u64 tx_mac_ctrl_frames;
+ u64 link_change_count;
};
struct ecore_eth_stats_bb {
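
The new knobs in ecore_dev_api.h (mfw_timeout_fallback, spq_timeout_ms, b_en_pacing, plus the link_change_count statistic) are filled in by the ecore client. A hedged sketch of where they would be set, assuming the ecore headers are included; this is illustrative wiring, not the qede PMD's actual code:

    static void example_fill_ecore_params(struct ecore_hw_prepare_params *prep,
                                          struct ecore_drv_load_params *load,
                                          struct ecore_hw_init_params *init)
    {
        /* Request (or not) packet pacing before ecore_hw_prepare(). */
        prep->b_en_pacing = false;

        /* MFW load-request timeout plus the fallback used when the MFW
         * only supports the default/none values.
         */
        load->mfw_timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
        load->mfw_timeout_fallback = ECORE_TO_FALLBACK_TO_DEFAULT;

        /* SPQ block timeout in msec, consumed by
         * ecore_set_spq_block_timeout(); 0 is just a placeholder here.
         */
        init->spq_timeout_ms = 0;
        init->p_drv_load_params = load;
    }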
diff --git a/drivers/net/qede/base/ecore_gtt_reg_addr.h b/drivers/net/qede/base/ecore_gtt_reg_addr.h
index 2acd864d..ac29dc49 100644
--- a/drivers/net/qede/base/ecore_gtt_reg_addr.h
+++ b/drivers/net/qede/base/ecore_gtt_reg_addr.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/qede/base/ecore_gtt_values.h b/drivers/net/qede/base/ecore_gtt_values.h
index 2ddc5f19..d9af94df 100644
--- a/drivers/net/qede/base/ecore_gtt_values.h
+++ b/drivers/net/qede/base/ecore_gtt_values.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/qede/base/ecore_hsi_common.h b/drivers/net/qede/base/ecore_hsi_common.h
index d8abd604..d400fa91 100644
--- a/drivers/net/qede/base/ecore_hsi_common.h
+++ b/drivers/net/qede/base/ecore_hsi_common.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -381,7 +381,7 @@ struct e4_xstorm_core_conn_ag_ctx {
__le16 reserved16 /* physical_q2 */;
__le16 tx_bd_cons /* word3 */;
__le16 tx_bd_or_spq_prod /* word4 */;
- __le16 word5 /* word5 */;
+ __le16 updated_qm_pq_id /* word5 */;
__le16 conn_dpi /* conn_dpi */;
u8 byte3 /* byte3 */;
u8 byte4 /* byte4 */;
@@ -904,8 +904,10 @@ struct core_rx_start_ramrod_data {
/* if set, 802.1q tags will be removed and copied to CQE */
/* if set, 802.1q tags will be removed and copied to CQE */
u8 inner_vlan_stripping_en;
-/* if set, outer tag wont be stripped, valid only in MF OVLAN. */
- u8 outer_vlan_stripping_dis;
+/* if set and inner vlan does not exist, the outer vlan will be copied to CQE
+ * as inner vlan. Should be used in MF_OVLAN mode only.
+ */
+ u8 report_outer_vlan;
u8 queue_id /* Light L2 RX Queue ID */;
u8 main_func_queue /* Is this the main queue for the PF */;
/* Duplicate broadcast packets to LL2 main queue in mf_si mode. Valid if
@@ -946,7 +948,9 @@ struct core_tx_bd_data {
/* Do not allow additional VLAN manipulations on this packet (DCB) */
#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1
#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
-/* Insert VLAN into packet */
+/* Insert VLAN into packet. Cannot be set for LB packets
+ * (tx_dst == CORE_TX_DEST_LB)
+ */
#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1
#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
/* This is the first BD of the packet (for debug) */
@@ -1069,11 +1073,11 @@ struct core_tx_update_ramrod_data {
* Enum flag for what type of dcb data to update
*/
enum dcb_dscp_update_mode {
-/* use when no change should be done to dcb data */
+/* use when no change should be done to DCB data */
DONT_UPDATE_DCB_DSCP,
- UPDATE_DCB /* use to update only l2 (vlan) priority */,
- UPDATE_DSCP /* use to update only l3 dscp */,
- UPDATE_DCB_DSCP /* update vlan pri and dscp */,
+ UPDATE_DCB /* use to update only L2 (vlan) priority */,
+ UPDATE_DSCP /* use to update only IP DSCP */,
+ UPDATE_DCB_DSCP /* update vlan pri and DSCP */,
MAX_DCB_DSCP_UPDATE_FLAG
};
@@ -1291,10 +1295,15 @@ enum fw_flow_ctrl_mode {
* GFT profile type.
*/
enum gft_profile_type {
- GFT_PROFILE_TYPE_4_TUPLE /* 4 tuple, IP type and L4 type match. */,
-/* L4 destination port, IP type and L4 type match. */
+/* tunnel type, inner 4 tuple, IP type and L4 type match. */
+ GFT_PROFILE_TYPE_4_TUPLE,
+/* tunnel type, inner L4 destination port, IP type and L4 type match. */
GFT_PROFILE_TYPE_L4_DST_PORT,
- GFT_PROFILE_TYPE_IP_DST_PORT /* IP destination port and IP type. */,
+/* tunnel type, inner IP destination address and IP type match. */
+ GFT_PROFILE_TYPE_IP_DST_ADDR,
+/* tunnel type, inner IP source address and IP type match. */
+ GFT_PROFILE_TYPE_IP_SRC_ADDR,
+ GFT_PROFILE_TYPE_TUNNEL_TYPE /* tunnel type and outer IP type match. */,
MAX_GFT_PROFILE_TYPE
};
@@ -1411,8 +1420,9 @@ struct vlan_header {
* outer tag configurations
*/
struct outer_tag_config_struct {
-/* Enables the STAG Priority Change , Should be 1 for Bette Davis and UFP with
- * Host Control mode. Else - 0
+/* Enables updating S-tag priority from inner tag or DCB. Should be 1 for Bette
+ * Davis, UFP with Host Control mode, and UFP with DCB over base interface.
+ * else - 0.
*/
u8 enable_stag_pri_change;
/* If inner_to_outer_pri_map is initialize then set pri_map_valid */
@@ -1507,15 +1517,18 @@ struct pf_start_ramrod_data {
/*
- * Data for port update ramrod
+ * Per protocol DCB data
*/
struct protocol_dcb_data {
- u8 dcb_enable_flag /* dcbEnable flag value */;
- u8 dscp_enable_flag /* If set use dscp value */;
- u8 dcb_priority /* dcbPri flag value */;
- u8 dcb_tc /* dcb TC value */;
- u8 dscp_val /* dscp value to write if dscp_enable_flag is set */;
- u8 reserved0;
+ u8 dcb_enable_flag /* Enable DCB */;
+ u8 dscp_enable_flag /* Enable updating DSCP value */;
+ u8 dcb_priority /* DCB priority */;
+ u8 dcb_tc /* DCB TC */;
+ u8 dscp_val /* DSCP value to write if dscp_enable_flag is set */;
+/* When DCB is enabled - if this flag is set, don't add VLAN 0 tag to untagged
+ * frames
+ */
+ u8 dcb_dont_add_vlan0;
};
/*
@@ -1575,8 +1588,9 @@ struct pf_update_ramrod_data {
/* core iwarp related fields */
struct protocol_dcb_data iwarp_dcb_data;
__le16 mf_vlan /* new outer vlan id value */;
-/* enables the inner to outer TAG priority mapping. Should be 1 for Bette Davis
- * and UFP with Host Control mode, else - 0.
+/* enables updating S-tag priority from inner tag or DCB. Should be 1 for Bette
+ * Davis, UFP with Host Control mode, and UFP with DCB over base interface.
+ * else - 0
*/
u8 enable_stag_pri_change;
u8 reserved;
@@ -1739,6 +1753,7 @@ struct tstorm_per_port_stat {
struct regpair eth_vxlan_tunn_filter_discard;
/* GENEVE dropped packets */
struct regpair eth_geneve_tunn_filter_discard;
+ struct regpair eth_gft_drop_pkt /* GFT dropped packets */;
};
@@ -2130,6 +2145,53 @@ struct e4_ystorm_core_conn_ag_ctx {
};
+struct fw_asserts_ram_section {
+/* The offset of the section in the RAM in RAM lines (64-bit units) */
+ __le16 section_ram_line_offset;
+/* The size of the section in RAM lines (64-bit units) */
+ __le16 section_ram_line_size;
+/* The offset of the asserts list within the section in dwords */
+ u8 list_dword_offset;
+/* The size of an assert list element in dwords */
+ u8 list_element_dword_size;
+ u8 list_num_elements /* The number of elements in the asserts list */;
+/* The offset of the next list index field within the section in dwords */
+ u8 list_next_index_dword_offset;
+};
+
+
+struct fw_ver_num {
+ u8 major /* Firmware major version number */;
+ u8 minor /* Firmware minor version number */;
+ u8 rev /* Firmware revision version number */;
+ u8 eng /* Firmware engineering version number (for bootleg versions) */;
+};
+
+struct fw_ver_info {
+ __le16 tools_ver /* Tools version number */;
+ u8 image_id /* FW image ID (e.g. main, l2b, kuku) */;
+ u8 reserved1;
+ struct fw_ver_num num /* FW version number */;
+ __le32 timestamp /* FW Timestamp in unix time (sec. since 1970) */;
+ __le32 reserved2;
+};
+
+struct fw_info {
+ struct fw_ver_info ver /* FW version information */;
+/* Info regarding the FW asserts section in the Storm RAM */
+ struct fw_asserts_ram_section fw_asserts_section;
+};
+
+
+struct fw_info_location {
+ __le32 grc_addr /* GRC address where the fw_info struct is located. */;
+/* Size of the fw_info structure (that is located at the grc_addr). */
+ __le32 size;
+};
+
+
+
+
/*
* IGU cleanup command
*/
diff --git a/drivers/net/qede/base/ecore_hsi_debug_tools.h b/drivers/net/qede/base/ecore_hsi_debug_tools.h
index ebb66482..262834ed 100644
--- a/drivers/net/qede/base/ecore_hsi_debug_tools.h
+++ b/drivers/net/qede/base/ecore_hsi_debug_tools.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -225,7 +225,7 @@ enum bin_dbg_buffer_type {
* Attention bit mapping
*/
struct dbg_attn_bit_mapping {
- __le16 data;
+ u16 data;
/* The index of an attention in the blocks attentions list
* (if is_unused_bit_cnt=0), or a number of consecutive unused attention bits
* (if is_unused_bit_cnt=1)
@@ -247,14 +247,14 @@ struct dbg_attn_block_type_data {
/* Offset of this block attention names in the debug attention name offsets
* array
*/
- __le16 names_offset;
- __le16 reserved1;
+ u16 names_offset;
+ u16 reserved1;
u8 num_regs /* Number of attention registers in this block */;
u8 reserved2;
/* Offset of this blocks attention registers in the attention registers array
* (in dbg_attn_reg units)
*/
- __le16 regs_offset;
+ u16 regs_offset;
};
/*
@@ -272,20 +272,20 @@ struct dbg_attn_block {
* Attention register result
*/
struct dbg_attn_reg_result {
- __le32 data;
+ u32 data;
/* STS attention register GRC address (in dwords) */
#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF
#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
/* Number of attention indexes in this register */
#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF
#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24
-/* The offset of this registers attentions within the blocks attentions
- * list (a value in the range 0..number of block attentions-1)
+/* The offset of this registers attentions within the blocks attentions list
+ * (a value in the range 0..number of block attentions-1)
*/
- __le16 attn_idx_offset;
- __le16 reserved;
- __le32 sts_val /* Value read from the STS attention register */;
- __le32 mask_val /* Value read from the MASK attention register */;
+ u16 block_attn_offset;
+ u16 reserved;
+ u32 sts_val /* Value read from the STS attention register */;
+ u32 mask_val /* Value read from the MASK attention register */;
};
/*
@@ -303,7 +303,7 @@ struct dbg_attn_block_result {
/* Offset of this registers block attention names in the attention name offsets
* array
*/
- __le16 names_offset;
+ u16 names_offset;
/* result data for each register in the block in which at least one attention
* bit is set
*/
@@ -316,7 +316,7 @@ struct dbg_attn_block_result {
* mode header
*/
struct dbg_mode_hdr {
- __le16 data;
+ u16 data;
/* indicates if a mode expression should be evaluated (0/1) */
#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1
#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0
@@ -331,12 +331,11 @@ struct dbg_mode_hdr {
* Attention register
*/
struct dbg_attn_reg {
- struct dbg_mode_hdr mode /* Mode header */;
-/* The offset of this registers attentions within the blocks attentions
- * list (a value in the range 0..number of block attentions-1)
+/* The offset of this registers attentions within the blocks attentions list
+ * (a value in the range 0..number of block attentions-1)
*/
- __le16 attn_idx_offset;
- __le32 data;
+ u16 block_attn_offset;
+ u32 data;
/* STS attention register GRC address (in dwords) */
#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF
#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
@@ -344,9 +343,8 @@ struct dbg_attn_reg {
#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF
#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
/* STS_CLR attention register GRC address (in dwords) */
- __le32 sts_clr_address;
-/* MASK attention register GRC address (in dwords) */
- __le32 mask_address;
+ u32 sts_clr_address;
+ u32 mask_address /* MASK attention register GRC address (in dwords) */;
};
@@ -370,7 +368,7 @@ struct dbg_bus_block {
/* Indicates if this block has a latency events debug line (0/1). */
u8 has_latency_events;
/* Offset of this blocks lines in the Debug Bus lines array. */
- __le16 lines_offset;
+ u16 lines_offset;
};
@@ -383,7 +381,7 @@ struct dbg_bus_block_user_data {
/* Indicates if this block has a latency events debug line (0/1). */
u8 has_latency_events;
/* Offset of this blocks lines in the debug bus line name offsets array. */
- __le16 names_offset;
+ u16 names_offset;
};
@@ -422,13 +420,13 @@ struct dbg_dump_cond_hdr {
* memory data for registers dump
*/
struct dbg_dump_mem {
- __le32 dword0;
+ u32 dword0;
/* register address (in dwords) */
#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF
#define DBG_DUMP_MEM_ADDRESS_SHIFT 0
#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF /* memory group ID */
#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
- __le32 dword1;
+ u32 dword1;
/* register size (in dwords) */
#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
#define DBG_DUMP_MEM_LENGTH_SHIFT 0
@@ -444,7 +442,7 @@ struct dbg_dump_mem {
* register data for registers dump
*/
struct dbg_dump_reg {
- __le32 data;
+ u32 data;
/* register address (in dwords) */
#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF /* register address (in dwords) */
#define DBG_DUMP_REG_ADDRESS_SHIFT 0
@@ -460,7 +458,7 @@ struct dbg_dump_reg {
* split header for registers dump
*/
struct dbg_dump_split_hdr {
- __le32 hdr;
+ u32 hdr;
/* size in dwords of the data following this header */
#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF
#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0
@@ -474,8 +472,7 @@ struct dbg_dump_split_hdr {
*/
struct dbg_idle_chk_cond_hdr {
struct dbg_mode_hdr mode /* Mode header */;
-/* size in dwords of the data following this header */
- __le16 data_size;
+ u16 data_size /* size in dwords of the data following this header */;
};
@@ -483,7 +480,7 @@ struct dbg_idle_chk_cond_hdr {
* Idle Check condition register
*/
struct dbg_idle_chk_cond_reg {
- __le32 data;
+ u32 data;
/* Register GRC address (in dwords) */
#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF
#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
@@ -493,7 +490,7 @@ struct dbg_idle_chk_cond_reg {
/* value from block_id enum */
#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
- __le16 num_entries /* number of registers entries to check */;
+ u16 num_entries /* number of registers entries to check */;
u8 entry_size /* size of registers entry (in dwords) */;
u8 start_entry /* index of the first entry to check */;
};
@@ -503,7 +500,7 @@ struct dbg_idle_chk_cond_reg {
* Idle Check info register
*/
struct dbg_idle_chk_info_reg {
- __le32 data;
+ u32 data;
/* Register GRC address (in dwords) */
#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF
#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
@@ -513,7 +510,7 @@ struct dbg_idle_chk_info_reg {
/* value from block_id enum */
#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
- __le16 size /* register size in dwords */;
+ u16 size /* register size in dwords */;
struct dbg_mode_hdr mode /* Mode header */;
};
@@ -531,8 +528,8 @@ union dbg_idle_chk_reg {
* Idle Check result header
*/
struct dbg_idle_chk_result_hdr {
- __le16 rule_id /* Failing rule index */;
- __le16 mem_entry_id /* Failing memory entry index */;
+ u16 rule_id /* Failing rule index */;
+ u16 mem_entry_id /* Failing memory entry index */;
u8 num_dumped_cond_regs /* number of dumped condition registers */;
u8 num_dumped_info_regs /* number of dumped condition registers */;
u8 severity /* from dbg_idle_chk_severity_types enum */;
@@ -552,7 +549,7 @@ struct dbg_idle_chk_result_reg_hdr {
#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F
#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1
u8 start_entry /* index of the first checked entry */;
- __le16 size /* register size in dwords */;
+ u16 size /* register size in dwords */;
};
@@ -560,7 +557,7 @@ struct dbg_idle_chk_result_reg_hdr {
* Idle Check rule
*/
struct dbg_idle_chk_rule {
- __le16 rule_id /* Idle Check rule ID */;
+ u16 rule_id /* Idle Check rule ID */;
u8 severity /* value from dbg_idle_chk_severity_types enum */;
u8 cond_id /* Condition ID */;
u8 num_cond_regs /* number of condition registers */;
@@ -570,11 +567,11 @@ struct dbg_idle_chk_rule {
/* offset of this rules registers in the idle check register array
* (in dbg_idle_chk_reg units)
*/
- __le16 reg_offset;
+ u16 reg_offset;
/* offset of this rules immediate values in the immediate values array
* (in dwords)
*/
- __le16 imm_offset;
+ u16 imm_offset;
};
@@ -582,7 +579,7 @@ struct dbg_idle_chk_rule {
* Idle Check rule parsing data
*/
struct dbg_idle_chk_rule_parsing_data {
- __le32 data;
+ u32 data;
/* indicates if this register has a FW message */
#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1
#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
@@ -693,8 +690,8 @@ struct dbg_bus_trigger_state_data {
* Debug Bus memory address
*/
struct dbg_bus_mem_addr {
- __le32 lo;
- __le32 hi;
+ u32 lo;
+ u32 hi;
};
/*
@@ -703,7 +700,7 @@ struct dbg_bus_mem_addr {
struct dbg_bus_pci_buf_data {
struct dbg_bus_mem_addr phys_addr /* PCI buffer physical address */;
struct dbg_bus_mem_addr virt_addr /* PCI buffer virtual address */;
- __le32 size /* PCI buffer size in bytes */;
+ u32 size /* PCI buffer size in bytes */;
};
/*
@@ -747,21 +744,20 @@ struct dbg_bus_storm_data {
u8 cid_filter_en /* Indicates if CID filtering is performed (0/1) */;
/* EID filter params to filter on. Valid only if eid_filter_en is set. */
union dbg_bus_storm_eid_params eid_filter_params;
-/* CID to filter on. Valid only if cid_filter_en is set. */
- __le32 cid;
+ u32 cid /* CID to filter on. Valid only if cid_filter_en is set. */;
};
/*
* Debug Bus data
*/
struct dbg_bus_data {
- __le32 app_version /* The tools version number of the application */;
+ u32 app_version /* The tools version number of the application */;
u8 state /* The current debug bus state */;
u8 hw_dwords /* HW dwords per cycle */;
/* The HW IDs of the recorded HW blocks, where bits i*3..i*3+2 contain the
* HW ID of dword/qword i
*/
- __le16 hw_id_mask;
+ u16 hw_id_mask;
u8 num_enabled_blocks /* Number of blocks enabled for recording */;
u8 num_enabled_storms /* Number of Storms enabled for recording */;
u8 target /* Output target */;
@@ -783,7 +779,7 @@ struct dbg_bus_data {
* Valid only if both filter and trigger are enabled (0/1)
*/
u8 filter_post_trigger;
- __le16 reserved;
+ u16 reserved;
/* Indicates if the recording trigger is enabled (0/1) */
u8 trigger_en;
/* trigger states data */
@@ -933,9 +929,10 @@ struct dbg_grc_data {
/* Indicates if the GRC parameters were initialized */
u8 params_initialized;
u8 reserved1;
- __le16 reserved2;
-/* Value of each GRC parameter. Array size must match enum dbg_grc_params. */
- __le32 param_val[48];
+ u16 reserved2;
+/* Value of each GRC parameter. Array size must match the enum dbg_grc_params.
+ */
+ u32 param_val[48];
};
@@ -960,7 +957,8 @@ enum dbg_grc_params {
DBG_GRC_PARAM_DUMP_CAU /* dump CAU memories (0/1) */,
DBG_GRC_PARAM_DUMP_QM /* dump QM memories (0/1) */,
DBG_GRC_PARAM_DUMP_MCP /* dump MCP memories (0/1) */,
- DBG_GRC_PARAM_RESERVED /* reserved */,
+/* MCP Trace meta data size in bytes */
+ DBG_GRC_PARAM_MCP_TRACE_META_SIZE,
DBG_GRC_PARAM_DUMP_CFC /* dump CFC memories (0/1) */,
DBG_GRC_PARAM_DUMP_IGU /* dump IGU memories (0/1) */,
DBG_GRC_PARAM_DUMP_BRB /* dump BRB memories (0/1) */,
@@ -1087,11 +1085,11 @@ enum dbg_storms {
* Idle Check data
*/
struct idle_chk_data {
- __le32 buf_size /* Idle check buffer size in dwords */;
+ u32 buf_size /* Idle check buffer size in dwords */;
/* Indicates if the idle check buffer size was set (0/1) */
u8 buf_size_set;
u8 reserved1;
- __le16 reserved2;
+ u16 reserved2;
};
/*
@@ -1109,7 +1107,7 @@ struct dbg_tools_data {
u8 initialized /* Indicates if the data was initialized */;
u8 use_dmae /* Indicates if DMAE should be used */;
/* Numbers of registers that were read since last log */
- __le32 num_regs_read;
+ u32 num_regs_read;
};
diff --git a/drivers/net/qede/base/ecore_hsi_eth.h b/drivers/net/qede/base/ecore_hsi_eth.h
index ffbf5c71..efe3bb24 100644
--- a/drivers/net/qede/base/ecore_hsi_eth.h
+++ b/drivers/net/qede/base/ecore_hsi_eth.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -346,7 +346,7 @@ struct e4_xstorm_eth_conn_ag_ctx {
__le16 edpm_num_bds /* physical_q2 */;
__le16 tx_bd_cons /* word3 */;
__le16 tx_bd_prod /* word4 */;
- __le16 tx_class /* word5 */;
+ __le16 updated_qm_pq_id /* word5 */;
__le16 conn_dpi /* conn_dpi */;
u8 byte3 /* byte3 */;
u8 byte4 /* byte4 */;
@@ -1034,7 +1034,6 @@ struct eth_vport_rx_mode {
#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5
#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF
#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6
- __le16 reserved2[3];
};
@@ -1046,11 +1045,11 @@ struct eth_vport_tpa_param {
u8 tpa_ipv6_en_flg /* Enable TPA for IPv6 packets */;
u8 tpa_ipv4_tunn_en_flg /* Enable TPA for IPv4 over tunnel */;
u8 tpa_ipv6_tunn_en_flg /* Enable TPA for IPv6 over tunnel */;
-/* If set, start each tpa segment on new SGE (GRO mode). One SGE per segment
- * allowed
+/* If set, start each TPA segment on a new BD (GRO mode). One BD per segment
+ * allowed.
*/
u8 tpa_pkt_split_flg;
-/* If set, put header of first TPA segment on bd and data on SGE */
+/* If set, put header of first TPA segment on first BD and data on second BD. */
u8 tpa_hdr_data_split_flg;
/* If set, GRO data consistent will checked for TPA continue */
u8 tpa_gro_consistent_flg;
@@ -1089,7 +1088,6 @@ struct eth_vport_tx_mode {
#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
#define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF
#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5
- __le16 reserved2[3];
};
@@ -1216,7 +1214,9 @@ struct rx_queue_update_ramrod_data {
u8 complete_cqe_flg /* post completion to the CQE ring if set */;
u8 complete_event_flg /* post completion to the event ring if set */;
u8 vport_id /* ID of virtual port */;
- u8 reserved[4];
+/* If set, update default rss queue to this RX queue. */
+ u8 set_default_rss_queue;
+ u8 reserved[3];
u8 reserved1 /* FW reserved. */;
u8 reserved2 /* FW reserved. */;
u8 reserved3 /* FW reserved. */;
@@ -1257,7 +1257,8 @@ struct rx_update_gft_filter_data {
__le16 action_icid;
__le16 rx_qid /* RX queue ID. Valid if rx_qid_valid set. */;
__le16 flow_id /* RX flow ID. Valid if flow_id_valid set. */;
- u8 vport_id /* RX vport Id. */;
+/* RX vport Id. For drop flow, set to ETH_GFT_TRASHCAN_VPORT. */
+ __le16 vport_id;
/* If set, action_icid will used for GFT filter update. */
u8 action_icid_valid;
/* If set, rx_qid will used for traffic steering, in additional to vport_id.
@@ -1273,7 +1274,10 @@ struct rx_update_gft_filter_data {
* case of error.
*/
u8 assert_on_error;
- u8 reserved[2];
+/* If set, inner VLAN will be removed regardless of VPORT configuration.
+ * Supported by E4 only.
+ */
+ u8 inner_vlan_removal_en;
};
@@ -1403,7 +1407,7 @@ struct vport_start_ramrod_data {
u8 ctl_frame_mac_check_en;
/* If set, control frames will be filtered according to ethtype check. */
u8 ctl_frame_ethtype_check_en;
- u8 reserved[5];
+ u8 reserved[1];
};
@@ -1486,6 +1490,7 @@ struct vport_update_ramrod_data {
struct vport_update_ramrod_data_cmn common;
struct eth_vport_rx_mode rx_mode /* vport rx mode bitmap */;
struct eth_vport_tx_mode tx_mode /* vport tx mode bitmap */;
+ __le32 reserved[3];
/* TPA configuration parameters */
struct eth_vport_tpa_param tpa_param;
struct vport_update_ramrod_mcast approx_mcast;
@@ -1809,7 +1814,7 @@ struct E4XstormEthConnAgCtxDqExtLdPart {
__le16 edpm_num_bds /* physical_q2 */;
__le16 tx_bd_cons /* word3 */;
__le16 tx_bd_prod /* word4 */;
- __le16 tx_class /* word5 */;
+ __le16 updated_qm_pq_id /* word5 */;
__le16 conn_dpi /* conn_dpi */;
u8 byte3 /* byte3 */;
u8 byte4 /* byte4 */;
@@ -2153,7 +2158,7 @@ struct e4_xstorm_eth_hw_conn_ag_ctx {
__le16 edpm_num_bds /* physical_q2 */;
__le16 tx_bd_cons /* word3 */;
__le16 tx_bd_prod /* word4 */;
- __le16 tx_class /* word5 */;
+ __le16 updated_qm_pq_id /* word5 */;
__le16 conn_dpi /* conn_dpi */;
};
diff --git a/drivers/net/qede/base/ecore_hsi_init_func.h b/drivers/net/qede/base/ecore_hsi_init_func.h
index 48b0048f..c318514b 100644
--- a/drivers/net/qede/base/ecore_hsi_init_func.h
+++ b/drivers/net/qede/base/ecore_hsi_init_func.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -24,10 +24,10 @@
* BRB RAM init requirements
*/
struct init_brb_ram_req {
- __le32 guranteed_per_tc /* guaranteed size per TC, in bytes */;
- __le32 headroom_per_tc /* headroom size per TC, in bytes */;
- __le32 min_pkt_size /* min packet size, in bytes */;
- __le32 max_ports_per_engine /* min packet size, in bytes */;
+ u32 guranteed_per_tc /* guaranteed size per TC, in bytes */;
+ u32 headroom_per_tc /* headroom size per TC, in bytes */;
+ u32 min_pkt_size /* min packet size, in bytes */;
+ u32 max_ports_per_engine /* max number of ports per engine */;
u8 num_active_tcs[MAX_NUM_PORTS] /* number of active TCs per port */;
};
@@ -44,15 +44,14 @@ struct init_ets_tc_req {
* (indicated by the weight field)
*/
u8 use_wfq;
-/* An arbitration weight. Valid only if use_wfq is set. */
- __le16 weight;
+ u16 weight /* An arbitration weight. Valid only if use_wfq is set. */;
};
/*
* ETS init requirements
*/
struct init_ets_req {
- __le32 mtu /* Max packet size (in bytes) */;
+ u32 mtu /* Max packet size (in bytes) */;
/* ETS initialization requirements per TC. */
struct init_ets_tc_req tc_req[NUM_OF_TCS];
};
@@ -64,12 +63,12 @@ struct init_ets_req {
*/
struct init_nig_lb_rl_req {
/* Global MAC+LB RL rate (in Mbps). If set to 0, the RL will be disabled. */
- __le16 lb_mac_rate;
+ u16 lb_mac_rate;
/* Global LB RL rate (in Mbps). If set to 0, the RL will be disabled. */
- __le16 lb_rate;
- __le32 mtu /* Max packet size (in bytes) */;
+ u16 lb_rate;
+ u32 mtu /* Max packet size (in bytes) */;
/* RL rate per physical TC (in Mbps). If set to 0, the RL will be disabled. */
- __le16 tc_rate[NUM_OF_PHYS_TCS];
+ u16 tc_rate[NUM_OF_PHYS_TCS];
};
@@ -98,10 +97,10 @@ struct init_qm_port_params {
/* Vector of valid bits for active TCs used by this port */
u8 active_phys_tcs;
/* number of PBF command lines that can be used by this port */
- __le16 num_pbf_cmd_lines;
+ u16 num_pbf_cmd_lines;
/* number of BTB blocks that can be used by this port */
- __le16 num_btb_blocks;
- __le16 reserved;
+ u16 num_btb_blocks;
+ u16 reserved;
};
@@ -114,6 +113,9 @@ struct init_qm_pq_params {
u8 wrr_group /* WRR group */;
/* Indicates if a rate limiter should be allocated for the PQ (0/1) */
u8 rl_valid;
+ u8 port_id /* Port ID */;
+ u8 reserved0;
+ u16 reserved1;
};
@@ -124,13 +126,13 @@ struct init_qm_vport_params {
/* rate limit in Mb/sec units. a value of 0 means dont configure. ignored if
* VPORT RL is globally disabled.
*/
- __le32 vport_rl;
+ u32 vport_rl;
/* WFQ weight. A value of 0 means dont configure. ignored if VPORT WFQ is
* globally disabled.
*/
- __le16 vport_wfq;
+ u16 vport_wfq;
/* the first Tx PQ ID associated with this VPORT for each TC. */
- __le16 first_tx_pq_id[NUM_OF_TCS];
+ u16 first_tx_pq_id[NUM_OF_TCS];
};
#endif /* __ECORE_HSI_INIT_FUNC__ */
diff --git a/drivers/net/qede/base/ecore_hsi_init_tool.h b/drivers/net/qede/base/ecore_hsi_init_tool.h
index 1f57e9b2..2e338a98 100644
--- a/drivers/net/qede/base/ecore_hsi_init_tool.h
+++ b/drivers/net/qede/base/ecore_hsi_init_tool.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -30,59 +30,13 @@ enum chip_ids {
};
-struct fw_asserts_ram_section {
-/* The offset of the section in the RAM in RAM lines (64-bit units) */
- __le16 section_ram_line_offset;
-/* The size of the section in RAM lines (64-bit units) */
- __le16 section_ram_line_size;
-/* The offset of the asserts list within the section in dwords */
- u8 list_dword_offset;
-/* The size of an assert list element in dwords */
- u8 list_element_dword_size;
- u8 list_num_elements /* The number of elements in the asserts list */;
-/* The offset of the next list index field within the section in dwords */
- u8 list_next_index_dword_offset;
-};
-
-
-struct fw_ver_num {
- u8 major /* Firmware major version number */;
- u8 minor /* Firmware minor version number */;
- u8 rev /* Firmware revision version number */;
-/* Firmware engineering version number (for bootleg versions) */
- u8 eng;
-};
-
-struct fw_ver_info {
- __le16 tools_ver /* Tools version number */;
- u8 image_id /* FW image ID (e.g. main, l2b, kuku) */;
- u8 reserved1;
- struct fw_ver_num num /* FW version number */;
- __le32 timestamp /* FW Timestamp in unix time (sec. since 1970) */;
- __le32 reserved2;
-};
-
-struct fw_info {
- struct fw_ver_info ver /* FW version information */;
-/* Info regarding the FW asserts section in the Storm RAM */
- struct fw_asserts_ram_section fw_asserts_section;
-};
-
-
-struct fw_info_location {
-/* GRC address where the fw_info struct is located. */
- __le32 grc_addr;
-/* Size of the fw_info structure (thats located at the grc_addr). */
- __le32 size;
-};
-
/*
* Binary buffer header
*/
struct bin_buffer_hdr {
/* buffer offset in bytes from the beginning of the binary file */
- __le32 offset;
- __le32 length /* buffer length in bytes */;
+ u32 offset;
+ u32 length /* buffer length in bytes */;
};
@@ -103,7 +57,7 @@ enum bin_init_buffer_type {
* init array header: raw
*/
struct init_array_raw_hdr {
- __le32 data;
+ u32 data;
/* Init array type, from init_array_types enum */
#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF
#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0
@@ -116,7 +70,7 @@ struct init_array_raw_hdr {
* init array header: standard
*/
struct init_array_standard_hdr {
- __le32 data;
+ u32 data;
/* Init array type, from init_array_types enum */
#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF
#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
@@ -129,7 +83,7 @@ struct init_array_standard_hdr {
* init array header: zipped
*/
struct init_array_zipped_hdr {
- __le32 data;
+ u32 data;
/* Init array type, from init_array_types enum */
#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF
#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0
@@ -142,7 +96,7 @@ struct init_array_zipped_hdr {
* init array header: pattern
*/
struct init_array_pattern_hdr {
- __le32 data;
+ u32 data;
/* Init array type, from init_array_types enum */
#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF
#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0
@@ -223,14 +177,14 @@ enum init_array_types {
* init operation: callback
*/
struct init_callback_op {
- __le32 op_data;
+ u32 op_data;
/* Init operation, from init_op_types enum */
#define INIT_CALLBACK_OP_OP_MASK 0xF
#define INIT_CALLBACK_OP_OP_SHIFT 0
#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF
#define INIT_CALLBACK_OP_RESERVED_SHIFT 4
- __le16 callback_id /* Callback ID */;
- __le16 block_id /* Blocks ID */;
+ u16 callback_id /* Callback ID */;
+ u16 block_id /* Blocks ID */;
};
@@ -238,7 +192,7 @@ struct init_callback_op {
* init operation: delay
*/
struct init_delay_op {
- __le32 op_data;
+ u32 op_data;
/* Init operation, from init_op_types enum */
#define INIT_DELAY_OP_OP_MASK 0xF
#define INIT_DELAY_OP_OP_SHIFT 0
@@ -252,7 +206,7 @@ struct init_delay_op {
* init operation: if_mode
*/
struct init_if_mode_op {
- __le32 op_data;
+ u32 op_data;
/* Init operation, from init_op_types enum */
#define INIT_IF_MODE_OP_OP_MASK 0xF
#define INIT_IF_MODE_OP_OP_SHIFT 0
@@ -261,9 +215,8 @@ struct init_if_mode_op {
/* Commands to skip if the modes dont match */
#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF
#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
- __le16 reserved2;
-/* offset (in bytes) in modes expression buffer */
- __le16 modes_buf_offset;
+ u16 reserved2;
+ u16 modes_buf_offset /* offset (in bytes) in modes expression buffer */;
};
@@ -271,7 +224,7 @@ struct init_if_mode_op {
* init operation: if_phase
*/
struct init_if_phase_op {
- __le32 op_data;
+ u32 op_data;
/* Init operation, from init_op_types enum */
#define INIT_IF_PHASE_OP_OP_MASK 0xF
#define INIT_IF_PHASE_OP_OP_SHIFT 0
@@ -283,7 +236,7 @@ struct init_if_phase_op {
/* Commands to skip if the phases dont match */
#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF
#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
- __le32 phase_data;
+ u32 phase_data;
#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF /* Init phase */
#define INIT_IF_PHASE_OP_PHASE_SHIFT 0
#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF
@@ -308,21 +261,21 @@ enum init_mode_ops {
* init operation: raw
*/
struct init_raw_op {
- __le32 op_data;
+ u32 op_data;
/* Init operation, from init_op_types enum */
#define INIT_RAW_OP_OP_MASK 0xF
#define INIT_RAW_OP_OP_SHIFT 0
#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF /* init param 1 */
#define INIT_RAW_OP_PARAM1_SHIFT 4
- __le32 param2 /* Init param 2 */;
+ u32 param2 /* Init param 2 */;
};
/*
* init array params
*/
struct init_op_array_params {
- __le16 size /* array size in dwords */;
- __le16 offset /* array start offset in dwords */;
+ u16 size /* array size in dwords */;
+ u16 offset /* array start offset in dwords */;
};
/*
@@ -330,11 +283,11 @@ struct init_op_array_params {
*/
union init_write_args {
/* value to write, used when init source is INIT_SRC_INLINE */
- __le32 inline_val;
+ u32 inline_val;
/* number of zeros to write, used when init source is INIT_SRC_ZEROS */
- __le32 zeros_count;
+ u32 zeros_count;
/* array offset to write, used when init source is INIT_SRC_ARRAY */
- __le32 array_offset;
+ u32 array_offset;
/* runtime array params to write, used when init source is INIT_SRC_RUNTIME */
struct init_op_array_params runtime;
};
@@ -343,7 +296,7 @@ union init_write_args {
* init operation: write
*/
struct init_write_op {
- __le32 data;
+ u32 data;
/* init operation, from init_op_types enum */
#define INIT_WRITE_OP_OP_MASK 0xF
#define INIT_WRITE_OP_OP_SHIFT 0
@@ -365,7 +318,7 @@ struct init_write_op {
* init operation: read
*/
struct init_read_op {
- __le32 op_data;
+ u32 op_data;
/* init operation, from init_op_types enum */
#define INIT_READ_OP_OP_MASK 0xF
#define INIT_READ_OP_OP_SHIFT 0
@@ -378,7 +331,7 @@ struct init_read_op {
#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF
#define INIT_READ_OP_ADDRESS_SHIFT 9
/* expected polling value, used only when polling is done */
- __le32 expected_val;
+ u32 expected_val;
};
/*
@@ -444,11 +397,11 @@ enum init_source_types {
* Internal RAM Offsets macro data
*/
struct iro {
- __le32 base /* RAM field offset */;
- __le16 m1 /* multiplier 1 */;
- __le16 m2 /* multiplier 2 */;
- __le16 m3 /* multiplier 3 */;
- __le16 size /* RAM field size */;
+ u32 base /* RAM field offset */;
+ u16 m1 /* multiplier 1 */;
+ u16 m2 /* multiplier 2 */;
+ u16 m3 /* multiplier 3 */;
+ u16 size /* RAM field size */;
};
#endif /* __ECORE_HSI_INIT_TOOL__ */
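The iro entries built from this struct drive the *_OFFSET() macros used by the Storm RAM accessors: each entry gives a base offset plus up to three multipliers scaled by the macro's runtime parameters (pf_id, queue_id, and so on). A hedged sketch of that expansion, assuming the usual base + m1*p1 + m2*p2 + m3*p3 form (the exact macro bodies live in ecore_iro.h and are not part of this hunk):

struct iro_sketch {
	unsigned int base;           /* RAM field offset */
	unsigned short m1, m2, m3;   /* per-parameter multipliers */
	unsigned short size;         /* RAM field size */
};

unsigned int iro_offset(const struct iro_sketch *e,
			unsigned int p1, unsigned int p2, unsigned int p3)
{
	return e->base + e->m1 * p1 + e->m2 * p2 + e->m3 * p3;
}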
diff --git a/drivers/net/qede/base/ecore_hw.c b/drivers/net/qede/base/ecore_hw.c
index 84f273b0..b00c33d4 100644
--- a/drivers/net/qede/base/ecore_hw.c
+++ b/drivers/net/qede/base/ecore_hw.c
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -38,6 +38,12 @@ struct ecore_ptt_pool {
struct ecore_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};
+void __ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
+{
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
+ p_hwfn->p_ptt_pool = OSAL_NULL;
+}
+
enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
{
struct ecore_ptt_pool *p_pool = OSAL_ALLOC(p_hwfn->p_dev,
@@ -65,10 +71,12 @@ enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
p_hwfn->p_ptt_pool = p_pool;
#ifdef CONFIG_ECORE_LOCK_ALLOC
- OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock);
+ if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock)) {
+ __ecore_ptt_pool_free(p_hwfn);
+ return ECORE_NOMEM;
+ }
#endif
OSAL_SPIN_LOCK_INIT(&p_pool->lock);
-
return ECORE_SUCCESS;
}
@@ -89,7 +97,7 @@ void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
if (p_hwfn->p_ptt_pool)
OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock);
#endif
- OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
+ __ecore_ptt_pool_free(p_hwfn);
}
struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn)
@@ -569,7 +577,7 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
*p_comp = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(u32));
if (*p_comp == OSAL_NULL) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed to allocate `p_completion_word'\n");
goto err;
}
@@ -578,7 +586,7 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
*p_cmd = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
sizeof(struct dmae_cmd));
if (*p_cmd == OSAL_NULL) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed to allocate `struct dmae_cmd'\n");
goto err;
}
@@ -587,12 +595,13 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
*p_buff = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
sizeof(u32) * DMAE_MAX_RW_SIZE);
if (*p_buff == OSAL_NULL) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed to allocate `intermediate_buffer'\n");
goto err;
}
- p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
+ p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
+ p_hwfn->dmae_info.b_mem_ready = true;
return ECORE_SUCCESS;
err:
@@ -604,8 +613,9 @@ void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn)
{
dma_addr_t p_phys;
- /* Just make sure no one is in the middle */
- OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
+ OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
+ p_hwfn->dmae_info.b_mem_ready = false;
+ OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
if (p_hwfn->dmae_info.p_completion_word != OSAL_NULL) {
p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
@@ -630,8 +640,6 @@ void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn)
p_phys, sizeof(u32) * DMAE_MAX_RW_SIZE);
p_hwfn->dmae_info.p_intermediate_buffer = OSAL_NULL;
}
-
- OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
}
static enum _ecore_status_t ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn)
@@ -777,6 +785,15 @@ ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_status = ECORE_SUCCESS;
u32 offset = 0;
+ if (!p_hwfn->dmae_info.b_mem_ready) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "No buffers allocated. Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n",
+ (unsigned long)src_addr, src_type,
+ (unsigned long)dst_addr, dst_type,
+ size_in_dwords);
+ return ECORE_NOMEM;
+ }
+
if (p_hwfn->p_dev->recov_in_prog) {
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"Recovery is in progress. Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n",
@@ -870,7 +887,7 @@ ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
params.flags = flags;
- OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
+ OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
grc_addr_in_dw,
@@ -878,7 +895,7 @@ ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
ECORE_DMAE_ADDRESS_GRC,
size_in_dwords, &params);
- OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
+ OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
return rc;
}
@@ -896,14 +913,14 @@ ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
params.flags = flags;
- OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
+ OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
rc = ecore_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
dest_addr, ECORE_DMAE_ADDRESS_GRC,
ECORE_DMAE_ADDRESS_HOST_VIRT,
size_in_dwords, &params);
- OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
+ OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
return rc;
}
@@ -917,7 +934,7 @@ ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
{
enum _ecore_status_t rc;
- OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
+ OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
dest_addr,
@@ -925,7 +942,7 @@ ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
ECORE_DMAE_ADDRESS_HOST_PHYS,
size_in_dwords, p_params);
- OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
+ OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
return rc;
}
@@ -944,3 +961,74 @@ void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
OSAL_HW_ERROR_OCCURRED(p_hwfn, err_type);
}
+
+enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ const char *phase)
+{
+ u32 size = OSAL_PAGE_SIZE / 2, val;
+ struct ecore_dmae_params params;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ dma_addr_t p_phys;
+ void *p_virt;
+ u32 *p_tmp;
+
+ p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys, 2 * size);
+ if (!p_virt) {
+ DP_NOTICE(p_hwfn, false,
+ "DMAE sanity [%s]: failed to allocate memory\n",
+ phase);
+ return ECORE_NOMEM;
+ }
+
+ /* Fill the bottom half of the allocated memory with a known pattern */
+ for (p_tmp = (u32 *)p_virt;
+ p_tmp < (u32 *)((u8 *)p_virt + size);
+ p_tmp++) {
+ /* Save the address itself as the value */
+ val = (u32)(osal_uintptr_t)p_tmp;
+ *p_tmp = val;
+ }
+
+ /* Zero the top half of the allocated memory */
+ OSAL_MEM_ZERO((u8 *)p_virt + size, size);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "DMAE sanity [%s]: src_addr={phys 0x%lx, virt %p}, dst_addr={phys 0x%lx, virt %p}, size 0x%x\n",
+ phase, (unsigned long)p_phys, p_virt,
+ (unsigned long)(p_phys + size),
+ (u8 *)p_virt + size, size);
+
+ OSAL_MEMSET(&params, 0, sizeof(params));
+ rc = ecore_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
+ size / 4 /* size_in_dwords */, &params);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "DMAE sanity [%s]: ecore_dmae_host2host() failed. rc = %d.\n",
+ phase, rc);
+ goto out;
+ }
+
+ /* Verify that the top half of the allocated memory has the pattern */
+ for (p_tmp = (u32 *)((u8 *)p_virt + size);
+ p_tmp < (u32 *)((u8 *)p_virt + (2 * size));
+ p_tmp++) {
+ /* The corresponding address in the bottom half */
+ val = (u32)(osal_uintptr_t)p_tmp - size;
+
+ if (*p_tmp != val) {
+ DP_NOTICE(p_hwfn, false,
+ "DMAE sanity [%s]: addr={phys 0x%lx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n",
+ phase,
+ (unsigned long)p_phys +
+ ((u8 *)p_tmp - (u8 *)p_virt),
+ p_tmp, *p_tmp, val);
+ rc = ECORE_UNKNOWN_ERROR;
+ goto out;
+ }
+ }
+
+out:
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_virt, p_phys, 2 * size);
+ return rc;
+}
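Later in this patch ecore_dmae_sanity() is hooked up as the DMAE_READY_CB init-ops callback (see ecore_init_cmd_cb() in ecore_init_ops.c below); it can also be invoked directly once the DMAE buffers are allocated. A hedged usage sketch (the phase string only tags the log output):

	if (ecore_dmae_sanity(p_hwfn, p_ptt, "pf_load") != ECORE_SUCCESS)
		DP_NOTICE(p_hwfn, false, "DMAE loopback check failed\n");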
diff --git a/drivers/net/qede/base/ecore_hw.h b/drivers/net/qede/base/ecore_hw.h
index 0b9814f5..f3f513e8 100644
--- a/drivers/net/qede/base/ecore_hw.h
+++ b/drivers/net/qede/base/ecore_hw.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -255,4 +255,8 @@ enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
enum ecore_hw_err_type err_type);
+enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ const char *phase);
+
#endif /* __ECORE_HW_H__ */
diff --git a/drivers/net/qede/base/ecore_hw_defs.h b/drivers/net/qede/base/ecore_hw_defs.h
index 4456af43..2f4bd536 100644
--- a/drivers/net/qede/base/ecore_hw_defs.h
+++ b/drivers/net/qede/base/ecore_hw_defs.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.c b/drivers/net/qede/base/ecore_init_fw_funcs.c
index 1da80a65..3f986629 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.c
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.c
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -76,12 +76,12 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
/* RL increment value - rate is specified in mbps. the factor of 1.01 was
-* added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC
-* 2544 test. In this scenario the PF RL was reducing the line rate to 99%
-* although the credit increment value was the correct one and FW calculated
-* correct packet sizes. The reason for the inaccuracy of the RL is unknown at
-* this point.
-*/
+ * added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC
+ * 2544 test. In this scenario the PF RL was reducing the line rate to 99%
+ * although the credit increment value was the correct one and FW calculated
+ * correct packet sizes. The reason for the inaccuracy of the RL is unknown at
+ * this point.
+ */
#define QM_RL_INC_VAL(rate) \
OSAL_MAX_T(u32, (u32)(((rate ? rate : 100000) * QM_RL_PERIOD * 101) / \
(8 * 100)), 1)
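In other words, QM_RL_INC_VAL() converts a rate given in Mbps into the credit added per QM_RL_PERIOD, padded by 1% (the 101/100 factor explained above), defaulting to 100 Gbps when rate is 0 and never returning less than 1. An illustrative stand-alone sketch of the same arithmetic (assuming QM_RL_PERIOD is expressed in microseconds, so Mbps * us / 8 yields bytes per period; the real period constant is defined elsewhere in the QM code):

unsigned int qm_rl_inc_val_sketch(unsigned int rate_mbps,
				  unsigned int rl_period_us)
{
	unsigned int rate = rate_mbps ? rate_mbps : 100000; /* 0 -> 100 Gbps */
	unsigned int inc = (rate * rl_period_us * 101) / (8 * 100); /* bytes, +1% */

	return inc ? inc : 1; /* mirrors the OSAL_MAX_T(..., 1) clamp */
}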
@@ -182,7 +182,7 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
(((vp) << 0) | ((pf) << 12) | ((tc) << 16) | \
((port) << 20) | ((rl_valid) << 22) | ((rl) << 24))
#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
- (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21768 + (pq_id) * 4)
+ (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)
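PQ_INFO_ELEMENT() (its packing is shown a few lines above) folds the first Tx PQ id, PF, TC, port, RL-valid flag and RL (vport) id into one dword, which ecore_tx_pq_map_rt_init() below writes to PQ_INFO_RAM_GRC_ADDRESS(pq_id) in XSEM fast memory. A worked example of the packing, with illustrative values:

/* vp = 0x10, pf = 2, tc = 3, port = 1, rl_valid = 1, rl = 5
 *   0x10 | (2 << 12) | (3 << 16) | (1 << 20) | (1 << 22) | (5 << 24)
 *     = 0x05532010
 */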
/******************** INTERNAL IMPLEMENTATION *********************/
@@ -421,9 +421,9 @@ static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
+ bool is_pf_loading,
u32 num_pf_cids,
u32 num_vf_cids,
u16 start_pq,
@@ -437,7 +437,7 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
/* A bit per Tx PQ indicating if the PQ is associated with a VF */
u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
- u16 num_pqs, first_pq_group, last_pq_group, i, pq_id, pq_group;
+ u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;
num_pqs = num_pf_pqs + num_vf_pqs;
@@ -467,11 +467,11 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
bool is_vf_pq, rl_valid;
u16 first_tx_pq_id;
- ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id,
+ ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id,
+ pq_params[i].tc_id,
max_phys_tcs_per_port);
is_vf_pq = (i >= num_pf_pqs);
- rl_valid = pq_params[i].rl_valid && pq_params[i].vport_id <
- max_qm_global_rls;
+ rl_valid = pq_params[i].rl_valid > 0;
/* Update first Tx PQ of VPORT/TC */
vport_id_in_pf = pq_params[i].vport_id - start_vport;
@@ -492,28 +492,38 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
}
/* Check RL ID */
- if (pq_params[i].rl_valid && pq_params[i].vport_id >=
- max_qm_global_rls)
+ if (rl_valid && pq_params[i].vport_id >= max_qm_global_rls) {
DP_NOTICE(p_hwfn, true,
"Invalid VPORT ID for rate limiter config\n");
+ rl_valid = false;
+ }
/* Prepare PQ map entry */
struct qm_rf_pq_map_e4 tx_pq_map;
+
QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, E4, pq_id, rl_valid ?
1 : 0,
first_tx_pq_id, rl_valid ?
pq_params[i].vport_id : 0,
ext_voq, pq_params[i].wrr_group);
- /* Set base address */
+ /* Set PQ base address */
STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
mem_addr_4kb);
+ /* Clear PQ pointer table entry (64 bit) */
+ if (is_pf_loading)
+ for (j = 0; j < 2; j++)
+ STORE_RT_REG(p_hwfn, QM_REG_PTRTBLTX_RT_OFFSET +
+ (pq_id * 2) + j, 0);
+
/* Write PQ info to RAM */
if (WRITE_PQ_INFO_TO_RAM != 0) {
u32 pq_info = 0;
+
pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id,
- pq_params[i].tc_id, port_id,
+ pq_params[i].tc_id,
+ pq_params[i].port_id,
rl_valid ? 1 : 0, rl_valid ?
pq_params[i].vport_id : 0);
ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
@@ -540,12 +550,13 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
/* Prepare Other PQ mapping runtime init values for the specified PF */
static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
u8 pf_id,
+ bool is_pf_loading,
u32 num_pf_cids,
u32 num_tids,
u32 base_mem_addr_4kb)
{
u32 pq_size, pq_mem_4kb, mem_addr_4kb;
- u16 i, pq_id, pq_group;
+ u16 i, j, pq_id, pq_group;
/* A single other PQ group is used in each PF, where PQ group i is used
* in PF i.
@@ -563,11 +574,19 @@ static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
QM_PQ_SIZE_256B(pq_size));
- /* Set base address */
for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
+ /* Set PQ base address */
STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
mem_addr_4kb);
+
+ /* Clear PQ pointer table entry */
+ if (is_pf_loading)
+ for (j = 0; j < 2; j++)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_PTRTBLOTHER_RT_OFFSET +
+ (pq_id * 2) + j, 0);
+
mem_addr_4kb += pq_mem_4kb;
}
}
@@ -576,7 +595,6 @@ static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
* Return -1 on error.
*/
static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
- u8 port_id,
u8 pf_id,
u16 pf_wfq,
u8 max_phys_tcs_per_port,
@@ -595,7 +613,8 @@ static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
}
for (i = 0; i < num_tx_pqs; i++) {
- ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id,
+ ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id,
+ pq_params[i].tc_id,
max_phys_tcs_per_port);
crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ?
QM_REG_WFQPFCRD_RT_OFFSET :
@@ -604,12 +623,12 @@ static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
(pf_id % MAX_NUM_PFS_BB);
OVERWRITE_RT_REG(p_hwfn, crd_reg_offset,
(u32)QM_WFQ_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id,
- QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id,
- inc_val);
}
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET +
+ pf_id, QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
+
return 0;
}
@@ -820,9 +839,9 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
+ bool is_pf_loading,
u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
@@ -850,20 +869,21 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
/* Map Other PQs (if any) */
#if QM_OTHER_PQS_PER_PF > 0
- ecore_other_pq_map_rt_init(p_hwfn, pf_id, num_pf_cids, num_tids, 0);
+ ecore_other_pq_map_rt_init(p_hwfn, pf_id, is_pf_loading, num_pf_cids,
+ num_tids, 0);
#endif
/* Map Tx PQs */
- ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id,
- max_phys_tcs_per_port, num_pf_cids, num_vf_cids,
+ ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, pf_id, max_phys_tcs_per_port,
+ is_pf_loading, num_pf_cids, num_vf_cids,
start_pq, num_pf_pqs, num_vf_pqs, start_vport,
other_mem_size_4kb, pq_params, vport_params);
/* Init PF WFQ */
if (pf_wfq)
- if (ecore_pf_wfq_rt_init
- (p_hwfn, port_id, pf_id, pf_wfq, max_phys_tcs_per_port,
- num_pf_pqs + num_vf_pqs, pq_params))
+ if (ecore_pf_wfq_rt_init(p_hwfn, pf_id, pf_wfq,
+ max_phys_tcs_per_port,
+ num_pf_pqs + num_vf_pqs, pq_params))
return -1;
/* Init PF RL */
@@ -1419,7 +1439,9 @@ void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
(var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0))
-#define PRS_ETH_TUNN_FIC_FORMAT -188897008
+#define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008
+#define PRS_ETH_OUTPUT_FORMAT -46832
+
void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u16 dest_port)
{
@@ -1444,9 +1466,14 @@ void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
vxlan_enable);
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
- if (reg_val) {
- ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
- (u32)PRS_ETH_TUNN_FIC_FORMAT);
+ if (reg_val) { /* TODO: handle E5 init */
+ reg_val = ecore_rd(p_hwfn, p_ptt,
+ PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+
+ /* Update output only if tunnel blocks not included. */
+ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
}
/* Update NIG register */
@@ -1476,9 +1503,14 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT,
ip_gre_enable);
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
- if (reg_val) {
- ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
- (u32)PRS_ETH_TUNN_FIC_FORMAT);
+ if (reg_val) { /* TODO: handle E5 init */
+ reg_val = ecore_rd(p_hwfn, p_ptt,
+ PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+
+ /* Update output only if tunnel blocks not included. */
+ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
}
/* Update NIG register */
@@ -1526,9 +1558,14 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT,
ip_geneve_enable);
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
- if (reg_val) {
- ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
- (u32)PRS_ETH_TUNN_FIC_FORMAT);
+ if (reg_val) { /* TODO: handle E5 init */
+ reg_val = ecore_rd(p_hwfn, p_ptt,
+ PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+
+ /* Update output only if tunnel blocks not included. */
+ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
}
/* Update NIG register */
@@ -1548,6 +1585,36 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
ip_geneve_enable ? 1 : 0);
}
+#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 4
+#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -927094512
+
+void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool enable)
+{
+ u32 reg_val, cfg_mask;
+
+ /* read PRS config register */
+ reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO);
+
+ /* set VXLAN_NO_L2_ENABLE mask */
+ cfg_mask = (1 << PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);
+
+ if (enable) {
+ /* set VXLAN_NO_L2_ENABLE flag */
+ reg_val |= cfg_mask;
+
+ /* update PRS FIC register */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
+ } else {
+ /* clear VXLAN_NO_L2_ENABLE flag */
+ reg_val &= ~cfg_mask;
+ }
+
+ /* write PRS config register */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
+}
#define T_ETH_PACKET_ACTION_GFT_EVENTID 23
#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
@@ -1664,6 +1731,10 @@ void ecore_gft_config(struct ecore_hwfn *p_hwfn,
ram_line_lo = 0;
ram_line_hi = 0;
+ /* Tunnel type */
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
+
if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
@@ -1675,9 +1746,14 @@ void ecore_gft_config(struct ecore_hwfn *p_hwfn,
SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
- } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_PORT) {
+ } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ } else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
}
ecore_wr(p_hwfn, p_ptt,
@@ -1921,3 +1997,53 @@ void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}
+
+#define RSS_IND_TABLE_BASE_ADDR 4112
+#define RSS_IND_TABLE_VPORT_SIZE 16
+#define RSS_IND_TABLE_ENTRY_PER_LINE 8
+
+/* Update RSS indirection table entry. */
+void ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 rss_id,
+ u8 ind_table_index,
+ u16 ind_table_value)
+{
+ u32 cnt, rss_addr;
+ u32 *reg_val;
+ u16 rss_ind_entry[RSS_IND_TABLE_ENTRY_PER_LINE];
+ u16 rss_ind_mask[RSS_IND_TABLE_ENTRY_PER_LINE];
+
+ /* get entry address */
+ rss_addr = RSS_IND_TABLE_BASE_ADDR +
+ RSS_IND_TABLE_VPORT_SIZE * rss_id +
+ ind_table_index / RSS_IND_TABLE_ENTRY_PER_LINE;
+
+ /* prepare update command */
+ ind_table_index %= RSS_IND_TABLE_ENTRY_PER_LINE;
+
+ for (cnt = 0; cnt < RSS_IND_TABLE_ENTRY_PER_LINE; cnt++) {
+ if (cnt == ind_table_index) {
+ rss_ind_entry[cnt] = ind_table_value;
+ rss_ind_mask[cnt] = 0xFFFF;
+ } else {
+ rss_ind_entry[cnt] = 0;
+ rss_ind_mask[cnt] = 0;
+ }
+ }
+
+ /* Update entry in HW */
+ ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
+
+ reg_val = (u32 *)rss_ind_mask;
+ ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK, reg_val[0]);
+ ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 4, reg_val[1]);
+ ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 8, reg_val[2]);
+ ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 12, reg_val[3]);
+
+ reg_val = (u32 *)rss_ind_entry;
+ ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA, reg_val[0]);
+ ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 4, reg_val[1]);
+ ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 8, reg_val[2]);
+ ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 12, reg_val[3]);
+}
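Given the constants above, each RSS engine owns 16 RAM lines of 8 entries, i.e. 128 indirection-table entries, and the mask registers ensure only the targeted 16-bit entry in a line is modified. A hedged usage sketch that spreads an engine's table over a set of queues (all identifiers other than the function itself are illustrative; the values written are whatever HW queue identifiers the RSS table expects):

	u16 i;

	for (i = 0; i < 128; i++)
		ecore_update_eth_rss_ind_table_entry(p_hwfn, p_ptt, rss_id,
						     (u8)i,
						     queue_ids[i % num_rx_queues]);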
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.h b/drivers/net/qede/base/ecore_init_fw_funcs.h
index ab560e59..310c9ed7 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.h
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -61,9 +61,10 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
*
* @param p_hwfn
* @param p_ptt - ptt window used for writing the registers
- * @param port_id - port ID
* @param pf_id - PF ID
* @param max_phys_tcs_per_port - max number of physical TCs per port in HW
+ * @param is_pf_loading - indicates if the PF is currently loading,
+ * i.e. it has no allocated QM resources.
* @param num_pf_cids - number of connections used by this PF
* @param num_vf_cids - number of connections used by VFs of this PF
* @param num_tids - number of tasks used by this PF
@@ -87,9 +88,9 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
*/
int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
+ bool is_pf_loading,
u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
@@ -259,6 +260,16 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
struct init_brb_ram_req *req);
#endif /* UNUSED_HSI_FUNC */
+/**
+ * @brief ecore_set_vxlan_no_l2_enable - enable or disable VXLAN no L2 parsing
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param enable - VXLAN no L2 enable flag.
+ */
+void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool enable);
+
#ifndef UNUSED_HSI_FUNC
/**
* @brief ecore_set_port_mf_ovlan_eth_type - initializes DORQ ethType Regs to
@@ -462,4 +473,22 @@ void ecore_memset_session_ctx(void *p_ctx_mem,
void ecore_memset_task_ctx(void *p_ctx_mem,
u32 ctx_size,
u8 ctx_type);
+
+/**
+ * @brief ecore_update_eth_rss_ind_table_entry - Update RSS indirection table
+ * entry.
+ * The function must run in exclusive mode to prevent wrong RSS configuration.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param rss_id - RSS engine ID.
+ * @param ind_table_index - RSS indirect table index.
+ * @param ind_table_value - RSS indirect table new value.
+ */
+void ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 rss_id,
+ u8 ind_table_index,
+ u16 ind_table_value);
+
#endif
diff --git a/drivers/net/qede/base/ecore_init_ops.c b/drivers/net/qede/base/ecore_init_ops.c
index 91633c11..eadccf40 100644
--- a/drivers/net/qede/base/ecore_init_ops.c
+++ b/drivers/net/qede/base/ecore_init_ops.c
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -389,23 +389,29 @@ static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
}
if (i == ECORE_INIT_MAX_POLL_COUNT)
- DP_ERR(p_hwfn,
- "Timeout when polling reg: 0x%08x [ Waiting-for: %08x"
- " Got: %08x (comparsion %08x)]\n",
+ DP_ERR(p_hwfn, "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
addr, OSAL_LE32_TO_CPU(cmd->expected_val), val,
OSAL_LE32_TO_CPU(cmd->op_data));
}
-/* init_ops callbacks entry point.
- * OSAL_UNUSED is temporary used to avoid unused-parameter compilation warnings.
- * Should be removed when the function is actually used.
- */
-static void ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt OSAL_UNUSED * p_ptt,
- struct init_callback_op OSAL_UNUSED * p_cmd)
+/* init_ops callbacks entry point */
+static enum _ecore_status_t ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_callback_op *p_cmd)
{
- DP_NOTICE(p_hwfn, true,
- "Currently init values have no need of callbacks\n");
+ enum _ecore_status_t rc;
+
+ switch (p_cmd->callback_id) {
+ case DMAE_READY_CB:
+ rc = ecore_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false, "Unexpected init op callback ID %d\n",
+ p_cmd->callback_id);
+ return ECORE_INVAL;
+ }
+
+ return rc;
}
static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn,
@@ -513,7 +519,7 @@ enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
break;
case INIT_OP_CALLBACK:
- ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+ rc = ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
break;
}
diff --git a/drivers/net/qede/base/ecore_init_ops.h b/drivers/net/qede/base/ecore_init_ops.h
index e293a4a3..7ca00e4e 100644
--- a/drivers/net/qede/base/ecore_init_ops.h
+++ b/drivers/net/qede/base/ecore_init_ops.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c
index e6cef85b..7272f059 100644
--- a/drivers/net/qede/base/ecore_int.c
+++ b/drivers/net/qede/base/ecore_int.c
@@ -1,11 +1,13 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
+#include <rte_string_fns.h>
+
#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_spq.h"
@@ -285,9 +287,11 @@ out:
#define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23)
enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+ struct ecore_ptt *p_ptt,
+ bool is_hw_init)
{
u32 tmp;
+ char str[512] = {0};
tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
if (tmp & ECORE_PGLUE_ATTENTION_VALID) {
@@ -299,9 +303,8 @@ enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
details = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_WR_DETAILS);
-
- DP_NOTICE(p_hwfn, false,
- "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
+ OSAL_SNPRINTF(str, 512,
+ "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
addr_hi, addr_lo, details,
(u8)((details &
ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
@@ -318,6 +321,10 @@ enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
1 : 0),
(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ?
1 : 0));
+ if (is_hw_init)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "%s", str);
+ else
+ DP_NOTICE(p_hwfn, false, "%s", str);
}
tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
@@ -393,7 +400,7 @@ enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
static enum _ecore_status_t ecore_pglueb_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
{
- return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt);
+ return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false);
}
static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
@@ -1104,9 +1111,9 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
p_aeu->bit_name,
num);
else
- OSAL_STRNCPY(bit_name,
- p_aeu->bit_name,
- 30);
+ strlcpy(bit_name,
+ p_aeu->bit_name,
+ sizeof(bit_name));
/* We now need to pass bitmask in its
* correct position.
@@ -1406,8 +1413,7 @@ static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
/* SB struct */
p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb));
if (!p_sb) {
- DP_NOTICE(p_dev, true,
- "Failed to allocate `struct ecore_sb_attn_info'\n");
+ DP_NOTICE(p_dev, false, "Failed to allocate `struct ecore_sb_attn_info'\n");
return ECORE_NOMEM;
}
@@ -1415,8 +1421,7 @@ static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
SB_ATTN_ALIGNED_SIZE(p_hwfn));
if (!p_virt) {
- DP_NOTICE(p_dev, true,
- "Failed to allocate status block (attentions)\n");
+ DP_NOTICE(p_dev, false, "Failed to allocate status block (attentions)\n");
OSAL_FREE(p_dev, p_sb);
return ECORE_NOMEM;
}
@@ -1795,8 +1800,7 @@ static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
sizeof(*p_sb));
if (!p_sb) {
- DP_NOTICE(p_hwfn, true,
- "Failed to allocate `struct ecore_sb_info'\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sb_info'\n");
return ECORE_NOMEM;
}
@@ -1804,7 +1808,7 @@ static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
&p_phys, SB_ALIGNED_SIZE(p_hwfn));
if (!p_virt) {
- DP_NOTICE(p_hwfn, true, "Failed to allocate status block\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate status block\n");
OSAL_FREE(p_hwfn->p_dev, p_sb);
return ECORE_NOMEM;
}
diff --git a/drivers/net/qede/base/ecore_int.h b/drivers/net/qede/base/ecore_int.h
index 563051c3..bb22fdd8 100644
--- a/drivers/net/qede/base/ecore_int.h
+++ b/drivers/net/qede/base/ecore_int.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -256,6 +256,7 @@ enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
#endif
enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt);
+ struct ecore_ptt *p_ptt,
+ bool is_hw_init);
#endif /* __ECORE_INT_H__ */
diff --git a/drivers/net/qede/base/ecore_int_api.h b/drivers/net/qede/base/ecore_int_api.h
index 24cdf5ed..dff53776 100644
--- a/drivers/net/qede/base/ecore_int_api.h
+++ b/drivers/net/qede/base/ecore_int_api.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/qede/base/ecore_iov_api.h b/drivers/net/qede/base/ecore_iov_api.h
index 218ef50b..ee7cad74 100644
--- a/drivers/net/qede/base/ecore_iov_api.h
+++ b/drivers/net/qede/base/ecore_iov_api.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -540,6 +540,17 @@ bool ecore_iov_is_valid_vfpf_msg_length(u32 length);
u32 ecore_iov_pfvf_msg_length(void);
/**
+ * @brief Returns MAC address if one is configured
+ *
+ * @parm p_hwfn
+ * @parm rel_vf_id
+ *
+ * @return OSAL_NULL if mac isn't set; Otherwise, returns MAC.
+ */
+u8 *ecore_iov_bulletin_get_mac(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
* @brief Returns forced MAC address if one is configured
*
* @parm p_hwfn
diff --git a/drivers/net/qede/base/ecore_iro.h b/drivers/net/qede/base/ecore_iro.h
index 360d7f88..e4dc1c92 100644
--- a/drivers/net/qede/base/ecore_iro.h
+++ b/drivers/net/qede/base/ecore_iro.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/qede/base/ecore_iro_values.h b/drivers/net/qede/base/ecore_iro_values.h
index 41532eeb..b47abeb9 100644
--- a/drivers/net/qede/base/ecore_iro_values.h
+++ b/drivers/net/qede/base/ecore_iro_values.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -13,9 +13,9 @@ static const struct iro iro_arr[51] = {
/* YSTORM_FLOW_CONTROL_MODE_OFFSET */
{ 0x0, 0x0, 0x0, 0x0, 0x8},
/* TSTORM_PORT_STAT_OFFSET(port_id) */
- { 0x4cb0, 0x80, 0x0, 0x0, 0x80},
+ { 0x4cb8, 0x88, 0x0, 0x0, 0x88},
/* TSTORM_LL2_PORT_STAT_OFFSET(port_id) */
- { 0x6508, 0x20, 0x0, 0x0, 0x20},
+ { 0x6530, 0x20, 0x0, 0x0, 0x20},
/* USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) */
{ 0xb00, 0x8, 0x0, 0x0, 0x4},
/* USTORM_FLR_FINAL_ACK_OFFSET(pf_id) */
@@ -27,49 +27,49 @@ static const struct iro iro_arr[51] = {
/* USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) */
{ 0x84, 0x8, 0x0, 0x0, 0x2},
/* XSTORM_INTEG_TEST_DATA_OFFSET */
- { 0x4c40, 0x0, 0x0, 0x0, 0x78},
+ { 0x4c48, 0x0, 0x0, 0x0, 0x78},
/* YSTORM_INTEG_TEST_DATA_OFFSET */
- { 0x3e10, 0x0, 0x0, 0x0, 0x78},
+ { 0x3e38, 0x0, 0x0, 0x0, 0x78},
/* PSTORM_INTEG_TEST_DATA_OFFSET */
- { 0x2b50, 0x0, 0x0, 0x0, 0x78},
+ { 0x2b78, 0x0, 0x0, 0x0, 0x78},
/* TSTORM_INTEG_TEST_DATA_OFFSET */
- { 0x4c38, 0x0, 0x0, 0x0, 0x78},
+ { 0x4c40, 0x0, 0x0, 0x0, 0x78},
/* MSTORM_INTEG_TEST_DATA_OFFSET */
- { 0x4990, 0x0, 0x0, 0x0, 0x78},
+ { 0x4998, 0x0, 0x0, 0x0, 0x78},
/* USTORM_INTEG_TEST_DATA_OFFSET */
- { 0x7f48, 0x0, 0x0, 0x0, 0x78},
+ { 0x7f50, 0x0, 0x0, 0x0, 0x78},
/* TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) */
{ 0xa28, 0x8, 0x0, 0x0, 0x8},
/* CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
- { 0x61e8, 0x10, 0x0, 0x0, 0x10},
+ { 0x6210, 0x10, 0x0, 0x0, 0x10},
/* CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
{ 0xb820, 0x30, 0x0, 0x0, 0x30},
/* CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) */
- { 0x96b8, 0x30, 0x0, 0x0, 0x30},
+ { 0x96c0, 0x30, 0x0, 0x0, 0x30},
/* MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
- { 0x4b60, 0x80, 0x0, 0x0, 0x40},
+ { 0x4b68, 0x80, 0x0, 0x0, 0x40},
/* MSTORM_ETH_PF_PRODS_OFFSET(queue_id) */
{ 0x1f8, 0x4, 0x0, 0x0, 0x4},
/* MSTORM_ETH_VF_PRODS_OFFSET(vf_id,vf_queue_id) */
- { 0x53a0, 0x80, 0x4, 0x0, 0x4},
+ { 0x53a8, 0x80, 0x4, 0x0, 0x4},
/* MSTORM_TPA_TIMEOUT_US_OFFSET */
- { 0xc7c8, 0x0, 0x0, 0x0, 0x4},
+ { 0xc7d0, 0x0, 0x0, 0x0, 0x4},
/* MSTORM_ETH_PF_STAT_OFFSET(pf_id) */
- { 0x4ba0, 0x80, 0x0, 0x0, 0x20},
+ { 0x4ba8, 0x80, 0x0, 0x0, 0x20},
/* USTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
- { 0x8150, 0x40, 0x0, 0x0, 0x30},
+ { 0x8158, 0x40, 0x0, 0x0, 0x30},
/* USTORM_ETH_PF_STAT_OFFSET(pf_id) */
{ 0xe770, 0x60, 0x0, 0x0, 0x60},
/* PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
- { 0x2ce8, 0x80, 0x0, 0x0, 0x38},
+ { 0x2d10, 0x80, 0x0, 0x0, 0x38},
/* PSTORM_ETH_PF_STAT_OFFSET(pf_id) */
- { 0xf2b0, 0x78, 0x0, 0x0, 0x78},
+ { 0xf2b8, 0x78, 0x0, 0x0, 0x78},
/* PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethType_id) */
{ 0x1f8, 0x4, 0x0, 0x0, 0x4},
/* TSTORM_ETH_PRS_INPUT_OFFSET */
- { 0xaef8, 0x0, 0x0, 0x0, 0xf0},
+ { 0xaf20, 0x0, 0x0, 0x0, 0xf0},
/* ETH_RX_RATE_LIMIT_OFFSET(pf_id) */
- { 0xafe8, 0x8, 0x0, 0x0, 0x8},
+ { 0xb010, 0x8, 0x0, 0x0, 0x8},
/* XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) */
{ 0x1f8, 0x8, 0x0, 0x0, 0x8},
/* YSTORM_TOE_CQ_PROD_OFFSET(rss_id) */
@@ -81,37 +81,37 @@ static const struct iro iro_arr[51] = {
/* TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) */
{ 0x0, 0x8, 0x0, 0x0, 0x8},
/* TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
- { 0x200, 0x18, 0x8, 0x0, 0x8},
+ { 0x400, 0x18, 0x8, 0x0, 0x8},
/* MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
{ 0xb78, 0x18, 0x8, 0x0, 0x2},
/* TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
- { 0xd878, 0x50, 0x0, 0x0, 0x3c},
+ { 0xd898, 0x50, 0x0, 0x0, 0x3c},
/* MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
{ 0x12908, 0x18, 0x0, 0x0, 0x10},
/* USTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
{ 0x11aa8, 0x40, 0x0, 0x0, 0x18},
/* XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
- { 0xa580, 0x50, 0x0, 0x0, 0x20},
+ { 0xa588, 0x50, 0x0, 0x0, 0x20},
/* YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
- { 0x86f8, 0x40, 0x0, 0x0, 0x28},
+ { 0x8700, 0x40, 0x0, 0x0, 0x28},
/* PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
- { 0x102f8, 0x18, 0x0, 0x0, 0x10},
+ { 0x10300, 0x18, 0x0, 0x0, 0x10},
/* TSTORM_FCOE_RX_STATS_OFFSET(pf_id) */
- { 0xde28, 0x48, 0x0, 0x0, 0x38},
+ { 0xde48, 0x48, 0x0, 0x0, 0x38},
/* PSTORM_FCOE_TX_STATS_OFFSET(pf_id) */
- { 0x10760, 0x20, 0x0, 0x0, 0x20},
+ { 0x10768, 0x20, 0x0, 0x0, 0x20},
/* PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
- { 0x2d20, 0x80, 0x0, 0x0, 0x10},
+ { 0x2d48, 0x80, 0x0, 0x0, 0x10},
/* TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
- { 0x5020, 0x10, 0x0, 0x0, 0x10},
+ { 0x5048, 0x10, 0x0, 0x0, 0x10},
/* XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) */
- { 0xc9b0, 0x30, 0x0, 0x0, 0x10},
+ { 0xc9b8, 0x30, 0x0, 0x0, 0x10},
/* TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) */
- { 0xeec0, 0x10, 0x0, 0x0, 0x10},
+ { 0xed90, 0x10, 0x0, 0x0, 0x10},
/* YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) */
- { 0xa398, 0x10, 0x0, 0x0, 0x10},
+ { 0xa520, 0x10, 0x0, 0x0, 0x10},
/* PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) */
- { 0x13100, 0x8, 0x0, 0x0, 0x8},
+ { 0x13108, 0x8, 0x0, 0x0, 0x8},
};
#endif /* __IRO_VALUES_H__ */
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index e3afc8a3..91d89e56 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -77,7 +77,8 @@ enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn)
}
#ifdef CONFIG_ECORE_LOCK_ALLOC
- OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock);
+ if (OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock))
+ return ECORE_NOMEM;
#endif
return ECORE_SUCCESS;
@@ -110,6 +111,7 @@ void ecore_l2_free(struct ecore_hwfn *p_hwfn)
break;
OSAL_VFREE(p_hwfn->p_dev,
p_hwfn->p_l2_info->pp_qid_usage[i]);
+ p_hwfn->p_l2_info->pp_qid_usage[i] = OSAL_NULL;
}
#ifdef CONFIG_ECORE_LOCK_ALLOC
@@ -119,6 +121,7 @@ void ecore_l2_free(struct ecore_hwfn *p_hwfn)
#endif
OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage);
+ p_hwfn->p_l2_info->pp_qid_usage = OSAL_NULL;
out_l2_info:
OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info);
@@ -687,7 +690,7 @@ ecore_sp_update_mcast_bin(struct vport_update_ramrod_data *p_ramrod,
p_ramrod->common.update_approx_mcast_flg = 1;
for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
- u32 *p_bins = (u32 *)p_params->bins;
+ u32 *p_bins = p_params->bins;
p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
}
@@ -1185,11 +1188,20 @@ ecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn,
void OSAL_IOMEM * *pp_doorbell)
{
enum _ecore_status_t rc;
+ u16 pq_id;
+
+ /* TODO - set tc in the pq_params for multi-cos.
+ * If pacing is enabled then select queue according to
+ * rate limiter availability otherwise select queue based
+ * on multi cos.
+ */
+ if (IS_ECORE_PACING(p_hwfn))
+ pq_id = ecore_get_cm_pq_idx_rl(p_hwfn, p_cid->rel.queue_id);
+ else
+ pq_id = ecore_get_cm_pq_idx_mcos(p_hwfn, tc);
- /* TODO - set tc in the pq_params for multi-cos */
- rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
- pbl_addr, pbl_size,
- ecore_get_cm_pq_idx_mcos(p_hwfn, tc));
+ rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid, pbl_addr,
+ pbl_size, pq_id);
if (rc != ECORE_SUCCESS)
return rc;
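
A minimal standalone sketch of the PQ selection introduced above. The two helpers below are stand-ins for ecore_get_cm_pq_idx_rl()/ecore_get_cm_pq_idx_mcos(), whose real lookups live in the QM code; the bodies here are placeholders, not the actual mappings.

    #include <stdint.h>

    /* Placeholder lookups; the real ecore helpers index into the QM PQ map. */
    static uint16_t pq_idx_from_rate_limiter(uint16_t queue_id) { return 64 + queue_id; }
    static uint16_t pq_idx_from_traffic_class(uint8_t tc)       { return tc; }

    /* With pacing, every Tx queue gets its own rate-limited PQ; without it,
     * the PQ is chosen per traffic class for multi-CoS scheduling.
     */
    static uint16_t pick_tx_pq(int pacing_enabled, uint16_t queue_id, uint8_t tc)
    {
    	return pacing_enabled ? pq_idx_from_rate_limiter(queue_id)
    			      : pq_idx_from_traffic_class(tc);
    }
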
@@ -1556,8 +1568,8 @@ ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data)
{
- unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
+ u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
u8 abs_vport_id = 0;
@@ -1596,8 +1608,7 @@ ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
/* explicitly clear out the entire vector */
OSAL_MEMSET(&p_ramrod->approx_mcast.bins,
0, sizeof(p_ramrod->approx_mcast.bins));
- OSAL_MEMSET(bins, 0, sizeof(unsigned long) *
- ETH_MULTICAST_MAC_BINS_IN_REGS);
+ OSAL_MEMSET(bins, 0, sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
/* filter ADD op is explicit set op and it removes
* any existing filters for the vport.
*/
@@ -1606,16 +1617,15 @@ ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
u32 bit;
bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
- OSAL_SET_BIT(bit, bins);
+ bins[bit / 32] |= 1 << (bit % 32);
}
/* Convert to correct endianity */
for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
struct vport_update_ramrod_mcast *p_ramrod_bins;
- u32 *p_bins = (u32 *)bins;
p_ramrod_bins = &p_ramrod->approx_mcast;
- p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
+ p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(bins[i]);
}
}
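
The switch from an unsigned long bitmap to a fixed u32 array keeps the bin layout identical on 32- and 64-bit hosts, so the per-word little-endian conversion always operates on 32-bit chunks. A minimal sketch of the indexing used above, assuming 8 registers of 32 bins each (which is what the new u32 bins[8] declaration implies); the macro names are stand-ins, not the real ecore symbols.

    #include <stdint.h>

    #define MCAST_BIN_REGS   8                      /* stand-in for ETH_MULTICAST_MAC_BINS_IN_REGS */
    #define MCAST_BIN_COUNT  (MCAST_BIN_REGS * 32)  /* one bit per approximate-multicast bin */

    static void set_mcast_bin(uint32_t bins[MCAST_BIN_REGS], unsigned int bin)
    {
    	/* Word index = bin / 32, bit inside that word = bin % 32. */
    	if (bin < MCAST_BIN_COUNT)
    		bins[bin / 32] |= 1u << (bin % 32);
    }
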
@@ -1945,6 +1955,11 @@ static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
p_ah->tx_1519_to_max_byte_packets =
port_stats.eth.u1.ah1.t1519_to_max;
}
+
+ p_common->link_change_count = ecore_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port,
+ link_change_count));
}
void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
@@ -2061,11 +2076,14 @@ void ecore_reset_vport_stats(struct ecore_dev *p_dev)
/* PORT statistics are not necessarily reset, so we need to
* read and create a baseline for future statistics.
+ * Link change stat is maintained by MFW, return its value as is.
*/
if (!p_dev->reset_stats)
DP_INFO(p_dev, "Reset stats not allocated\n");
- else
+ else {
_ecore_get_vport_stats(p_dev, p_dev->reset_stats);
+ p_dev->reset_stats->common.link_change_count = 0;
+ }
}
void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
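
A worked illustration of the baseline idea, under the assumption (stated in the comment above) that reported statistics are computed as the current counter minus the stored reset baseline; forcing the link_change_count baseline to zero therefore passes the MFW-maintained counter through unchanged.

    #include <stdint.h>

    static uint64_t reported_stat(uint64_t current, uint64_t baseline)
    {
    	return current - baseline;
    }
    /* Example: counters read as {rx_pkts: 1000, link_changes: 3} with baselines
     * {rx_pkts: 400, link_changes: 0} report as {600, 3} -- the link-change
     * count is returned as-is because its baseline is always zero.
     */
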
@@ -2150,7 +2168,7 @@ ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
p_ramrod->flow_id_valid = 0;
p_ramrod->flow_id = 0;
- p_ramrod->vport_id = abs_vport_id;
+ p_ramrod->vport_id = OSAL_CPU_TO_LE16((u16)abs_vport_id);
p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER
: GFT_DELETE_FILTER;
@@ -2267,3 +2285,22 @@ out:
return rc;
}
+
+enum _ecore_status_t
+ecore_eth_tx_queue_maxrate(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_queue_cid *p_cid, u32 rate)
+{
+ struct ecore_mcp_link_state *p_link;
+ u8 vport;
+
+ vport = (u8)ecore_get_qm_vport_idx_rl(p_hwfn, p_cid->rel.queue_id);
+ p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "About to rate limit qm vport %d for queue %d with rate %d\n",
+ vport, p_cid->rel.queue_id, rate);
+
+ return ecore_init_vport_rl(p_hwfn, p_ptt, vport, rate,
+ p_link->speed);
+}
diff --git a/drivers/net/qede/base/ecore_l2.h b/drivers/net/qede/base/ecore_l2.h
index f4212cf2..bea6a6df 100644
--- a/drivers/net/qede/base/ecore_l2.h
+++ b/drivers/net/qede/base/ecore_l2.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/qede/base/ecore_l2_api.h b/drivers/net/qede/base/ecore_l2_api.h
index ed9837bf..43ebbd12 100644
--- a/drivers/net/qede/base/ecore_l2_api.h
+++ b/drivers/net/qede/base/ecore_l2_api.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -332,7 +332,7 @@ struct ecore_sp_vport_update_params {
u8 anti_spoofing_en;
u8 update_accept_any_vlan_flg;
u8 accept_any_vlan;
- unsigned long bins[8];
+ u32 bins[8];
struct ecore_rss_params *rss_params;
struct ecore_filter_accept_flags accept_flags;
struct ecore_sge_tpa_params *sge_tpa_params;
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index 8edd2e96..784d28c5 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -9,6 +9,7 @@
#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_status.h"
+#include "nvm_cfg.h"
#include "ecore_mcp.h"
#include "mcp_public.h"
#include "reg_addr.h"
@@ -240,15 +241,24 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
/* Allocate mcp_info structure */
p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
- sizeof(*p_hwfn->mcp_info));
- if (!p_hwfn->mcp_info)
- goto err;
+ sizeof(*p_hwfn->mcp_info));
+ if (!p_hwfn->mcp_info) {
+ DP_NOTICE(p_hwfn, false, "Failed to allocate mcp_info\n");
+ return ECORE_NOMEM;
+ }
p_info = p_hwfn->mcp_info;
/* Initialize the MFW spinlocks */
#ifdef CONFIG_ECORE_LOCK_ALLOC
- OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock);
- OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock);
+ if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock)) {
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
+ return ECORE_NOMEM;
+ }
+ if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock)) {
+ OSAL_SPIN_LOCK_DEALLOC(&p_info->cmd_lock);
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
+ return ECORE_NOMEM;
+ }
#endif
OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
OSAL_SPIN_LOCK_INIT(&p_info->link_lock);
@@ -272,7 +282,7 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
err:
- DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate mcp memory\n");
ecore_mcp_free(p_hwfn);
return ECORE_NOMEM;
}
@@ -593,7 +603,7 @@ ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
/* MCP not initialized */
if (!ecore_mcp_is_init(p_hwfn)) {
- DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
+ DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
return ECORE_BUSY;
}
@@ -2121,19 +2131,20 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *p_media_type)
{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
/* TODO - Add support for VFs */
if (IS_VF(p_hwfn->p_dev))
return ECORE_INVAL;
if (!ecore_mcp_is_init(p_hwfn)) {
- DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
+ DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
return ECORE_BUSY;
}
if (!p_ptt) {
*p_media_type = MEDIA_UNSPECIFIED;
- return ECORE_INVAL;
+ rc = ECORE_INVAL;
} else {
*p_media_type = ecore_rd(p_hwfn, p_ptt,
p_hwfn->mcp_info->port_addr +
@@ -2144,6 +2155,197 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_tranceiver_type)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* TODO - Add support for VFs */
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
+ if (!ecore_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
+ return ECORE_BUSY;
+ }
+ if (!p_ptt) {
+ *p_tranceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
+ rc = ECORE_INVAL;
+ } else {
+ *p_tranceiver_type = ecore_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port,
+ transceiver_data));
+ }
+
+ return rc;
+}
+
+static int is_transceiver_ready(u32 transceiver_state, u32 transceiver_type)
+{
+ if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
+ ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
+ (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
+ return 1;
+
+ return 0;
+}
+
+enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_speed_mask)
+{
+ u32 transceiver_data, transceiver_type, transceiver_state;
+
+ ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_data);
+
+ transceiver_state = GET_MFW_FIELD(transceiver_data,
+ ETH_TRANSCEIVER_STATE);
+
+ transceiver_type = GET_MFW_FIELD(transceiver_data,
+ ETH_TRANSCEIVER_TYPE);
+
+ if (is_transceiver_ready(transceiver_state, transceiver_type) == 0)
+ return ECORE_INVAL;
+
+ switch (transceiver_type) {
+ case ETH_TRANSCEIVER_TYPE_1G_LX:
+ case ETH_TRANSCEIVER_TYPE_1G_SX:
+ case ETH_TRANSCEIVER_TYPE_1G_PCC:
+ case ETH_TRANSCEIVER_TYPE_1G_ACC:
+ case ETH_TRANSCEIVER_TYPE_1000BASET:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_10G_SR:
+ case ETH_TRANSCEIVER_TYPE_10G_LR:
+ case ETH_TRANSCEIVER_TYPE_10G_LRM:
+ case ETH_TRANSCEIVER_TYPE_10G_ER:
+ case ETH_TRANSCEIVER_TYPE_10G_PCC:
+ case ETH_TRANSCEIVER_TYPE_10G_ACC:
+ case ETH_TRANSCEIVER_TYPE_4x10G:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_40G_LR4:
+ case ETH_TRANSCEIVER_TYPE_40G_SR4:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_100G_AOC:
+ case ETH_TRANSCEIVER_TYPE_100G_SR4:
+ case ETH_TRANSCEIVER_TYPE_100G_LR4:
+ case ETH_TRANSCEIVER_TYPE_100G_ER4:
+ case ETH_TRANSCEIVER_TYPE_100G_ACC:
+ *p_speed_mask =
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_25G_SR:
+ case ETH_TRANSCEIVER_TYPE_25G_LR:
+ case ETH_TRANSCEIVER_TYPE_25G_AOC:
+ case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
+ case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
+ case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_25G_CA_N:
+ case ETH_TRANSCEIVER_TYPE_25G_CA_S:
+ case ETH_TRANSCEIVER_TYPE_25G_CA_L:
+ case ETH_TRANSCEIVER_TYPE_4x25G_CR:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_40G_CR4:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_100G_CR4:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
+ *p_speed_mask =
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
+ *p_speed_mask =
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_XLPPI:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_10G_BASET:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+
+ default:
+		DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n",
+ transceiver_type);
+ *p_speed_mask = 0xff;
+ break;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_board_config)
+{
+ u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* TODO - Add support for VFs */
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
+ if (!ecore_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
+ return ECORE_BUSY;
+ }
+ if (!p_ptt) {
+ *p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
+ rc = ECORE_INVAL;
+ } else {
+ nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt,
+ MISC_REG_GEN_PURP_CR0);
+ nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt,
+ nvm_cfg_addr + 4);
+ port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+ *p_board_config = ecore_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ offsetof(struct nvm_cfg1_port,
+ board_cfg));
+ }
+
+ return rc;
+}
+
/* @DPDK */
/* Old MFW has a global configuration for all PFs regarding RDMA support */
static void
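
The transceiver word read from the port's public data packs a state field and a type field; the speed-mask helper above only trusts the type once the state reports the module as present and not updating. A rough standalone illustration of that check follows; the bit layout, masks, and names below are made up for the example and are not the real ETH_TRANSCEIVER_* encoding or the GET_MFW_FIELD macro.

    #include <stdint.h>

    /* Hypothetical packing: state in bits 0..7, type in bits 8..15. */
    #define XCVR_STATE_MASK     0x000000ffu
    #define XCVR_STATE_SHIFT    0
    #define XCVR_TYPE_MASK      0x0000ff00u
    #define XCVR_TYPE_SHIFT     8

    #define XCVR_STATE_PRESENT  0x1u
    #define XCVR_STATE_UPDATING 0x2u
    #define XCVR_TYPE_NONE      0x0u

    static int xcvr_ready(uint32_t data)
    {
    	uint32_t state = (data & XCVR_STATE_MASK) >> XCVR_STATE_SHIFT;
    	uint32_t type  = (data & XCVR_TYPE_MASK) >> XCVR_TYPE_SHIFT;

    	/* Present, not mid-update, and a real module type. */
    	return (state & XCVR_STATE_PRESENT) &&
    	       !(state & XCVR_STATE_UPDATING) &&
    	       type != XCVR_TYPE_NONE;
    }
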
diff --git a/drivers/net/qede/base/ecore_mcp.h b/drivers/net/qede/base/ecore_mcp.h
index 6afaf7de..c422736f 100644
--- a/drivers/net/qede/base/ecore_mcp.h
+++ b/drivers/net/qede/base/ecore_mcp.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/qede/base/ecore_mcp_api.h b/drivers/net/qede/base/ecore_mcp_api.h
index 225890e2..06b33bb8 100644
--- a/drivers/net/qede/base/ecore_mcp_api.h
+++ b/drivers/net/qede/base/ecore_mcp_api.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -595,6 +595,52 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
u32 *media_type);
/**
+ * @brief Get transceiver data of the port.
+ *
+ * @param p_dev - ecore dev pointer
+ * @param p_ptt
+ * @param p_transceiver_type - transceiver data value
+ *
+ * @return enum _ecore_status_t -
+ * ECORE_SUCCESS - Operation was successful.
+ * ECORE_BUSY - Operation failed
+ */
+enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_tranceiver_type);
+
+/**
+ * @brief Get transceiver supported speed mask.
+ *
+ * @param p_dev - ecore dev pointer
+ * @param p_ptt
+ * @param p_speed_mask - Bit mask of all supported speeds.
+ *
+ * @return enum _ecore_status_t -
+ * ECORE_SUCCESS - Operation was successful.
+ * ECORE_BUSY - Operation failed
+ */
+
+enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_speed_mask);
+
+/**
+ * @brief Get board configuration.
+ *
+ * @param p_dev - ecore dev pointer
+ * @param p_ptt
+ * @param p_board_config - Board config.
+ *
+ * @return enum _ecore_status_t -
+ * ECORE_SUCCESS - Operation was successful.
+ * ECORE_BUSY - Operation failed
+ */
+enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_board_config);
+
+/**
* @brief - Sends a command to the MCP mailbox.
*
* @param p_hwfn - hw function
diff --git a/drivers/net/qede/base/ecore_mng_tlv.c b/drivers/net/qede/base/ecore_mng_tlv.c
index 3a1de094..b48076a3 100644
--- a/drivers/net/qede/base/ecore_mng_tlv.c
+++ b/drivers/net/qede/base/ecore_mng_tlv.c
@@ -1,3 +1,11 @@
+/*
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_status.h"
diff --git a/drivers/net/qede/base/ecore_proto_if.h b/drivers/net/qede/base/ecore_proto_if.h
index abca7408..f049d821 100644
--- a/drivers/net/qede/base/ecore_proto_if.h
+++ b/drivers/net/qede/base/ecore_proto_if.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -31,6 +31,9 @@ struct ecore_eth_pf_params {
* This will set the maximal number of configured steering-filters.
*/
u32 num_arfs_filters;
+
+	/* To allow VF to change its MAC despite the PF having set a forced MAC. */
+ bool allow_vf_mac_change;
};
/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
diff --git a/drivers/net/qede/base/ecore_rt_defs.h b/drivers/net/qede/base/ecore_rt_defs.h
index 1d085815..7dec2dd5 100644
--- a/drivers/net/qede/base/ecore_rt_defs.h
+++ b/drivers/net/qede/base/ecore_rt_defs.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -205,329 +205,336 @@
#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 34082
#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 34083
#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 34211
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 34212
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 34213
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 34214
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 34215
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 34216
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 34217
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 34218
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 34219
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 34220
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 34221
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 34222
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 34223
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 34224
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 34225
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 34226
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 34227
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 34228
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 34229
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 34230
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 34231
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 34232
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 34233
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 34234
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 34235
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 34236
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 34237
-#define QM_REG_PQTX2PF_0_RT_OFFSET 34238
-#define QM_REG_PQTX2PF_1_RT_OFFSET 34239
-#define QM_REG_PQTX2PF_2_RT_OFFSET 34240
-#define QM_REG_PQTX2PF_3_RT_OFFSET 34241
-#define QM_REG_PQTX2PF_4_RT_OFFSET 34242
-#define QM_REG_PQTX2PF_5_RT_OFFSET 34243
-#define QM_REG_PQTX2PF_6_RT_OFFSET 34244
-#define QM_REG_PQTX2PF_7_RT_OFFSET 34245
-#define QM_REG_PQTX2PF_8_RT_OFFSET 34246
-#define QM_REG_PQTX2PF_9_RT_OFFSET 34247
-#define QM_REG_PQTX2PF_10_RT_OFFSET 34248
-#define QM_REG_PQTX2PF_11_RT_OFFSET 34249
-#define QM_REG_PQTX2PF_12_RT_OFFSET 34250
-#define QM_REG_PQTX2PF_13_RT_OFFSET 34251
-#define QM_REG_PQTX2PF_14_RT_OFFSET 34252
-#define QM_REG_PQTX2PF_15_RT_OFFSET 34253
-#define QM_REG_PQTX2PF_16_RT_OFFSET 34254
-#define QM_REG_PQTX2PF_17_RT_OFFSET 34255
-#define QM_REG_PQTX2PF_18_RT_OFFSET 34256
-#define QM_REG_PQTX2PF_19_RT_OFFSET 34257
-#define QM_REG_PQTX2PF_20_RT_OFFSET 34258
-#define QM_REG_PQTX2PF_21_RT_OFFSET 34259
-#define QM_REG_PQTX2PF_22_RT_OFFSET 34260
-#define QM_REG_PQTX2PF_23_RT_OFFSET 34261
-#define QM_REG_PQTX2PF_24_RT_OFFSET 34262
-#define QM_REG_PQTX2PF_25_RT_OFFSET 34263
-#define QM_REG_PQTX2PF_26_RT_OFFSET 34264
-#define QM_REG_PQTX2PF_27_RT_OFFSET 34265
-#define QM_REG_PQTX2PF_28_RT_OFFSET 34266
-#define QM_REG_PQTX2PF_29_RT_OFFSET 34267
-#define QM_REG_PQTX2PF_30_RT_OFFSET 34268
-#define QM_REG_PQTX2PF_31_RT_OFFSET 34269
-#define QM_REG_PQTX2PF_32_RT_OFFSET 34270
-#define QM_REG_PQTX2PF_33_RT_OFFSET 34271
-#define QM_REG_PQTX2PF_34_RT_OFFSET 34272
-#define QM_REG_PQTX2PF_35_RT_OFFSET 34273
-#define QM_REG_PQTX2PF_36_RT_OFFSET 34274
-#define QM_REG_PQTX2PF_37_RT_OFFSET 34275
-#define QM_REG_PQTX2PF_38_RT_OFFSET 34276
-#define QM_REG_PQTX2PF_39_RT_OFFSET 34277
-#define QM_REG_PQTX2PF_40_RT_OFFSET 34278
-#define QM_REG_PQTX2PF_41_RT_OFFSET 34279
-#define QM_REG_PQTX2PF_42_RT_OFFSET 34280
-#define QM_REG_PQTX2PF_43_RT_OFFSET 34281
-#define QM_REG_PQTX2PF_44_RT_OFFSET 34282
-#define QM_REG_PQTX2PF_45_RT_OFFSET 34283
-#define QM_REG_PQTX2PF_46_RT_OFFSET 34284
-#define QM_REG_PQTX2PF_47_RT_OFFSET 34285
-#define QM_REG_PQTX2PF_48_RT_OFFSET 34286
-#define QM_REG_PQTX2PF_49_RT_OFFSET 34287
-#define QM_REG_PQTX2PF_50_RT_OFFSET 34288
-#define QM_REG_PQTX2PF_51_RT_OFFSET 34289
-#define QM_REG_PQTX2PF_52_RT_OFFSET 34290
-#define QM_REG_PQTX2PF_53_RT_OFFSET 34291
-#define QM_REG_PQTX2PF_54_RT_OFFSET 34292
-#define QM_REG_PQTX2PF_55_RT_OFFSET 34293
-#define QM_REG_PQTX2PF_56_RT_OFFSET 34294
-#define QM_REG_PQTX2PF_57_RT_OFFSET 34295
-#define QM_REG_PQTX2PF_58_RT_OFFSET 34296
-#define QM_REG_PQTX2PF_59_RT_OFFSET 34297
-#define QM_REG_PQTX2PF_60_RT_OFFSET 34298
-#define QM_REG_PQTX2PF_61_RT_OFFSET 34299
-#define QM_REG_PQTX2PF_62_RT_OFFSET 34300
-#define QM_REG_PQTX2PF_63_RT_OFFSET 34301
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 34302
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 34303
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 34304
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 34305
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 34306
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 34307
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 34308
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 34309
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 34310
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 34311
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 34312
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 34313
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 34314
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 34315
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 34316
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 34317
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 34318
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 34319
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 34320
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 34321
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 34322
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 34323
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 34324
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 34325
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 34326
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 34327
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 34328
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 34329
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 34330
+#define QM_REG_PTRTBLOTHER_RT_OFFSET 34211
+#define QM_REG_PTRTBLOTHER_RT_SIZE 256
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 34467
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 34468
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 34469
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 34470
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 34471
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 34472
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 34473
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 34474
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 34475
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 34476
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 34477
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 34478
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 34479
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 34480
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 34481
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 34482
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 34483
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 34484
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 34485
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 34486
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 34487
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 34488
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 34489
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 34490
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 34491
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 34492
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 34493
+#define QM_REG_PQTX2PF_0_RT_OFFSET 34494
+#define QM_REG_PQTX2PF_1_RT_OFFSET 34495
+#define QM_REG_PQTX2PF_2_RT_OFFSET 34496
+#define QM_REG_PQTX2PF_3_RT_OFFSET 34497
+#define QM_REG_PQTX2PF_4_RT_OFFSET 34498
+#define QM_REG_PQTX2PF_5_RT_OFFSET 34499
+#define QM_REG_PQTX2PF_6_RT_OFFSET 34500
+#define QM_REG_PQTX2PF_7_RT_OFFSET 34501
+#define QM_REG_PQTX2PF_8_RT_OFFSET 34502
+#define QM_REG_PQTX2PF_9_RT_OFFSET 34503
+#define QM_REG_PQTX2PF_10_RT_OFFSET 34504
+#define QM_REG_PQTX2PF_11_RT_OFFSET 34505
+#define QM_REG_PQTX2PF_12_RT_OFFSET 34506
+#define QM_REG_PQTX2PF_13_RT_OFFSET 34507
+#define QM_REG_PQTX2PF_14_RT_OFFSET 34508
+#define QM_REG_PQTX2PF_15_RT_OFFSET 34509
+#define QM_REG_PQTX2PF_16_RT_OFFSET 34510
+#define QM_REG_PQTX2PF_17_RT_OFFSET 34511
+#define QM_REG_PQTX2PF_18_RT_OFFSET 34512
+#define QM_REG_PQTX2PF_19_RT_OFFSET 34513
+#define QM_REG_PQTX2PF_20_RT_OFFSET 34514
+#define QM_REG_PQTX2PF_21_RT_OFFSET 34515
+#define QM_REG_PQTX2PF_22_RT_OFFSET 34516
+#define QM_REG_PQTX2PF_23_RT_OFFSET 34517
+#define QM_REG_PQTX2PF_24_RT_OFFSET 34518
+#define QM_REG_PQTX2PF_25_RT_OFFSET 34519
+#define QM_REG_PQTX2PF_26_RT_OFFSET 34520
+#define QM_REG_PQTX2PF_27_RT_OFFSET 34521
+#define QM_REG_PQTX2PF_28_RT_OFFSET 34522
+#define QM_REG_PQTX2PF_29_RT_OFFSET 34523
+#define QM_REG_PQTX2PF_30_RT_OFFSET 34524
+#define QM_REG_PQTX2PF_31_RT_OFFSET 34525
+#define QM_REG_PQTX2PF_32_RT_OFFSET 34526
+#define QM_REG_PQTX2PF_33_RT_OFFSET 34527
+#define QM_REG_PQTX2PF_34_RT_OFFSET 34528
+#define QM_REG_PQTX2PF_35_RT_OFFSET 34529
+#define QM_REG_PQTX2PF_36_RT_OFFSET 34530
+#define QM_REG_PQTX2PF_37_RT_OFFSET 34531
+#define QM_REG_PQTX2PF_38_RT_OFFSET 34532
+#define QM_REG_PQTX2PF_39_RT_OFFSET 34533
+#define QM_REG_PQTX2PF_40_RT_OFFSET 34534
+#define QM_REG_PQTX2PF_41_RT_OFFSET 34535
+#define QM_REG_PQTX2PF_42_RT_OFFSET 34536
+#define QM_REG_PQTX2PF_43_RT_OFFSET 34537
+#define QM_REG_PQTX2PF_44_RT_OFFSET 34538
+#define QM_REG_PQTX2PF_45_RT_OFFSET 34539
+#define QM_REG_PQTX2PF_46_RT_OFFSET 34540
+#define QM_REG_PQTX2PF_47_RT_OFFSET 34541
+#define QM_REG_PQTX2PF_48_RT_OFFSET 34542
+#define QM_REG_PQTX2PF_49_RT_OFFSET 34543
+#define QM_REG_PQTX2PF_50_RT_OFFSET 34544
+#define QM_REG_PQTX2PF_51_RT_OFFSET 34545
+#define QM_REG_PQTX2PF_52_RT_OFFSET 34546
+#define QM_REG_PQTX2PF_53_RT_OFFSET 34547
+#define QM_REG_PQTX2PF_54_RT_OFFSET 34548
+#define QM_REG_PQTX2PF_55_RT_OFFSET 34549
+#define QM_REG_PQTX2PF_56_RT_OFFSET 34550
+#define QM_REG_PQTX2PF_57_RT_OFFSET 34551
+#define QM_REG_PQTX2PF_58_RT_OFFSET 34552
+#define QM_REG_PQTX2PF_59_RT_OFFSET 34553
+#define QM_REG_PQTX2PF_60_RT_OFFSET 34554
+#define QM_REG_PQTX2PF_61_RT_OFFSET 34555
+#define QM_REG_PQTX2PF_62_RT_OFFSET 34556
+#define QM_REG_PQTX2PF_63_RT_OFFSET 34557
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 34558
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 34559
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 34560
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 34561
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 34562
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 34563
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 34564
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 34565
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 34566
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 34567
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 34568
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 34569
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 34570
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 34571
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 34572
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 34573
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 34574
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 34575
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 34576
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 34577
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 34578
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 34579
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 34580
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 34581
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 34582
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 34583
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 34584
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 34585
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 34586
#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 34586
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 34842
#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 34842
+#define QM_REG_RLGLBLCRD_RT_OFFSET 35098
#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 35098
-#define QM_REG_RLPFPERIOD_RT_OFFSET 35099
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 35100
-#define QM_REG_RLPFINCVAL_RT_OFFSET 35101
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 35354
+#define QM_REG_RLPFPERIOD_RT_OFFSET 35355
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 35356
+#define QM_REG_RLPFINCVAL_RT_OFFSET 35357
#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 35117
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 35373
#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 35133
+#define QM_REG_RLPFCRD_RT_OFFSET 35389
#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 35149
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 35150
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 35151
+#define QM_REG_RLPFENABLE_RT_OFFSET 35405
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 35406
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 35407
#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 35167
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 35423
#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 35183
+#define QM_REG_WFQPFCRD_RT_OFFSET 35439
#define QM_REG_WFQPFCRD_RT_SIZE 256
-#define QM_REG_WFQPFENABLE_RT_OFFSET 35439
-#define QM_REG_WFQVPENABLE_RT_OFFSET 35440
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 35441
+#define QM_REG_WFQPFENABLE_RT_OFFSET 35695
+#define QM_REG_WFQVPENABLE_RT_OFFSET 35696
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 35697
#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 35953
+#define QM_REG_TXPQMAP_RT_OFFSET 36209
#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 36465
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 36721
#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 36977
+#define QM_REG_WFQVPCRD_RT_OFFSET 37233
#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 37489
+#define QM_REG_WFQVPMAP_RT_OFFSET 37745
#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 38001
+#define QM_REG_PTRTBLTX_RT_OFFSET 38257
+#define QM_REG_PTRTBLTX_RT_SIZE 1024
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 39281
#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320
-#define QM_REG_VOQCRDLINE_RT_OFFSET 38321
+#define QM_REG_VOQCRDLINE_RT_OFFSET 39601
#define QM_REG_VOQCRDLINE_RT_SIZE 36
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 38357
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 39637
#define QM_REG_VOQINITCRDLINE_RT_SIZE 36
-#define QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET 38393
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 38394
-#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 38395
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 38396
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 38397
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 38398
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 38399
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 38400
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 38401
+#define QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET 39673
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 39674
+#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 39675
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 39676
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 39677
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 39678
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 39679
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 39680
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 39681
#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 38405
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 39685
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 38409
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 39689
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 38441
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 39721
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 38457
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 39737
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 38473
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 39753
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 38489
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 39769
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 38505
-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 38506
-#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 38507
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 39785
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 39786
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39787
#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 38515
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39795
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 39539
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40819
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 40051
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41331
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 40563
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41843
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 41075
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42355
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 41587
+#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42867
#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 41619
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 41620
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 41621
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 41622
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 41623
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 41624
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 41625
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 41626
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 41627
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 41628
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 41629
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 41630
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 41631
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 41632
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 41633
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 41634
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 41635
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 41636
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 41637
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 41638
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 41639
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 41640
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 41641
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 41642
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 41643
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 41644
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 41645
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 41646
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 41647
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 41648
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 41649
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 41650
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 41651
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 41652
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 41653
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 41654
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 41655
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 41656
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 41657
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 41658
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 41659
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 41660
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 41661
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 41662
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 41663
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 41664
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 41665
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 41666
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 41667
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 41668
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 41669
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 41670
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 41671
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 41672
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 41673
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 41674
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 41675
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 41676
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 41677
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 41678
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 41679
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 41680
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 41681
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 41682
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 41683
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 41684
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 41685
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 41686
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 41687
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 41688
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 41689
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 41690
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 41691
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 41692
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 41693
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 41694
-#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 41695
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 41696
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 41697
-#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 41698
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 41699
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 41700
-#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 41701
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 41702
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 41703
-#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 41704
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 41705
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 41706
-#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 41707
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 41708
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 41709
-#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 41710
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 41711
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 41712
-#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 41713
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 41714
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 41715
-#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 41716
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 41717
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 41718
-#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 41719
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 41720
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 41721
-#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 41722
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 41723
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 41724
-#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 41725
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 41726
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 41727
-#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 41728
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 41729
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 41730
-#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 41731
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 41732
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 41733
-#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 41734
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 41735
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 41736
-#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 41737
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 41738
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 41739
-#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 41740
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 41741
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 41742
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42899
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42900
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42901
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42902
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42903
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42904
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42905
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42906
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42907
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42908
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42909
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42910
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42911
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42912
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42913
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42914
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42915
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42916
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42917
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42918
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42919
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42920
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42921
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42922
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42923
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42924
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42925
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42926
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42927
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42928
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42929
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42930
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42931
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42932
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42933
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42934
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42935
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42936
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42937
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42938
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42939
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42940
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42941
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42942
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42943
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42944
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42945
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42946
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42947
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42948
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42949
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42950
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42951
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42952
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42953
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42954
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42955
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42956
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42957
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42958
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42959
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42960
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42961
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42962
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42963
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42964
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42965
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42966
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42967
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42968
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42969
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42970
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42971
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42972
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42973
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42974
+#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42975
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42976
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42977
+#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42978
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42979
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42980
+#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42981
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42982
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42983
+#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42984
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42985
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42986
+#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42987
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42988
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42989
+#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42990
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42991
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42992
+#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42993
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42994
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42995
+#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42996
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42997
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42998
+#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42999
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 43000
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43001
+#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43002
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43003
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43004
+#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43005
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43006
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43007
+#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43008
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43009
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43010
+#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43011
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43012
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43013
+#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43014
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43015
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43016
+#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43017
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43018
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43019
+#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43020
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43021
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43022
-#define RUNTIME_ARRAY_SIZE 41743
+#define RUNTIME_ARRAY_SIZE 43023
+
+/* Init Callbacks */
+#define DMAE_READY_CB 0
#endif /* __RT_DEFS_H__ */
diff --git a/drivers/net/qede/base/ecore_sp_api.h b/drivers/net/qede/base/ecore_sp_api.h
index 86e84964..469bf1d6 100644
--- a/drivers/net/qede/base/ecore_sp_api.h
+++ b/drivers/net/qede/base/ecore_sp_api.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/qede/base/ecore_sp_commands.c b/drivers/net/qede/base/ecore_sp_commands.c
index 7598e7a6..47c1febf 100644
--- a/drivers/net/qede/base/ecore_sp_commands.c
+++ b/drivers/net/qede/base/ecore_sp_commands.c
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -295,6 +295,7 @@ ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
}
#define ETH_P_8021Q 0x8100
+#define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
@@ -308,7 +309,7 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
struct ecore_sp_init_data init_data;
enum _ecore_status_t rc = ECORE_NOTIMPL;
u8 page_cnt;
- int i;
+ u8 i;
/* update initial eq producer */
ecore_eq_prod_update(p_hwfn,
@@ -343,18 +344,27 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
p_ramrod->outer_tag_config.outer_tag.tci =
OSAL_CPU_TO_LE16(p_hwfn->hw_info.ovlan);
+ if (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING, &p_hwfn->p_dev->mf_bits)) {
+ p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021Q;
+ } else if (OSAL_TEST_BIT(ECORE_MF_8021AD_TAGGING,
+ &p_hwfn->p_dev->mf_bits)) {
+ p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021AD;
+ p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
+ }
+
+ p_ramrod->outer_tag_config.pri_map_valid = 1;
+ for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++)
+ p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = i;
+	/* enable_stag_pri_change should be set if the port is in BD mode,
+	 * or UFP with Host Control mode, or UFP with DCB over the base interface.
+ */
if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) {
- p_ramrod->outer_tag_config.outer_tag.tpid =
- OSAL_CPU_TO_LE16(ETH_P_8021Q);
- if (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS)
+ if ((p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) ||
+ (p_hwfn->p_dcbx_info->results.dcbx_enabled))
p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
else
p_ramrod->outer_tag_config.enable_stag_pri_change = 0;
- p_ramrod->outer_tag_config.pri_map_valid = 1;
- for (i = 0; i < 8; i++)
- p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] =
- (u8)i;
}
/* Place EQ address in RAMROD */
@@ -451,7 +461,8 @@ enum _ecore_status_t ecore_sp_pf_update_ufp(struct ecore_hwfn *p_hwfn)
return rc;
p_ent->ramrod.pf_update.update_enable_stag_pri_change = true;
- if (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS)
+ if ((p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) ||
+ (p_hwfn->p_dcbx_info->results.dcbx_enabled))
p_ent->ramrod.pf_update.enable_stag_pri_change = 1;
else
p_ent->ramrod.pf_update.enable_stag_pri_change = 0;
diff --git a/drivers/net/qede/base/ecore_sp_commands.h b/drivers/net/qede/base/ecore_sp_commands.h
index 98009c65..d160a76e 100644
--- a/drivers/net/qede/base/ecore_sp_commands.h
+++ b/drivers/net/qede/base/ecore_sp_commands.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/qede/base/ecore_spq.c b/drivers/net/qede/base/ecore_spq.c
index 70ffa8cd..db169e6e 100644
--- a/drivers/net/qede/base/ecore_spq.c
+++ b/drivers/net/qede/base/ecore_spq.c
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -30,7 +30,7 @@
#define SPQ_BLOCK_DELAY_MAX_ITER (10)
#define SPQ_BLOCK_DELAY_US (10)
-#define SPQ_BLOCK_SLEEP_MAX_ITER (1000)
+#define SPQ_BLOCK_SLEEP_MAX_ITER (200)
#define SPQ_BLOCK_SLEEP_MS (5)
/***************************************************************************
@@ -60,8 +60,12 @@ static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
u32 iter_cnt;
comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
- iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
+ iter_cnt = sleep_between_iter ? p_hwfn->p_spq->block_sleep_max_iter
: SPQ_BLOCK_DELAY_MAX_ITER;
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && sleep_between_iter)
+ iter_cnt *= 5;
+#endif
while (iter_cnt--) {
OSAL_POLL_MODE_DPC(p_hwfn);
@@ -138,6 +142,14 @@ err:
return ECORE_BUSY;
}
+void ecore_set_spq_block_timeout(struct ecore_hwfn *p_hwfn,
+ u32 spq_timeout_ms)
+{
+ p_hwfn->p_spq->block_sleep_max_iter = spq_timeout_ms ?
+ spq_timeout_ms / SPQ_BLOCK_SLEEP_MS :
+ SPQ_BLOCK_SLEEP_MAX_ITER;
+}
+
/***************************************************************************
* SPQ entries inner API
***************************************************************************/
@@ -389,7 +401,7 @@ enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
/* Allocate EQ struct */
p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
if (!p_eq) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed to allocate `struct ecore_eq'\n");
return ECORE_NOMEM;
}
@@ -402,7 +414,7 @@ enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
num_elem,
sizeof(union event_ring_element),
&p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate eq chain\n");
goto eq_allocate_fail;
}
@@ -547,8 +559,7 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
p_spq =
OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
if (!p_spq) {
- DP_NOTICE(p_hwfn, true,
- "Failed to allocate `struct ecore_spq'\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_spq'\n");
return ECORE_NOMEM;
}
@@ -560,7 +571,7 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
0, /* N/A when the mode is SINGLE */
sizeof(struct slow_path_element),
&p_spq->chain, OSAL_NULL)) {
- DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate spq chain\n");
goto spq_allocate_fail;
}
@@ -576,7 +587,8 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
p_spq->p_phys = p_phys;
#ifdef CONFIG_ECORE_LOCK_ALLOC
- OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock);
+ if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock))
+ goto spq_allocate_fail;
#endif
p_hwfn->p_spq = p_spq;
@@ -630,9 +642,7 @@ ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
if (!p_ent) {
- DP_NOTICE(p_hwfn, true,
- "Failed to allocate an SPQ entry for a pending"
- " ramrod\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate an SPQ entry for a pending ramrod\n");
rc = ECORE_NOMEM;
goto out_unlock;
}
@@ -1013,7 +1023,7 @@ enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
p_consq =
OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
if (!p_consq) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed to allocate `struct ecore_consq'\n");
return ECORE_NOMEM;
}
@@ -1026,7 +1036,7 @@ enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
ECORE_CHAIN_PAGE_SIZE / 0x80,
0x80,
&p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate consq chain");
goto consq_allocate_fail;
}
diff --git a/drivers/net/qede/base/ecore_spq.h b/drivers/net/qede/base/ecore_spq.h
index 526cff08..7d9be3e9 100644
--- a/drivers/net/qede/base/ecore_spq.h
+++ b/drivers/net/qede/base/ecore_spq.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -116,6 +116,9 @@ struct ecore_spq {
dma_addr_t p_phys;
struct ecore_spq_entry *p_virt;
+ /* SPQ max sleep iterations used in __ecore_spq_block() */
+ u32 block_sleep_max_iter;
+
/* Bitmap for handling out-of-order completions */
#define SPQ_RING_SIZE \
(CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element))
@@ -150,6 +153,16 @@ struct ecore_port;
struct ecore_hwfn;
/**
+ * @brief ecore_set_spq_block_timeout - calculates the maximum sleep
+ * iterations used in __ecore_spq_block();
+ *
+ * @param p_hwfn
+ * @param spq_timeout_ms
+ */
+void ecore_set_spq_block_timeout(struct ecore_hwfn *p_hwfn,
+ u32 spq_timeout_ms);
+
+/**
* @brief ecore_spq_post - Posts a Slow hwfn request to FW, or lacking that
* Pends it to the future list.
*
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index b1e26d6f..451aabb1 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -590,8 +590,7 @@ enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
if (!p_sriov) {
- DP_NOTICE(p_hwfn, true,
- "Failed to allocate `struct ecore_sriov'\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sriov'\n");
return ECORE_NOMEM;
}
@@ -648,7 +647,7 @@ enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
sizeof(*p_dev->p_iov_info));
if (!p_dev->p_iov_info) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Can't support IOV due to lack of memory\n");
return ECORE_NOMEM;
}
@@ -1968,7 +1967,8 @@ ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
if (!p_vf->vport_instance)
return ECORE_INVAL;
- if (events & (1 << MAC_ADDR_FORCED)) {
+ if ((events & (1 << MAC_ADDR_FORCED)) ||
+ p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change) {
/* Since there's no way [currently] of removing the MAC,
* we can always assume this means we need to force it.
*/
@@ -1989,7 +1989,11 @@ ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
return rc;
}
- p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
+ p_vf->configured_features |=
+ 1 << VFPF_BULLETIN_MAC_ADDR;
+ else
+ p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
}
if (events & (1 << VLAN_ADDR_FORCED)) {
@@ -2975,8 +2979,7 @@ ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
p_data->update_approx_mcast_flg = 1;
OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
- sizeof(unsigned long) *
- ETH_MULTICAST_MAC_BINS_IN_REGS);
+ sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
}
@@ -4370,7 +4373,11 @@ void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
return;
}
- feature = 1 << MAC_ADDR_FORCED;
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
+ feature = 1 << VFPF_BULLETIN_MAC_ADDR;
+ else
+ feature = 1 << MAC_ADDR_FORCED;
+
OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
vf_info->bulletin.p_virt->valid_bitmap |= feature;
@@ -4411,9 +4418,13 @@ enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
vf_info->bulletin.p_virt->valid_bitmap |= feature;
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
+ ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+
return ECORE_SUCCESS;
}
+#ifndef LINUX_REMOVE
enum _ecore_status_t
ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
bool b_untagged_only, int vfid)
@@ -4470,6 +4481,7 @@ void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
*opaque_fid = vf_info->opaque_fid;
}
+#endif
void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
u16 pvid, int vfid)
@@ -4657,6 +4669,22 @@ u32 ecore_iov_pfvf_msg_length(void)
return sizeof(union pfvf_tlvs);
}
+u8 *ecore_iov_bulletin_get_mac(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf || !p_vf->bulletin.p_virt)
+ return OSAL_NULL;
+
+ if (!(p_vf->bulletin.p_virt->valid_bitmap &
+ (1 << VFPF_BULLETIN_MAC_ADDR)))
+ return OSAL_NULL;
+
+ return p_vf->bulletin.p_virt->mac;
+}
+
u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
struct ecore_vf_info *p_vf;
diff --git a/drivers/net/qede/base/ecore_sriov.h b/drivers/net/qede/base/ecore_sriov.h
index 850b1052..8d846d3e 100644
--- a/drivers/net/qede/base/ecore_sriov.h
+++ b/drivers/net/qede/base/ecore_sriov.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/qede/base/ecore_status.h b/drivers/net/qede/base/ecore_status.h
index c77ec260..3af2b57d 100644
--- a/drivers/net/qede/base/ecore_status.h
+++ b/drivers/net/qede/base/ecore_status.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/qede/base/ecore_utils.h b/drivers/net/qede/base/ecore_utils.h
index 034cf1eb..f6459c35 100644
--- a/drivers/net/qede/base/ecore_utils.h
+++ b/drivers/net/qede/base/ecore_utils.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c
index e0f2dd5a..b7b3b872 100644
--- a/drivers/net/qede/base/ecore_vf.c
+++ b/drivers/net/qede/base/ecore_vf.c
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -1275,8 +1275,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
resp_size += sizeof(struct pfvf_def_resp_tlv);
OSAL_MEMCPY(p_mcast_tlv->bins, p_params->bins,
- sizeof(unsigned long) *
- ETH_MULTICAST_MAC_BINS_IN_REGS);
+ sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
}
update_rx = p_params->accept_flags.update_rx_mode_config;
@@ -1473,7 +1472,7 @@ void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,
u32 bit;
bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
- OSAL_SET_BIT(bit, sp_params.bins);
+ sp_params.bins[bit / 32] |= 1 << (bit % 32);
}
}
diff --git a/drivers/net/qede/base/ecore_vf.h b/drivers/net/qede/base/ecore_vf.h
index de2758cb..e26b30bf 100644
--- a/drivers/net/qede/base/ecore_vf.h
+++ b/drivers/net/qede/base/ecore_vf.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/qede/base/ecore_vf_api.h b/drivers/net/qede/base/ecore_vf_api.h
index 9815cf8a..af7bc36b 100644
--- a/drivers/net/qede/base/ecore_vf_api.h
+++ b/drivers/net/qede/base/ecore_vf_api.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/qede/base/ecore_vfpf_if.h b/drivers/net/qede/base/ecore_vfpf_if.h
index ecb00649..dce937e0 100644
--- a/drivers/net/qede/base/ecore_vfpf_if.h
+++ b/drivers/net/qede/base/ecore_vfpf_if.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -396,7 +396,13 @@ struct vfpf_vport_update_mcast_bin_tlv {
struct channel_tlv tl;
u8 padding[4];
- u64 bins[8];
+	/* This was a mistake; there are only 256 approx bins,
+	 * and in the HSI they are divided into 32-bit values.
+	 * As old VFs used to set bits in these values on their side,
+	 * the upper half of the array is never expected to contain any data.
+ */
+ u64 bins[4];
+ u64 obsolete_bins[4];
};
struct vfpf_vport_update_accept_param_tlv {
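
With the TLV narrowed to 256 approx bins carried as 32-bit words, marking a bin on the VF side reduces to a word index plus a bit shift, which is what the sp_params.bins update in ecore_vf.c above does. A hedged sketch of the same operation in isolation (mac is an assumed input):

	/* Illustrative only: set the approx-mcast bin for one MAC address.
	 * bins holds 256 bits as u32 words (ETH_MULTICAST_MAC_BINS_IN_REGS of them).
	 */
	u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS] = { 0 };
	u32 bit = ecore_mcast_bin_from_mac(mac);	/* returns 0..255 */

	bins[bit / 32] |= 1 << (bit % 32);
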
diff --git a/drivers/net/qede/base/eth_common.h b/drivers/net/qede/base/eth_common.h
index 45a0356d..9de49b64 100644
--- a/drivers/net/qede/base/eth_common.h
+++ b/drivers/net/qede/base/eth_common.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -119,6 +119,9 @@
/* Number of etherType values configured by driver for control frame check */
#define ETH_CTL_FRAME_ETH_TYPE_NUM 4
+/* GFS constants */
+#define ETH_GFT_TRASHCAN_VPORT 0x1FF /* GFT drop flow vport number */
+
/*
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index 81ca6634..752473e1 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -800,6 +800,7 @@ struct public_port {
#define ETH_TRANSCEIVER_TYPE_4x10G 0x1f
#define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20
#define ETH_TRANSCEIVER_TYPE_1000BASET 0x21
+#define ETH_TRANSCEIVER_TYPE_10G_BASET 0x22
#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30
#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31
#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32
@@ -1777,6 +1778,8 @@ struct public_drv_mb {
#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ 0x00000001
/* MFW supports EEE */
#define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002
+/* MFW supports DRV_LOAD Timeout */
+#define FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO 0x00000004
/* MFW supports virtual link */
#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000
diff --git a/drivers/net/qede/base/nvm_cfg.h b/drivers/net/qede/base/nvm_cfg.h
index c99e805d..a20d0674 100644
--- a/drivers/net/qede/base/nvm_cfg.h
+++ b/drivers/net/qede/base/nvm_cfg.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/qede/base/reg_addr.h b/drivers/net/qede/base/reg_addr.h
index ad15d28a..71f0ca13 100644
--- a/drivers/net/qede/base/reg_addr.h
+++ b/drivers/net/qede/base/reg_addr.h
@@ -1,15 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
- */
-
-/*
- * Copyright (c) 2016 QLogic Corporation.
- * All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -1222,3 +1214,5 @@
#define MCP_REG_CPU_STATE_SOFT_HALTED (0x1 << 10)
#define PRS_REG_SEARCH_TENANT_ID 0x1f044cUL
#define PGLUE_B_REG_VF_BAR1_SIZE 0x2aae68UL
+
+#define RSS_REG_RSS_RAM_MASK 0x238c10UL
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index a91f4368..3206cc6c 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -16,7 +16,7 @@ int qede_logtype_init;
int qede_logtype_driver;
static const struct qed_eth_ops *qed_ops;
-static int64_t timer_period = 1;
+#define QEDE_SP_TIMER_PERIOD	10000 /* 10ms */
/* VXLAN tunnel classification mapping */
const struct _qede_udp_tunn_types {
@@ -499,6 +499,9 @@ qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
return 0;
}
+#define QEDE_NPAR_TX_SWITCHING "npar_tx_switching"
+#define QEDE_VF_TX_SWITCHING "vf_tx_switching"
+
/* Activate or deactivate vport via vport-update */
int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
{
@@ -516,10 +519,12 @@ int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
params.vport_active_rx_flg = flg;
params.vport_active_tx_flg = flg;
if (!qdev->enable_tx_switching) {
- if (IS_VF(edev)) {
+ if ((QEDE_NPAR_TX_SWITCHING != NULL) ||
+ ((QEDE_VF_TX_SWITCHING != NULL) && IS_VF(edev))) {
params.update_tx_switching_flg = 1;
params.tx_switching_flg = !flg;
- DP_INFO(edev, "VF tx-switching is disabled\n");
+ DP_INFO(edev, "%s tx-switching is disabled\n",
+ QEDE_NPAR_TX_SWITCHING ? "NPAR" : "VF");
}
}
for_each_hwfn(edev, i) {
@@ -591,6 +596,8 @@ int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
}
}
qdev->enable_lro = flg;
+ eth_dev->data->lro = flg;
+
DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");
return 0;
@@ -780,6 +787,36 @@ qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
}
static int
+qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+ bool enable)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_tunnel_info tunn;
+
+ memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
+ tunn.ip_gre.b_update_mode = true;
+ tunn.ip_gre.b_mode_enabled = enable;
+	tunn.ip_gre.tun_cls = clss;
+ tunn.b_update_rx_cls = true;
+ tunn.b_update_tx_cls = true;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc == ECORE_SUCCESS) {
+ qdev->ipgre.enable = enable;
+ DP_INFO(edev, "IPGRE is %s\n",
+ enable ? "enabled" : "disabled");
+ } else {
+ DP_ERR(edev, "Failed to update tunn_clss %u\n",
+ clss);
+ }
+
+ return rc;
+}
+
+static int
qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
enum rte_eth_tunnel_type tunn_type, bool enable)
{
@@ -792,6 +829,9 @@ qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
case RTE_TUNNEL_TYPE_GENEVE:
rc = qede_geneve_enable(eth_dev, clss, enable);
break;
+ case RTE_TUNNEL_TYPE_IP_IN_GRE:
+ rc = qede_ipgre_enable(eth_dev, clss, enable);
+ break;
default:
rc = -EINVAL;
break;
@@ -817,10 +857,10 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
ETHER_ADDR_LEN) == 0) &&
ucast->vni == tmp->vni &&
ucast->vlan == tmp->vlan) {
- DP_ERR(edev, "Unicast MAC is already added"
- " with vlan = %u, vni = %u\n",
- ucast->vlan, ucast->vni);
- return -EEXIST;
+ DP_INFO(edev, "Unicast MAC is already added"
+ " with vlan = %u, vni = %u\n",
+ ucast->vlan, ucast->vni);
+ return 0;
}
}
u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
@@ -854,110 +894,95 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
}
static int
-qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast,
- bool add)
+qede_add_mcast_filters(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
+ uint32_t mc_addrs_num)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct ether_addr *mac_addr;
- struct qede_mcast_entry *tmp = NULL;
- struct qede_mcast_entry *m;
+ struct ecore_filter_mcast mcast;
+ struct qede_mcast_entry *m = NULL;
+ uint8_t i;
+ int rc;
- mac_addr = (struct ether_addr *)mcast->mac;
- if (add) {
- SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
- if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) {
- DP_ERR(edev,
- "Multicast MAC is already added\n");
- return -EEXIST;
- }
- }
+ for (i = 0; i < mc_addrs_num; i++) {
m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
- RTE_CACHE_LINE_SIZE);
+ RTE_CACHE_LINE_SIZE);
if (!m) {
- DP_ERR(edev,
- "Did not allocate memory for mcast\n");
+ DP_ERR(edev, "Did not allocate memory for mcast\n");
return -ENOMEM;
}
- ether_addr_copy(mac_addr, &m->mac);
+ ether_addr_copy(&mc_addrs[i], &m->mac);
SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
- qdev->num_mc_addr++;
- } else {
- SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
- if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0)
- break;
- }
- if (tmp == NULL) {
- DP_INFO(edev, "Multicast mac is not found\n");
- return -EINVAL;
- }
- SLIST_REMOVE(&qdev->mc_list_head, tmp,
- qede_mcast_entry, list);
- qdev->num_mc_addr--;
+ }
+ memset(&mcast, 0, sizeof(mcast));
+ mcast.num_mc_addrs = mc_addrs_num;
+ mcast.opcode = ECORE_FILTER_ADD;
+ for (i = 0; i < mc_addrs_num; i++)
+ ether_addr_copy(&mc_addrs[i], (struct ether_addr *)
+ &mcast.mac[i]);
+ rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
+ if (rc != ECORE_SUCCESS) {
+		DP_ERR(edev, "Failed to add multicast filter (rc = %d)\n", rc);
+ return -1;
}
return 0;
}
+static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct qede_mcast_entry *tmp = NULL;
+ struct ecore_filter_mcast mcast;
+ int j;
+ int rc;
+
+ memset(&mcast, 0, sizeof(mcast));
+ mcast.num_mc_addrs = qdev->num_mc_addr;
+ mcast.opcode = ECORE_FILTER_REMOVE;
+ j = 0;
+ SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
+ ether_addr_copy(&tmp->mac, (struct ether_addr *)&mcast.mac[j]);
+ j++;
+ }
+ rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to delete multicast filter\n");
+ return -1;
+ }
+ /* Init the list */
+ while (!SLIST_EMPTY(&qdev->mc_list_head)) {
+ tmp = SLIST_FIRST(&qdev->mc_list_head);
+ SLIST_REMOVE_HEAD(&qdev->mc_list_head, list);
+ }
+ SLIST_INIT(&qdev->mc_list_head);
+
+ return 0;
+}
+
static enum _ecore_status_t
qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
bool add)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- enum _ecore_status_t rc;
- struct ecore_filter_mcast mcast;
- struct qede_mcast_entry *tmp;
- uint16_t j = 0;
+ enum _ecore_status_t rc = ECORE_INVAL;
- /* Multicast */
- if (is_multicast_ether_addr((struct ether_addr *)ucast->mac)) {
- if (add) {
- if (qdev->num_mc_addr >= ECORE_MAX_MC_ADDRS) {
- DP_ERR(edev,
- "Mcast filter table limit exceeded, "
- "Please enable mcast promisc mode\n");
- return -ECORE_INVAL;
- }
- }
- rc = qede_mcast_filter(eth_dev, ucast, add);
- if (rc == 0) {
- DP_INFO(edev, "num_mc_addrs = %u\n", qdev->num_mc_addr);
- memset(&mcast, 0, sizeof(mcast));
- mcast.num_mc_addrs = qdev->num_mc_addr;
- mcast.opcode = ECORE_FILTER_ADD;
- SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
- ether_addr_copy(&tmp->mac,
- (struct ether_addr *)&mcast.mac[j]);
- j++;
- }
- rc = ecore_filter_mcast_cmd(edev, &mcast,
- ECORE_SPQ_MODE_CB, NULL);
- }
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Failed to add multicast filter"
- " rc = %d, op = %d\n", rc, add);
- }
- } else { /* Unicast */
- if (add) {
- if (qdev->num_uc_addr >=
- qdev->dev_info.num_mac_filters) {
- DP_ERR(edev,
- "Ucast filter table limit exceeded,"
- " Please enable promisc mode\n");
- return -ECORE_INVAL;
- }
- }
- rc = qede_ucast_filter(eth_dev, ucast, add);
- if (rc == 0)
- rc = ecore_filter_ucast_cmd(edev, ucast,
- ECORE_SPQ_MODE_CB, NULL);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
- rc, add);
- }
+ if (add && (qdev->num_uc_addr >= qdev->dev_info.num_mac_filters)) {
+ DP_ERR(edev, "Ucast filter table limit exceeded,"
+ " Please enable promisc mode\n");
+ return ECORE_INVAL;
}
+ rc = qede_ucast_filter(eth_dev, ucast, add);
+ if (rc == 0)
+ rc = ecore_filter_ucast_cmd(edev, ucast,
+ ECORE_SPQ_MODE_CB, NULL);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
+ rc, add);
+
return rc;
}
@@ -998,10 +1023,10 @@ qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
ether_addr_copy(&eth_dev->data->mac_addrs[index],
(struct ether_addr *)&ucast.mac);
- ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
+ qede_mac_int_ops(eth_dev, &ucast, false);
}
-static void
+static int
qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
@@ -1010,12 +1035,11 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
mac_addr->addr_bytes)) {
DP_ERR(edev, "Setting MAC address is not allowed\n");
- ether_addr_copy(&qdev->primary_mac,
- &eth_dev->data->mac_addrs[0]);
- return;
+ return -EPERM;
}
qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
+ return 0;
}
static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
@@ -1093,9 +1117,9 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
if (tmp->vid == vlan_id) {
- DP_ERR(edev, "VLAN %u already configured\n",
- vlan_id);
- return -EEXIST;
+ DP_INFO(edev, "VLAN %u already configured\n",
+ vlan_id);
+ return 0;
}
}
@@ -1166,10 +1190,10 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
+ uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
if (mask & ETH_VLAN_STRIP_MASK) {
- if (rxmode->hw_vlan_strip)
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
(void)qede_vlan_stripping(eth_dev, 1);
else
(void)qede_vlan_stripping(eth_dev, 0);
@@ -1177,7 +1201,7 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
if (mask & ETH_VLAN_FILTER_MASK) {
/* VLAN filtering kicks in when a VLAN is added */
- if (rxmode->hw_vlan_filter) {
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
qede_vlan_filter_set(eth_dev, 0, 1);
} else {
if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
@@ -1187,7 +1211,8 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
/* Signal app that VLAN filtering is still
* enabled
*/
- rxmode->hw_vlan_filter = true;
+ eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_VLAN_FILTER;
} else {
qede_vlan_filter_set(eth_dev, 0, 0);
}
@@ -1195,13 +1220,11 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
}
if (mask & ETH_VLAN_EXTEND_MASK)
- DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
- " and classification is based on outer tag only\n");
+ DP_ERR(edev, "Extend VLAN not supported\n");
qdev->vlan_offload_mask = mask;
- DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
- mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
+ DP_INFO(edev, "VLAN offload mask %d\n", mask);
return 0;
}
@@ -1267,19 +1290,19 @@ static void qede_fastpath_start(struct ecore_dev *edev)
static int qede_dev_start(struct rte_eth_dev *eth_dev)
{
- struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
PMD_INIT_FUNC_TRACE(edev);
/* Configure TPA parameters */
- if (rxmode->enable_lro) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
if (qede_enable_tpa(eth_dev, true))
return -EINVAL;
/* Enable scatter mode for LRO */
- if (!rxmode->enable_scatter)
- eth_dev->data->scattered_rx = 1;
+ if (!eth_dev->data->scattered_rx)
+ rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER;
}
/* Start queues */
@@ -1294,7 +1317,7 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
* Also, we would like to retain similar behavior in PF case, so we
* don't do PF/VF specific check here.
*/
- if (rxmode->mq_mode == ETH_MQ_RX_RSS)
+ if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
if (qede_config_rss(eth_dev))
goto err;
@@ -1336,13 +1359,15 @@ static void qede_dev_stop(struct rte_eth_dev *eth_dev)
/* Disable traffic */
ecore_hw_stop_fastpath(edev); /* TBD - loop */
+ if (IS_PF(edev))
+ qede_mac_addr_remove(eth_dev, 0);
+
DP_INFO(edev, "Device is stopped\n");
}
-#define QEDE_TX_SWITCHING "vf_txswitch"
-
const char *valid_args[] = {
- QEDE_TX_SWITCHING,
+ QEDE_NPAR_TX_SWITCHING,
+ QEDE_VF_TX_SWITCHING,
NULL,
};
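
Both keys are consumed through rte_kvargs from the device's PCI devargs, so they would typically be supplied per device on the EAL command line, e.g. -w 0000:05:00.0,npar_tx_switching=0 (BDF and value purely illustrative); qede_args_check() treats any non-zero value as enable.
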
@@ -1361,7 +1386,8 @@ static int qede_args_check(const char *key, const char *val, void *opaque)
return errno;
}
- if (strcmp(QEDE_TX_SWITCHING, key) == 0)
+ if ((strcmp(QEDE_NPAR_TX_SWITCHING, key) == 0) ||
+ (strcmp(QEDE_VF_TX_SWITCHING, key) == 0))
qdev->enable_tx_switching = !!tmp;
return ret;
@@ -1411,15 +1437,15 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
/* Check requirements for 100G mode */
if (ECORE_IS_CMT(edev)) {
if (eth_dev->data->nb_rx_queues < 2 ||
- eth_dev->data->nb_tx_queues < 2) {
+ eth_dev->data->nb_tx_queues < 2) {
DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
return -EINVAL;
}
if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
- (eth_dev->data->nb_tx_queues % 2 != 0)) {
+ (eth_dev->data->nb_tx_queues % 2 != 0)) {
DP_ERR(edev,
- "100G mode needs even no. of RX/TX queues\n");
+ "100G mode needs even no. of RX/TX queues\n");
return -EINVAL;
}
}
@@ -1439,20 +1465,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
if (qede_args(eth_dev))
return -ENOTSUP;
- /* Sanity checks and throw warnings */
- if (rxmode->enable_scatter)
- eth_dev->data->scattered_rx = 1;
-
- if (!rxmode->hw_strip_crc)
- DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");
-
- if (!rxmode->hw_ip_checksum)
- DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
- "in hw\n");
- if (rxmode->header_split)
- DP_INFO(edev, "Header split enable is not supported\n");
- if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || rxmode->mq_mode ==
- ETH_MQ_RX_RSS)) {
+ if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
+ rxmode->mq_mode == ETH_MQ_RX_RSS)) {
DP_ERR(edev, "Unsupported multi-queue mode\n");
return -ENOTSUP;
}
@@ -1467,19 +1481,23 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
return -ENOMEM;
/* If jumbo enabled adjust MTU */
- if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
eth_dev->data->mtu =
- eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
- ETHER_HDR_LEN - ETHER_CRC_LEN;
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
+ ETHER_HDR_LEN - ETHER_CRC_LEN;
+
+ if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
+ eth_dev->data->scattered_rx = 1;
if (qede_start_vport(qdev, eth_dev->data->mtu))
return -1;
+
qdev->mtu = eth_dev->data->mtu;
/* Enable VLAN offloads by default */
ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
- ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK);
+ ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK);
if (ret)
return ret;
@@ -1515,7 +1533,6 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
PMD_INIT_FUNC_TRACE(edev);
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
dev_info->rx_desc_lim = qede_rx_desc_lim;
@@ -1534,26 +1551,46 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
-
- dev_info->default_txconf = (struct rte_eth_txconf) {
- .txq_flags = QEDE_TXQ_FLAGS,
- };
-
- dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
+ dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_TCP_LRO);
-
+ DEV_RX_OFFLOAD_TCP_LRO |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_VLAN_STRIP);
+ dev_info->rx_queue_offload_capa = 0;
+
+	/* TX offloads are on a per-packet basis, so they are applicable
+	 * at both the port and queue levels.
+ */
dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM |
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_QINQ_INSERT |
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_TCP_TSO |
DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+ dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
+ };
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ /* Packets are always dropped if no descriptors are available */
+ .rx_drop_en = 1,
+ /* The below RX offloads are always enabled */
+ .offloads = (DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM),
+ };
memset(&link, 0, sizeof(struct qed_link_output));
qdev->ops->common->get_link(edev, &link);
@@ -1661,7 +1698,7 @@ static void qede_poll_sp_sb_cb(void *param)
qede_interrupt_action(ECORE_LEADING_HWFN(edev));
qede_interrupt_action(&edev->hwfns[1]);
- rc = rte_eal_alarm_set(timer_period * US_PER_S,
+ rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
qede_poll_sp_sb_cb,
(void *)eth_dev);
if (rc != 0) {
@@ -1990,6 +2027,35 @@ static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
QED_FILTER_RX_MODE_TYPE_REGULAR);
}
+static int
+qede_set_mc_addr_list(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
+ uint32_t mc_addrs_num)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ uint8_t i;
+
+ if (mc_addrs_num > ECORE_MAX_MC_ADDRS) {
+		DP_ERR(edev, "Reached max multicast filters limit, "
+			     "please enable multicast promisc mode\n");
+ return -ENOSPC;
+ }
+
+ for (i = 0; i < mc_addrs_num; i++) {
+ if (!is_multicast_ether_addr(&mc_addrs[i])) {
+ DP_ERR(edev, "Not a valid multicast MAC\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Flush all existing entries */
+ if (qede_del_mcast_filters(eth_dev))
+ return -1;
+
+ /* Set new mcast list */
+ return qede_add_mcast_filters(eth_dev, mc_addrs, mc_addrs_num);
+}
+
static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
struct rte_eth_fc_conf *fc_conf)
{
@@ -2065,6 +2131,7 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
RTE_PTYPE_TUNNEL_VXLAN,
RTE_PTYPE_L4_FRAG,
RTE_PTYPE_TUNNEL_GENEVE,
+ RTE_PTYPE_TUNNEL_GRE,
/* Inner */
RTE_PTYPE_INNER_L2_ETHER,
RTE_PTYPE_INNER_L2_ETHER_VLAN,
@@ -2391,6 +2458,9 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
dev->data->dev_started = 0;
qede_dev_stop(dev);
restart = true;
+ } else {
+ if (IS_PF(edev))
+ qede_mac_addr_remove(dev, 0);
}
rte_delay_ms(1000);
qede_start_vport(qdev, mtu); /* Recreate vport */
@@ -2418,7 +2488,9 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
dev->data->dev_conf.rxmode.jumbo_frame = 0;
/* Restore config lost due to vport stop */
- qede_mac_addr_set(dev, &qdev->primary_mac);
+ if (IS_PF(edev))
+ qede_mac_addr_set(dev, &qdev->primary_mac);
+
if (dev->data->promiscuous)
qede_promiscuous_enable(dev);
else
@@ -2488,7 +2560,6 @@ qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
ECORE_TUNN_CLSS_MAC_VLAN, false);
break;
-
case RTE_TUNNEL_TYPE_GENEVE:
if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
DP_ERR(edev, "UDP port %u doesn't exist\n",
@@ -2578,7 +2649,6 @@ qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
qdev->vxlan.udp_port = udp_port;
break;
-
case RTE_TUNNEL_TYPE_GENEVE:
if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
DP_INFO(edev,
@@ -2616,7 +2686,6 @@ qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
qdev->geneve.udp_port = udp_port;
break;
-
default:
return ECORE_INVAL;
}
@@ -2782,7 +2851,8 @@ qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
qdev->geneve.filter_type = conf->filter_type;
}
- if (!qdev->vxlan.enable || !qdev->geneve.enable)
+ if (!qdev->vxlan.enable || !qdev->geneve.enable ||
+ !qdev->ipgre.enable)
return qede_tunn_enable(eth_dev, clss,
conf->tunnel_type,
true);
@@ -2818,15 +2888,14 @@ int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
switch (filter_conf->tunnel_type) {
case RTE_TUNNEL_TYPE_VXLAN:
case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_TUNNEL_TYPE_IP_IN_GRE:
DP_INFO(edev,
"Packet steering to the specified Rx queue"
" is not supported with UDP tunneling");
return(qede_tunn_filter_config(eth_dev, filter_op,
filter_conf));
- /* Place holders for future tunneling support */
case RTE_TUNNEL_TYPE_TEREDO:
case RTE_TUNNEL_TYPE_NVGRE:
- case RTE_TUNNEL_TYPE_IP_IN_GRE:
case RTE_L2_TUNNEL_TYPE_E_TAG:
DP_ERR(edev, "Unsupported tunnel type %d\n",
filter_conf->tunnel_type);
@@ -2871,6 +2940,7 @@ static const struct eth_dev_ops qede_eth_dev_ops = {
.promiscuous_disable = qede_promiscuous_disable,
.allmulticast_enable = qede_allmulticast_enable,
.allmulticast_disable = qede_allmulticast_disable,
+ .set_mc_addr_list = qede_set_mc_addr_list,
.dev_stop = qede_dev_stop,
.dev_close = qede_dev_close,
.stats_get = qede_get_stats,
@@ -2911,6 +2981,7 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = {
.promiscuous_disable = qede_promiscuous_disable,
.allmulticast_enable = qede_allmulticast_enable,
.allmulticast_disable = qede_allmulticast_disable,
+ .set_mc_addr_list = qede_set_mc_addr_list,
.dev_stop = qede_dev_stop,
.dev_close = qede_dev_close,
.stats_get = qede_get_stats,
@@ -3022,7 +3093,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
* interrupt vector but we need one for each engine.
*/
if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
- rc = rte_eal_alarm_set(timer_period * US_PER_S,
+ rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
qede_poll_sp_sb_cb,
(void *)eth_dev);
if (rc != 0) {
@@ -3119,25 +3190,30 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
SLIST_INIT(&adapter->fdir_info.fdir_list_head);
SLIST_INIT(&adapter->vlan_list_head);
SLIST_INIT(&adapter->uc_list_head);
+ SLIST_INIT(&adapter->mc_list_head);
adapter->mtu = ETHER_MTU;
adapter->vport_started = false;
/* VF tunnel offloads is enabled by default in PF driver */
adapter->vxlan.num_filters = 0;
adapter->geneve.num_filters = 0;
+ adapter->ipgre.num_filters = 0;
if (is_vf) {
adapter->vxlan.enable = true;
adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
ETH_TUNNEL_FILTER_IVLAN;
adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
adapter->geneve.enable = true;
-
adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
ETH_TUNNEL_FILTER_IVLAN;
adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
+ adapter->ipgre.enable = true;
+ adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC |
+ ETH_TUNNEL_FILTER_IVLAN;
} else {
adapter->vxlan.enable = false;
adapter->geneve.enable = false;
+ adapter->ipgre.enable = false;
}
DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
index 23f7e0e2..a335d4da 100644
--- a/drivers/net/qede/qede_ethdev.h
+++ b/drivers/net/qede/qede_ethdev.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -45,7 +45,7 @@
/* Driver versions */
#define QEDE_PMD_VER_PREFIX "QEDE PMD"
#define QEDE_PMD_VERSION_MAJOR 2
-#define QEDE_PMD_VERSION_MINOR 7
+#define QEDE_PMD_VERSION_MINOR 8
#define QEDE_PMD_VERSION_REVISION 0
#define QEDE_PMD_VERSION_PATCH 1
@@ -170,7 +170,7 @@ struct qede_fdir_info {
#define QEDE_VXLAN_DEF_PORT (4789)
#define QEDE_GENEVE_DEF_PORT (6081)
-struct qede_udp_tunn {
+struct qede_tunn_params {
bool enable;
uint16_t num_filters;
uint16_t filter_type;
@@ -205,8 +205,9 @@ struct qede_dev {
SLIST_HEAD(uc_list_head, qede_ucast_entry) uc_list_head;
uint16_t num_uc_addr;
bool handle_hw_err;
- struct qede_udp_tunn vxlan;
- struct qede_udp_tunn geneve;
+ struct qede_tunn_params vxlan;
+ struct qede_tunn_params geneve;
+ struct qede_tunn_params ipgre;
struct qede_fdir_info fdir_info;
bool vlan_strip_flg;
char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
diff --git a/drivers/net/qede/qede_fdir.c b/drivers/net/qede/qede_fdir.c
index da6364ee..9d0b0526 100644
--- a/drivers/net/qede/qede_fdir.c
+++ b/drivers/net/qede/qede_fdir.c
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2017 QLogic Corporation.
+ * Copyright (c) 2017 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -141,8 +141,8 @@ qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
if (add) {
SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
- DP_ERR(edev, "flowdir filter exist\n");
- rc = -EEXIST;
+				DP_INFO(edev, "flowdir filter exists\n");
+ rc = 0;
goto err2;
}
}
diff --git a/drivers/net/qede/qede_if.h b/drivers/net/qede/qede_if.h
index 246f0fd3..01f17c9a 100644
--- a/drivers/net/qede/qede_if.h
+++ b/drivers/net/qede/qede_if.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/qede/qede_logs.h b/drivers/net/qede/qede_logs.h
index 159315e7..e7f714f2 100644
--- a/drivers/net/qede/qede_logs.h
+++ b/drivers/net/qede/qede_logs.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index ae187321..c3407fe9 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -9,6 +9,7 @@
#include <limits.h>
#include <time.h>
#include <rte_alarm.h>
+#include <rte_string_fns.h>
#include "qede_ethdev.h"
@@ -19,7 +20,7 @@
char fw_file[PATH_MAX];
const char *QEDE_DEFAULT_FIRMWARE =
- "/lib/firmware/qed/qed_init_values-8.30.12.0.bin";
+ "/lib/firmware/qed/qed_init_values-8.33.12.0.bin";
static void
qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params)
@@ -62,6 +63,7 @@ qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
hw_prepare_params.chk_reg_fifo = false;
hw_prepare_params.initiate_pf_flr = true;
hw_prepare_params.allow_mdump = false;
+ hw_prepare_params.b_en_pacing = false;
hw_prepare_params.epoch = (u32)time(NULL);
rc = ecore_hw_prepare(edev, &hw_prepare_params);
if (rc) {
@@ -302,9 +304,8 @@ static int qed_slowpath_start(struct ecore_dev *edev,
drv_version.version = (params->drv_major << 24) |
(params->drv_minor << 16) |
(params->drv_rev << 8) | (params->drv_eng);
- /* TBD: strlcpy() */
- strncpy((char *)drv_version.name, (const char *)params->name,
- MCP_DRV_VER_STR_SIZE - 4);
+ strlcpy((char *)drv_version.name, (const char *)params->name,
+ sizeof(drv_version.name));
rc = ecore_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
&drv_version);
if (rc) {
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 0de7c6b8..bdb5d6f1 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -87,7 +87,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
/* Fix up RX buffer size */
bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
- if ((rxmode->enable_scatter) ||
+ if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
(max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
if (!dev->data->scattered_rx) {
DP_INFO(edev, "Forcing scatter-gather mode\n");
@@ -1466,6 +1466,8 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
*/
rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
+ } else {
+ packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
}
/* Common handling for non-tunnel packets and for inner
@@ -1487,7 +1489,6 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
ol_flags |= PKT_RX_IP_CKSUM_BAD;
} else {
ol_flags |= PKT_RX_IP_CKSUM_GOOD;
- packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
}
if (CQE_HAS_VLAN(parse_flag) ||
@@ -1631,6 +1632,7 @@ qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len);
}
+ start_seg++;
m_seg = m_seg->next;
}
@@ -1837,17 +1839,14 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
* offloads. Don't rely on pkt_type marked by Rx, instead use
* tx_ol_flags to decide.
*/
- if (((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
- PKT_TX_TUNNEL_VXLAN) ||
- ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
- PKT_TX_TUNNEL_MPLSINUDP) ||
- ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
- PKT_TX_TUNNEL_GENEVE)) {
+ tunn_flg = !!(tx_ol_flags & PKT_TX_TUNNEL_MASK);
+
+ if (tunn_flg) {
/* Check against max which is Tunnel IPv6 + ext */
if (unlikely(txq->nb_tx_avail <
ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
break;
- tunn_flg = true;
+
/* First indicate its a tunnel pkt */
bd1_bf |= ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
@@ -1986,7 +1985,8 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
* csum offload is requested then we need to force
* recalculation of L4 tunnel header csum also.
*/
- if (tunn_flg) {
+ if (tunn_flg && ((tx_ol_flags & PKT_TX_TUNNEL_MASK) !=
+ PKT_TX_TUNNEL_GRE)) {
bd1_bd_flags_bf |=
ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index f1d36661..84a834d2 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -76,8 +76,6 @@
ETH_RSS_VXLAN |\
ETH_RSS_GENEVE)
-#define QEDE_TXQ_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS)
-
#define for_each_rss(i) for (i = 0; i < qdev->num_rx_queues; i++)
#define for_each_tss(i) for (i = 0; i < qdev->num_tx_queues; i++)
#define QEDE_RXTX_MAX(qdev) \
@@ -153,7 +151,8 @@
PKT_TX_VLAN_PKT | \
PKT_TX_TUNNEL_VXLAN | \
PKT_TX_TUNNEL_GENEVE | \
- PKT_TX_TUNNEL_MPLSINUDP)
+ PKT_TX_TUNNEL_MPLSINUDP | \
+ PKT_TX_TUNNEL_GRE)
#define QEDE_TX_OFFLOAD_NOTSUP_MASK \
(PKT_TX_OFFLOAD_MASK ^ QEDE_TX_OFFLOAD_MASK)
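
Throughout the qede Rx path the old rxmode bit-fields (enable_lro, enable_scatter, hw_vlan_strip, ...) are replaced by checks on rxmode.offloads, so applications opt in through the port configuration. A hedged sketch of such a configuration (port_id and queue counts assumed to exist; the flag selection is only an example):

	/* Illustrative rte_eth_dev_configure() call under the offloads API. */
	struct rte_eth_conf port_conf;

	memset(&port_conf, 0, sizeof(port_conf));
	port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
	port_conf.rxmode.offloads = DEV_RX_OFFLOAD_TCP_LRO |
				    DEV_RX_OFFLOAD_SCATTER |
				    DEV_RX_OFFLOAD_VLAN_STRIP;
	if (rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf) != 0)
		rte_exit(EXIT_FAILURE, "port configure failed\n");
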
diff --git a/drivers/net/ring/meson.build b/drivers/net/ring/meson.build
index e877a4b4..7659b04f 100644
--- a/drivers/net/ring/meson.build
+++ b/drivers/net/ring/meson.build
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
+version = 2
sources = files('rte_eth_ring.c')
install_headers('rte_eth_ring.h')
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index df13c44b..35b837c3 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -60,9 +60,15 @@ static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
.link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_AUTONEG
+ .link_autoneg = ETH_LINK_FIXED,
};
+static int eth_ring_logtype;
+
+#define PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, eth_ring_logtype, \
+ "%s(): " fmt "\n", __func__, ##args)
+
static uint16_t
eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
@@ -256,18 +262,9 @@ do_eth_dev_ring_create(const char *name,
void **tx_queues_local = NULL;
unsigned i;
- RTE_LOG(INFO, PMD, "Creating rings-backed ethdev on numa socket %u\n",
+ PMD_LOG(INFO, "Creating rings-backed ethdev on numa socket %u",
numa_node);
- /* now do all data allocation - for eth_dev structure, dummy pci driver
- * and internal (private) data
- */
- data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
- if (data == NULL) {
- rte_errno = ENOMEM;
- goto error;
- }
-
rx_queues_local = rte_zmalloc_socket(name,
sizeof(void *) * nb_rx_queues, 0, numa_node);
if (rx_queues_local == NULL) {
@@ -301,10 +298,8 @@ do_eth_dev_ring_create(const char *name,
* - point eth_dev_data to internals
* - and point eth_dev structure to new eth_dev_data structure
*/
- /* NOTE: we'll replace the data element, of originally allocated eth_dev
- * so the rings are local per-process */
- rte_memcpy(data, eth_dev->data, sizeof(*data));
+ data = eth_dev->data;
data->rx_queues = rx_queues_local;
data->tx_queues = tx_queues_local;
@@ -326,7 +321,6 @@ do_eth_dev_ring_create(const char *name,
data->dev_link = pmd_link;
data->mac_addrs = &internals->address;
- eth_dev->data = data;
eth_dev->dev_ops = &ops;
data->kdrv = RTE_KDRV_NONE;
data->numa_node = numa_node;
@@ -335,6 +329,7 @@ do_eth_dev_ring_create(const char *name,
eth_dev->rx_pkt_burst = eth_ring_rx;
eth_dev->tx_pkt_burst = eth_ring_tx;
+ rte_eth_dev_probing_finish(eth_dev);
*eth_dev_p = eth_dev;
return data->port_id;
@@ -342,7 +337,6 @@ do_eth_dev_ring_create(const char *name,
error:
rte_free(rx_queues_local);
rte_free(tx_queues_local);
- rte_free(data);
rte_free(internals);
return -1;
@@ -459,13 +453,13 @@ static int parse_kvlist (const char *key __rte_unused, const char *value, void *
ret = -EINVAL;
if (!name) {
- RTE_LOG(WARNING, PMD, "command line parameter is empty for ring pmd!\n");
+ PMD_LOG(WARNING, "command line parameter is empty for ring pmd!");
goto out;
}
node = strchr(name, ':');
if (!node) {
- RTE_LOG(WARNING, PMD, "could not parse node value from %s\n",
+ PMD_LOG(WARNING, "could not parse node value from %s",
name);
goto out;
}
@@ -475,7 +469,7 @@ static int parse_kvlist (const char *key __rte_unused, const char *value, void *
action = strchr(node, ':');
if (!action) {
- RTE_LOG(WARNING, PMD, "could not parse action value from %s\n",
+ PMD_LOG(WARNING, "could not parse action value from %s",
node);
goto out;
}
@@ -498,7 +492,8 @@ static int parse_kvlist (const char *key __rte_unused, const char *value, void *
info->list[info->count].node = strtol(node, &end, 10);
if ((errno != 0) || (*end != '\0')) {
- RTE_LOG(WARNING, PMD, "node value %s is unparseable as a number\n", node);
+ PMD_LOG(WARNING,
+ "node value %s is unparseable as a number", node);
goto out;
}
@@ -542,14 +537,14 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev)
name = rte_vdev_device_name(dev);
params = rte_vdev_device_args(dev);
- RTE_LOG(INFO, PMD, "Initializing pmd_ring for %s\n", name);
+ PMD_LOG(INFO, "Initializing pmd_ring for %s", name);
if (params == NULL || params[0] == '\0') {
ret = eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE,
&eth_dev);
if (ret == -1) {
- RTE_LOG(INFO, PMD,
- "Attach to pmd_ring for %s\n", name);
+ PMD_LOG(INFO,
+ "Attach to pmd_ring for %s", name);
ret = eth_dev_ring_create(name, rte_socket_id(),
DEV_ATTACH, &eth_dev);
}
@@ -557,13 +552,13 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev)
kvlist = rte_kvargs_parse(params, valid_arguments);
if (!kvlist) {
- RTE_LOG(INFO, PMD, "Ignoring unsupported parameters when creating"
- " rings-backed ethernet device\n");
+ PMD_LOG(INFO, "Ignoring unsupported parameters when creating"
+ " rings-backed ethernet device");
ret = eth_dev_ring_create(name, rte_socket_id(),
DEV_CREATE, &eth_dev);
if (ret == -1) {
- RTE_LOG(INFO, PMD,
- "Attach to pmd_ring for %s\n",
+ PMD_LOG(INFO,
+ "Attach to pmd_ring for %s",
name);
ret = eth_dev_ring_create(name, rte_socket_id(),
DEV_ATTACH, &eth_dev);
@@ -617,8 +612,8 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev)
&eth_dev);
if ((ret == -1) &&
(info->list[info->count].action == DEV_CREATE)) {
- RTE_LOG(INFO, PMD,
- "Attach to pmd_ring for %s\n",
+ PMD_LOG(INFO,
+ "Attach to pmd_ring for %s",
name);
ret = eth_dev_ring_create(name,
info->list[info->count].node,
@@ -647,7 +642,7 @@ rte_pmd_ring_remove(struct rte_vdev_device *dev)
struct ring_queue *r = NULL;
uint16_t i;
- RTE_LOG(INFO, PMD, "Un-Initializing pmd_ring for %s\n", name);
+ PMD_LOG(INFO, "Un-Initializing pmd_ring for %s", name);
if (name == NULL)
return -EINVAL;
@@ -675,8 +670,6 @@ rte_pmd_ring_remove(struct rte_vdev_device *dev)
rte_free(eth_dev->data->tx_queues);
rte_free(eth_dev->data->dev_private);
- rte_free(eth_dev->data);
-
rte_eth_dev_release_port(eth_dev);
return 0;
}
@@ -690,3 +683,12 @@ RTE_PMD_REGISTER_VDEV(net_ring, pmd_ring_drv);
RTE_PMD_REGISTER_ALIAS(net_ring, eth_ring);
RTE_PMD_REGISTER_PARAM_STRING(net_ring,
ETH_RING_NUMA_NODE_ACTION_ARG "=name:node:action(ATTACH|CREATE)");
+
+RTE_INIT(eth_ring_init_log);
+static void
+eth_ring_init_log(void)
+{
+ eth_ring_logtype = rte_log_register("pmd.net.ring");
+ if (eth_ring_logtype >= 0)
+ rte_log_set_level(eth_ring_logtype, RTE_LOG_NOTICE);
+}
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
index 8a671dd2..3bb41a00 100644
--- a/drivers/net/sfc/Makefile
+++ b/drivers/net/sfc/Makefile
@@ -46,11 +46,11 @@ else ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable
# Suppress ICC false positive warning on 'bulk' may be used before its
# value is set
-CFLAGS_sfc_ef10_tx.o += -wd3656
+CFLAGS_sfc_ef10_tx.o += -diag-disable 3656
endif
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
-LDLIBS += -lrte_bus_pci
+LDLIBS += -lrte_bus_pci -lrte_pci
#
# List of base driver object files for which
@@ -81,6 +81,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_filter.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_dp.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ef10_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ef10_essb_rx.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ef10_tx.c
VPATH += $(SRCDIR)/base
@@ -115,6 +116,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_vpd.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_ev.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_filter.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_intr.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_image.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_mac.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_mcdi.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_nic.c
@@ -125,5 +127,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_tx.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_vpd.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += hunt_nic.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += medford_nic.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += medford2_nic.c
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/sfc/base/ef10_ev.c b/drivers/net/sfc/base/ef10_ev.c
index 05700c5c..7f89a7bf 100644
--- a/drivers/net/sfc/base/ef10_ev.c
+++ b/drivers/net/sfc/base/ef10_ev.c
@@ -10,7 +10,7 @@
#include "mcdi_mon.h"
#endif
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
#if EFSYS_OPT_QSTATS
#define EFX_EV_QSTAT_INCR(_eep, _stat) \
@@ -549,7 +549,8 @@ ef10_ev_qdestroy(
efx_nic_t *enp = eep->ee_enp;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
(void) efx_mcdi_fini_evq(enp, eep->ee_index);
}
@@ -576,7 +577,7 @@ ef10_ev_qprime(
EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
ERF_DD_EVQ_IND_RPTR,
(rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
- EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
+ EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
&dword, B_FALSE);
EFX_POPULATE_DWORD_2(dword,
@@ -584,11 +585,11 @@ ef10_ev_qprime(
EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
ERF_DD_EVQ_IND_RPTR,
rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
- EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
+ EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
&dword, B_FALSE);
} else {
EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
- EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
+ EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
&dword, B_FALSE);
}
@@ -701,13 +702,19 @@ ef10_ev_qmoderate(
EFE_DD_EVQ_IND_TIMER_FLAGS,
ERF_DD_EVQ_IND_TIMER_MODE, mode,
ERF_DD_EVQ_IND_TIMER_VAL, ticks);
- EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT,
+ EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT,
eep->ee_index, &dword, 0);
} else {
- EFX_POPULATE_DWORD_2(dword,
+ /*
+ * NOTE: The TMR_REL field introduced in Medford2 is
+ * ignored on earlier EF10 controllers. See bug66418
+ * comment 9 for details.
+ */
+ EFX_POPULATE_DWORD_3(dword,
ERF_DZ_TC_TIMER_MODE, mode,
- ERF_DZ_TC_TIMER_VAL, ticks);
- EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_TMR_REG,
+ ERF_DZ_TC_TIMER_VAL, ticks,
+ ERF_FZ_TC_TMR_REL_VAL, ticks);
+ EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_TMR_REG,
eep->ee_index, &dword, 0);
}
}
@@ -742,7 +749,7 @@ ef10_ev_qstats_update(
}
#endif /* EFSYS_OPT_QSTATS */
-#if EFSYS_OPT_RX_PACKED_STREAM
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
static __checkReturn boolean_t
ef10_ev_rx_packed_stream(
@@ -781,14 +788,25 @@ ef10_ev_rx_packed_stream(
if (new_buffer) {
flags |= EFX_PKT_PACKED_STREAM_NEW_BUFFER;
+#if EFSYS_OPT_RX_PACKED_STREAM
+ /*
+ * If both packed stream and equal stride super-buffer
+ * modes are compiled in, in theory credits should be
+ * maintained for packed stream only, but right now
+ * these modes are not distinguished in the event queue's
+ * Rx queue state and it is OK to increment the counter
+ * regardless (it might even be cheaper than branching
+ * since neighbouring structure members are updated as well).
+ */
eersp->eers_rx_packed_stream_credits++;
+#endif
eersp->eers_rx_read_ptr++;
}
current_id = eersp->eers_rx_read_ptr & eersp->eers_rx_mask;
/* Check for errors that invalidate checksum and L3/L4 fields */
- if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) {
- /* RX frame truncated (error flag is misnamed) */
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) {
+ /* RX frame truncated */
EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
flags |= EFX_DISCARD;
goto deliver;
@@ -823,7 +841,7 @@ deliver:
return (should_abort);
}
-#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+#endif /* EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER */
static __checkReturn boolean_t
ef10_ev_rx(
@@ -857,7 +875,7 @@ ef10_ev_rx(
label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
eersp = &eep->ee_rxq_state[label];
-#if EFSYS_OPT_RX_PACKED_STREAM
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
/*
* Packed stream events are very different,
* so handle them separately
@@ -867,12 +885,23 @@ ef10_ev_rx(
#endif
size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
+ cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);
next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);
- l4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS);
- cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);
+
+ /*
+ * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is only
+ * 2 bits wide on Medford2. Check it is safe to use the Medford2 field
+ * and values for all EF10 controllers.
+ */
+ EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN == ESF_DE_RX_L4_CLASS_LBN);
+ EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
+ EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
+ EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN == ESE_DE_L4_CLASS_UNKNOWN);
+
+ l4_class = EFX_QWORD_FIELD(*eqp, ESF_FZ_RX_L4_CLASS);
if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
/* Drop this event */
@@ -914,8 +943,8 @@ ef10_ev_rx(
last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;
/* Check for errors that invalidate checksum and L3/L4 fields */
- if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) {
- /* RX frame truncated (error flag is misnamed) */
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) {
+ /* RX frame truncated */
EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
flags |= EFX_DISCARD;
goto deliver;
@@ -951,10 +980,22 @@ ef10_ev_rx(
flags |= EFX_CKSUM_IPV4;
}
- if (l4_class == ESE_DZ_L4_CLASS_TCP) {
+ /*
+ * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is
+ * only 2 bits wide on Medford2. Check it is safe to use the
+ * Medford2 field and values for all EF10 controllers.
+ */
+ EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN ==
+ ESF_DE_RX_L4_CLASS_LBN);
+ EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
+ EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
+ EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN ==
+ ESE_DE_L4_CLASS_UNKNOWN);
+
+ if (l4_class == ESE_FZ_L4_CLASS_TCP) {
EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
flags |= EFX_PKT_TCP;
- } else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
+ } else if (l4_class == ESE_FZ_L4_CLASS_UDP) {
EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
flags |= EFX_PKT_UDP;
} else {
@@ -966,10 +1007,22 @@ ef10_ev_rx(
case ESE_DZ_L3_CLASS_IP6_FRAG:
flags |= EFX_PKT_IPV6;
- if (l4_class == ESE_DZ_L4_CLASS_TCP) {
+ /*
+ * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is
+ * only 2 bits wide on Medford2. Check it is safe to use the
+ * Medford2 field and values for all EF10 controllers.
+ */
+ EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN ==
+ ESF_DE_RX_L4_CLASS_LBN);
+ EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
+ EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
+ EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN ==
+ ESE_DE_L4_CLASS_UNKNOWN);
+
+ if (l4_class == ESE_FZ_L4_CLASS_TCP) {
EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
flags |= EFX_PKT_TCP;
- } else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
+ } else if (l4_class == ESE_FZ_L4_CLASS_UDP) {
EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
flags |= EFX_PKT_UDP;
} else {
@@ -1322,8 +1375,9 @@ ef10_ev_rxlabel_init(
__in efx_rxq_type_t type)
{
efx_evq_rxq_state_t *eersp;
-#if EFSYS_OPT_RX_PACKED_STREAM
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
boolean_t packed_stream = (type == EFX_RXQ_TYPE_PACKED_STREAM);
+ boolean_t es_super_buffer = (type == EFX_RXQ_TYPE_ES_SUPER_BUFFER);
#endif
_NOTE(ARGUNUSED(type))
@@ -1345,9 +1399,11 @@ ef10_ev_rxlabel_init(
eersp->eers_rx_read_ptr = 0;
#endif
eersp->eers_rx_mask = erp->er_mask;
-#if EFSYS_OPT_RX_PACKED_STREAM
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
eersp->eers_rx_stream_npackets = 0;
- eersp->eers_rx_packed_stream = packed_stream;
+ eersp->eers_rx_packed_stream = packed_stream || es_super_buffer;
+#endif
+#if EFSYS_OPT_RX_PACKED_STREAM
if (packed_stream) {
eersp->eers_rx_packed_stream_credits = (eep->ee_mask + 1) /
EFX_DIV_ROUND_UP(EFX_RX_PACKED_STREAM_MEM_PER_CREDIT,
@@ -1381,11 +1437,13 @@ ef10_ev_rxlabel_fini(
eersp->eers_rx_read_ptr = 0;
eersp->eers_rx_mask = 0;
-#if EFSYS_OPT_RX_PACKED_STREAM
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
eersp->eers_rx_stream_npackets = 0;
eersp->eers_rx_packed_stream = B_FALSE;
+#endif
+#if EFSYS_OPT_RX_PACKED_STREAM
eersp->eers_rx_packed_stream_credits = 0;
#endif
}
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
diff --git a/drivers/net/sfc/base/ef10_filter.c b/drivers/net/sfc/base/ef10_filter.c
index 27b59987..ae872853 100644
--- a/drivers/net/sfc/base/ef10_filter.c
+++ b/drivers/net/sfc/base/ef10_filter.c
@@ -7,7 +7,7 @@
#include "efx.h"
#include "efx_impl.h"
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
#if EFSYS_OPT_FILTER
@@ -95,7 +95,8 @@ ef10_filter_init(
ef10_filter_table_t *eftp;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
#define MATCH_MASK(match) (EFX_MASK32(match) << EFX_LOW_BIT(match))
EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_HOST ==
@@ -118,6 +119,10 @@ ef10_filter_init(
MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN));
EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IP_PROTO ==
MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_VNI_OR_VSID ==
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IFRM_LOC_MAC ==
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC));
EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST ==
MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST));
EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST ==
@@ -150,7 +155,8 @@ ef10_filter_fini(
__in efx_nic_t *enp)
{
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
if (enp->en_filter.ef_ef10_filter_table != NULL) {
EFSYS_KMEM_FREE(enp->en_esip, sizeof (ef10_filter_table_t),
@@ -166,17 +172,24 @@ efx_mcdi_filter_op_add(
__inout ef10_filter_handle_t *handle)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_FILTER_OP_EXT_IN_LEN,
+ uint8_t payload[MAX(MC_CMD_FILTER_OP_V3_IN_LEN,
MC_CMD_FILTER_OP_EXT_OUT_LEN)];
+ efx_filter_match_flags_t match_flags;
efx_rc_t rc;
memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_FILTER_OP;
req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_FILTER_OP_EXT_IN_LEN;
+ req.emr_in_length = MC_CMD_FILTER_OP_V3_IN_LEN;
req.emr_out_buf = payload;
req.emr_out_length = MC_CMD_FILTER_OP_EXT_OUT_LEN;
+ /*
+ * Remove the match flag for encapsulated filters, as it does not
+ * correspond to any of the MCDI match flags.
+ */
+ match_flags = spec->efs_match_flags & ~EFX_FILTER_MATCH_ENCAP_TYPE;
+
switch (filter_op) {
case MC_CMD_FILTER_OP_IN_OP_REPLACE:
MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_LO,
@@ -197,11 +210,16 @@ efx_mcdi_filter_op_add(
MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_PORT_ID,
EVB_PORT_ID_ASSIGNED);
MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_MATCH_FIELDS,
- spec->efs_match_flags);
- MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_DEST,
- MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST);
- MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_QUEUE,
- spec->efs_dmaq_id);
+ match_flags);
+ if (spec->efs_dmaq_id == EFX_FILTER_SPEC_RX_DMAQ_ID_DROP) {
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_DEST,
+ MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP);
+ } else {
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_DEST,
+ MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_QUEUE,
+ spec->efs_dmaq_id);
+ }
#if EFSYS_OPT_RX_SCALE
if (spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) {
@@ -290,18 +308,45 @@ efx_mcdi_filter_op_add(
rc = EINVAL;
goto fail2;
}
+
+ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_VNI_OR_VSID),
+ spec->efs_vni_or_vsid, EFX_VNI_OR_VSID_LEN);
+
+ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_IFRM_DST_MAC),
+ spec->efs_ifrm_loc_mac, EFX_MAC_ADDR_LEN);
+ }
+
+ /*
+ * Set the "MARK" or "FLAG" action for all packets matching this filter
+ * if necessary (only useful with equal stride packed stream Rx mode
+ * which provides the information in the pseudo-header).
+ * These actions require MC_CMD_FILTER_OP_V3_IN msgrequest.
+ */
+ if ((spec->efs_flags & EFX_FILTER_FLAG_ACTION_MARK) &&
+ (spec->efs_flags & EFX_FILTER_FLAG_ACTION_FLAG)) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ if (spec->efs_flags & EFX_FILTER_FLAG_ACTION_MARK) {
+ MCDI_IN_SET_DWORD(req, FILTER_OP_V3_IN_MATCH_ACTION,
+ MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_MARK);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_V3_IN_MATCH_MARK_VALUE,
+ spec->efs_mark);
+ } else if (spec->efs_flags & EFX_FILTER_FLAG_ACTION_FLAG) {
+ MCDI_IN_SET_DWORD(req, FILTER_OP_V3_IN_MATCH_ACTION,
+ MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_FLAG);
}
efx_mcdi_execute(enp, &req);
if (req.emr_rc != 0) {
rc = req.emr_rc;
- goto fail3;
+ goto fail4;
}
if (req.emr_out_length_used < MC_CMD_FILTER_OP_EXT_OUT_LEN) {
rc = EMSGSIZE;
- goto fail4;
+ goto fail5;
}
handle->efh_lo = MCDI_OUT_DWORD(req, FILTER_OP_EXT_OUT_HANDLE_LO);
@@ -309,6 +354,8 @@ efx_mcdi_filter_op_add(
return (0);
+fail5:
+ EFSYS_PROBE(fail5);
fail4:
EFSYS_PROBE(fail4);
fail3:
@@ -413,6 +460,12 @@ ef10_filter_equal(
return (B_FALSE);
if (left->efs_encap_type != right->efs_encap_type)
return (B_FALSE);
+ if (memcmp(left->efs_vni_or_vsid, right->efs_vni_or_vsid,
+ EFX_VNI_OR_VSID_LEN))
+ return (B_FALSE);
+ if (memcmp(left->efs_ifrm_loc_mac, right->efs_ifrm_loc_mac,
+ EFX_MAC_ADDR_LEN))
+ return (B_FALSE);
return (B_TRUE);
@@ -495,7 +548,8 @@ ef10_filter_restore(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
for (tbl_id = 0; tbl_id < EFX_EF10_FILTER_TBL_ROWS; tbl_id++) {
@@ -570,7 +624,8 @@ ef10_filter_add_internal(
boolean_t locked = B_FALSE;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
hash = ef10_filter_hash(spec);
@@ -842,7 +897,8 @@ ef10_filter_delete(
boolean_t locked = B_FALSE;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
hash = ef10_filter_hash(spec);
@@ -890,6 +946,7 @@ efx_mcdi_get_parser_disp_info(
__in efx_nic_t *enp,
__out_ecount(buffer_length) uint32_t *buffer,
__in size_t buffer_length,
+ __in boolean_t encap,
__out size_t *list_lengthp)
{
efx_mcdi_req_t req;
@@ -906,7 +963,8 @@ efx_mcdi_get_parser_disp_info(
req.emr_out_buf = payload;
req.emr_out_length = MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX;
- MCDI_IN_SET_DWORD(req, GET_PARSER_DISP_INFO_OUT_OP,
+ MCDI_IN_SET_DWORD(req, GET_PARSER_DISP_INFO_OUT_OP, encap ?
+ MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES :
MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
efx_mcdi_execute(enp, &req);
@@ -966,28 +1024,76 @@ ef10_filter_supported_filters(
__in size_t buffer_length,
__out size_t *list_lengthp)
{
-
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
size_t mcdi_list_length;
+ size_t mcdi_encap_list_length;
size_t list_length;
uint32_t i;
+ uint32_t next_buf_idx;
+ size_t next_buf_length;
efx_rc_t rc;
+ boolean_t no_space = B_FALSE;
efx_filter_match_flags_t all_filter_flags =
(EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_LOC_HOST |
EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_REM_PORT |
EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_PORT |
EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_INNER_VID |
EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_VNI_OR_VSID |
+ EFX_FILTER_MATCH_IFRM_LOC_MAC |
+ EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST |
+ EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST |
+ EFX_FILTER_MATCH_ENCAP_TYPE |
EFX_FILTER_MATCH_UNKNOWN_MCAST_DST |
EFX_FILTER_MATCH_UNKNOWN_UCAST_DST);
- rc = efx_mcdi_get_parser_disp_info(enp, buffer, buffer_length,
- &mcdi_list_length);
+ /*
+ * Two calls to MC_CMD_GET_PARSER_DISP_INFO are needed: one to get the
+ * list of supported filters for ordinary packets, and then another to
+ * get the list of supported filters for encapsulated packets. To
+ * distinguish the second list from the first, the
+ * EFX_FILTER_MATCH_ENCAP_TYPE flag is added to each filter for
+ * encapsulated packets.
+ */
+ rc = efx_mcdi_get_parser_disp_info(enp, buffer, buffer_length, B_FALSE,
+ &mcdi_list_length);
if (rc != 0) {
- if (rc == ENOSPC) {
- /* Pass through mcdi_list_length for the list length */
- *list_lengthp = mcdi_list_length;
+ if (rc == ENOSPC)
+ no_space = B_TRUE;
+ else
+ goto fail1;
+ }
+
+ if (no_space) {
+ next_buf_idx = 0;
+ next_buf_length = 0;
+ } else {
+ EFSYS_ASSERT(mcdi_list_length <= buffer_length);
+ next_buf_idx = mcdi_list_length;
+ next_buf_length = buffer_length - mcdi_list_length;
+ }
+
+ if (encp->enc_tunnel_encapsulations_supported != 0) {
+ rc = efx_mcdi_get_parser_disp_info(enp, &buffer[next_buf_idx],
+ next_buf_length, B_TRUE, &mcdi_encap_list_length);
+ if (rc != 0) {
+ if (rc == ENOSPC)
+ no_space = B_TRUE;
+ else
+ goto fail2;
+ } else {
+ for (i = next_buf_idx;
+ i < next_buf_idx + mcdi_encap_list_length; i++)
+ buffer[i] |= EFX_FILTER_MATCH_ENCAP_TYPE;
}
- goto fail1;
+ } else {
+ mcdi_encap_list_length = 0;
+ }
+
+ if (no_space) {
+ *list_lengthp = mcdi_list_length + mcdi_encap_list_length;
+ rc = ENOSPC;
+ goto fail3;
}
/*
@@ -1000,9 +1106,10 @@ ef10_filter_supported_filters(
* of the matches is preserved as they are ordered from highest to
* lowest priority.
*/
- EFSYS_ASSERT(mcdi_list_length <= buffer_length);
+ EFSYS_ASSERT(mcdi_list_length + mcdi_encap_list_length <=
+ buffer_length);
list_length = 0;
- for (i = 0; i < mcdi_list_length; i++) {
+ for (i = 0; i < mcdi_list_length + mcdi_encap_list_length; i++) {
if ((buffer[i] & ~all_filter_flags) == 0) {
buffer[list_length] = buffer[i];
list_length++;
@@ -1013,6 +1120,10 @@ ef10_filter_supported_filters(
return (0);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
@@ -1636,4 +1747,4 @@ ef10_filter_default_rxq_clear(
#endif /* EFSYS_OPT_FILTER */
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
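
As a rough illustration of the new match actions handled in efx_mcdi_filter_op_add() above: a caller requests them through the filter spec flags, and MARK additionally carries a value that equal stride super-buffer Rx mode returns in the packet pseudo-header. The helper below is a hypothetical sketch, not code from this patch; only the EFX_FILTER_FLAG_ACTION_MARK flag and the efs_flags/efs_mark fields come from the diff.

/* Hypothetical usage sketch: request that packets matching this filter
 * are marked with a caller-chosen value. MARK and FLAG are mutually
 * exclusive; efx_mcdi_filter_op_add() rejects a spec with both set.
 */
static void
example_spec_set_mark(efx_filter_spec_t *spec, uint32_t mark)
{
	spec->efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
	spec->efs_mark = mark;
}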
diff --git a/drivers/net/sfc/base/ef10_image.c b/drivers/net/sfc/base/ef10_image.c
new file mode 100644
index 00000000..6fb7e476
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_image.c
@@ -0,0 +1,885 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2017-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+
+#if EFSYS_OPT_IMAGE_LAYOUT
+
+/*
+ * Utility routines to support limited parsing of ASN.1 tags. This is not a
+ * general purpose ASN.1 parser, but is sufficient to locate the required
+ * objects in a signed image with CMS headers.
+ */
+
+/* DER encodings for ASN.1 tags (see ITU-T X.690) */
+#define ASN1_TAG_INTEGER (0x02)
+#define ASN1_TAG_OCTET_STRING (0x04)
+#define ASN1_TAG_OBJ_ID (0x06)
+#define ASN1_TAG_SEQUENCE (0x30)
+#define ASN1_TAG_SET (0x31)
+
+#define ASN1_TAG_IS_PRIM(tag) ((tag & 0x20) == 0)
+
+#define ASN1_TAG_PRIM_CONTEXT(n) (0x80 + (n))
+#define ASN1_TAG_CONS_CONTEXT(n) (0xA0 + (n))
+
+typedef struct efx_asn1_cursor_s {
+ uint8_t *buffer;
+ uint32_t length;
+
+ uint8_t tag;
+ uint32_t hdr_size;
+ uint32_t val_size;
+} efx_asn1_cursor_t;
+
+
+/* Parse header of DER encoded ASN.1 TLV and match tag */
+static __checkReturn efx_rc_t
+efx_asn1_parse_header_match_tag(
+ __inout efx_asn1_cursor_t *cursor,
+ __in uint8_t tag)
+{
+ efx_rc_t rc;
+
+ if (cursor == NULL || cursor->buffer == NULL || cursor->length < 2) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ cursor->tag = cursor->buffer[0];
+ if (cursor->tag != tag) {
+ /* Tag not matched */
+ rc = ENOENT;
+ goto fail2;
+ }
+
+ if ((cursor->tag & 0x1F) == 0x1F) {
+ /* Long tag format not used in CMS syntax */
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ if ((cursor->buffer[1] & 0x80) == 0) {
+ /* Short form: length is 0..127 */
+ cursor->hdr_size = 2;
+ cursor->val_size = cursor->buffer[1];
+ } else {
+ /* Long form: length encoded as [0x80+nbytes][length bytes] */
+ uint32_t nbytes = cursor->buffer[1] & 0x7F;
+ uint32_t offset;
+
+ if (nbytes == 0) {
+ /* Indefinite length not allowed in DER encoding */
+ rc = EINVAL;
+ goto fail4;
+ }
+ if (2 + nbytes > cursor->length) {
+ /* Header length overflows image buffer */
+ rc = EINVAL;
+ goto fail6;
+ }
+ if (nbytes > sizeof (uint32_t)) {
+ /* Length encoding too big */
+ rc = E2BIG;
+ goto fail5;
+ }
+ cursor->hdr_size = 2 + nbytes;
+ cursor->val_size = 0;
+ for (offset = 2; offset < cursor->hdr_size; offset++) {
+ cursor->val_size =
+ (cursor->val_size << 8) | cursor->buffer[offset];
+ }
+ }
+
+ if ((cursor->hdr_size + cursor->val_size) > cursor->length) {
+ /* Length overflows image buffer */
+ rc = E2BIG;
+ goto fail7;
+ }
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
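
A brief worked example of the two DER length forms handled above (illustrative only, not part of the patch): the header bytes 30 82 01 F4 describe a SEQUENCE (tag 0x30) using the long form, where 0x82 announces two following length bytes, so hdr_size is 4 and val_size is 0x01F4 = 500; the header 04 10 describes an OCTET STRING using the short form, with hdr_size 2 and val_size 16. The PKCS#7 object identifiers defined further down in this file use a related multi-byte encoding: in 1.2.840.113549.1.7.2 the first byte 0x2A is 40*1 + 2, the arc 840 becomes 0x86 0x48 and 113549 becomes 0x86 0xF7 0x0D (base-128 digits with the top bit as a continuation flag), and the remaining arcs 1, 7, 2 map to single bytes, giving the byte string in PKCS7_SignedData.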
+
+/* Enter nested ASN.1 TLV (contained in value of current TLV) */
+static __checkReturn efx_rc_t
+efx_asn1_enter_tag(
+ __inout efx_asn1_cursor_t *cursor,
+ __in uint8_t tag)
+{
+ efx_rc_t rc;
+
+ if (cursor == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (ASN1_TAG_IS_PRIM(tag)) {
+ /* Cannot enter a primitive tag */
+ rc = ENOTSUP;
+ goto fail2;
+ }
+ rc = efx_asn1_parse_header_match_tag(cursor, tag);
+ if (rc != 0) {
+ /* Invalid TLV or wrong tag */
+ goto fail3;
+ }
+
+ /* Limit cursor range to nested TLV */
+ cursor->buffer += cursor->hdr_size;
+ cursor->length = cursor->val_size;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Check that the current ASN.1 TLV matches the given tag and value.
+ * Advance cursor to next TLV on a successful match.
+ */
+static __checkReturn efx_rc_t
+efx_asn1_match_tag_value(
+ __inout efx_asn1_cursor_t *cursor,
+ __in uint8_t tag,
+ __in const void *valp,
+ __in uint32_t val_size)
+{
+ efx_rc_t rc;
+
+ if (cursor == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ rc = efx_asn1_parse_header_match_tag(cursor, tag);
+ if (rc != 0) {
+ /* Invalid TLV or wrong tag */
+ goto fail2;
+ }
+ if (cursor->val_size != val_size) {
+ /* Value size is different */
+ rc = EINVAL;
+ goto fail3;
+ }
+ if (memcmp(cursor->buffer + cursor->hdr_size, valp, val_size) != 0) {
+ /* Value content is different */
+ rc = EINVAL;
+ goto fail4;
+ }
+ cursor->buffer += cursor->hdr_size + cursor->val_size;
+ cursor->length -= cursor->hdr_size + cursor->val_size;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Advance cursor to next TLV */
+static __checkReturn efx_rc_t
+efx_asn1_skip_tag(
+ __inout efx_asn1_cursor_t *cursor,
+ __in uint8_t tag)
+{
+ efx_rc_t rc;
+
+ if (cursor == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ rc = efx_asn1_parse_header_match_tag(cursor, tag);
+ if (rc != 0) {
+ /* Invalid TLV or wrong tag */
+ goto fail2;
+ }
+ cursor->buffer += cursor->hdr_size + cursor->val_size;
+ cursor->length -= cursor->hdr_size + cursor->val_size;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Return pointer to value octets and value size from current TLV */
+static __checkReturn efx_rc_t
+efx_asn1_get_tag_value(
+ __inout efx_asn1_cursor_t *cursor,
+ __in uint8_t tag,
+ __out uint8_t **valp,
+ __out uint32_t *val_sizep)
+{
+ efx_rc_t rc;
+
+ if (cursor == NULL || valp == NULL || val_sizep == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ rc = efx_asn1_parse_header_match_tag(cursor, tag);
+ if (rc != 0) {
+ /* Invalid TLV or wrong tag */
+ goto fail2;
+ }
+ *valp = cursor->buffer + cursor->hdr_size;
+ *val_sizep = cursor->val_size;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+/*
+ * Utility routines for parsing CMS headers (see RFC2315, PKCS#7)
+ */
+
+/* OID 1.2.840.113549.1.7.2 */
+static const uint8_t PKCS7_SignedData[] =
+{ 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x07, 0x02 };
+
+/* OID 1.2.840.113549.1.7.1 */
+static const uint8_t PKCS7_Data[] =
+{ 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x07, 0x01 };
+
+/* SignedData structure version */
+static const uint8_t SignedData_Version[] =
+{ 0x03 };
+
+/*
+ * Check for a valid image in signed image format. This uses CMS syntax
+ * (see RFC2315, PKCS#7) to provide signatures, and certificates required
+ * to validate the signatures. The encapsulated content is in unsigned image
+ * format (reflash header, image code, trailer checksum).
+ */
+static __checkReturn efx_rc_t
+efx_check_signed_image_header(
+ __in void *bufferp,
+ __in uint32_t buffer_size,
+ __out uint32_t *content_offsetp,
+ __out uint32_t *content_lengthp)
+{
+ efx_asn1_cursor_t cursor;
+ uint8_t *valp;
+ uint32_t val_size;
+ efx_rc_t rc;
+
+ if (content_offsetp == NULL || content_lengthp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ cursor.buffer = (uint8_t *)bufferp;
+ cursor.length = buffer_size;
+
+ /* ContextInfo */
+ rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_SEQUENCE);
+ if (rc != 0)
+ goto fail2;
+
+ /* ContextInfo.contentType */
+ rc = efx_asn1_match_tag_value(&cursor, ASN1_TAG_OBJ_ID,
+ PKCS7_SignedData, sizeof (PKCS7_SignedData));
+ if (rc != 0)
+ goto fail3;
+
+ /* ContextInfo.content */
+ rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_CONS_CONTEXT(0));
+ if (rc != 0)
+ goto fail4;
+
+ /* SignedData */
+ rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_SEQUENCE);
+ if (rc != 0)
+ goto fail5;
+
+ /* SignedData.version */
+ rc = efx_asn1_match_tag_value(&cursor, ASN1_TAG_INTEGER,
+ SignedData_Version, sizeof (SignedData_Version));
+ if (rc != 0)
+ goto fail6;
+
+ /* SignedData.digestAlgorithms */
+ rc = efx_asn1_skip_tag(&cursor, ASN1_TAG_SET);
+ if (rc != 0)
+ goto fail7;
+
+ /* SignedData.encapContentInfo */
+ rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_SEQUENCE);
+ if (rc != 0)
+ goto fail8;
+
+ /* SignedData.encapContentInfo.econtentType */
+ rc = efx_asn1_match_tag_value(&cursor, ASN1_TAG_OBJ_ID,
+ PKCS7_Data, sizeof (PKCS7_Data));
+ if (rc != 0)
+ goto fail9;
+
+ /* SignedData.encapContentInfo.econtent */
+ rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_CONS_CONTEXT(0));
+ if (rc != 0)
+ goto fail10;
+
+ /*
+ * The octet string contains the image header, image code bytes and
+ * image trailer CRC (same as unsigned image layout).
+ */
+ valp = NULL;
+ val_size = 0;
+ rc = efx_asn1_get_tag_value(&cursor, ASN1_TAG_OCTET_STRING,
+ &valp, &val_size);
+ if (rc != 0)
+ goto fail11;
+
+ if ((valp == NULL) || (val_size == 0)) {
+ rc = EINVAL;
+ goto fail12;
+ }
+ if (valp < (uint8_t *)bufferp) {
+ rc = EINVAL;
+ goto fail13;
+ }
+ if ((valp + val_size) > ((uint8_t *)bufferp + buffer_size)) {
+ rc = EINVAL;
+ goto fail14;
+ }
+
+ *content_offsetp = (uint32_t)(valp - (uint8_t *)bufferp);
+ *content_lengthp = val_size;
+
+ return (0);
+
+fail14:
+ EFSYS_PROBE(fail14);
+fail13:
+ EFSYS_PROBE(fail13);
+fail12:
+ EFSYS_PROBE(fail12);
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
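
For orientation, the nesting that the function above steps through corresponds to this CMS (RFC 2315 / PKCS#7) outline; it is a summary of the parsing path, not additional structure taken from elsewhere:

ContentInfo ::= SEQUENCE
  contentType     OBJECT IDENTIFIER   -- pkcs7-signedData (1.2.840.113549.1.7.2)
  [0] content
    SignedData ::= SEQUENCE
      version            INTEGER      -- 3
      digestAlgorithms   SET          -- skipped
      encapContentInfo ::= SEQUENCE
        eContentType     OBJECT IDENTIFIER   -- pkcs7-data (1.2.840.113549.1.7.1)
        [0] eContent     OCTET STRING -- unsigned image: header, code, trailer CRC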
+
+static __checkReturn efx_rc_t
+efx_check_unsigned_image(
+ __in void *bufferp,
+ __in uint32_t buffer_size)
+{
+ efx_image_header_t *header;
+ efx_image_trailer_t *trailer;
+ uint32_t crc;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(sizeof (*header) == EFX_IMAGE_HEADER_SIZE);
+ EFX_STATIC_ASSERT(sizeof (*trailer) == EFX_IMAGE_TRAILER_SIZE);
+
+ /* Must have at least enough space for required image header fields */
+ if (buffer_size < (EFX_FIELD_OFFSET(efx_image_header_t, eih_size) +
+ sizeof (header->eih_size))) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+ header = (efx_image_header_t *)bufferp;
+
+ if (header->eih_magic != EFX_IMAGE_HEADER_MAGIC) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ /*
+ * Check that the image header version is the same as or higher than
+ * the lowest required version.
+ */
+ if (header->eih_version < EFX_IMAGE_HEADER_VERSION) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ /* Buffer must have space for image header, code and image trailer. */
+ if (buffer_size < (header->eih_size + header->eih_code_size +
+ EFX_IMAGE_TRAILER_SIZE)) {
+ rc = ENOSPC;
+ goto fail4;
+ }
+
+ /* Check CRC from image buffer matches computed CRC. */
+ trailer = (efx_image_trailer_t *)((uint8_t *)header +
+ header->eih_size + header->eih_code_size);
+
+ crc = efx_crc32_calculate(0, (uint8_t *)header,
+ (header->eih_size + header->eih_code_size));
+
+ if (trailer->eit_crc != crc) {
+ rc = EINVAL;
+ goto fail5;
+ }
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
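
The unsigned layout checked above is simply the image header, the code bytes, and a trailing CRC computed over header plus code. A hedged sketch of producing that trailer when assembling such an image follows; the helper is hypothetical, while the field names and the efx_crc32_calculate() call mirror the validation above.

/* Illustrative sketch, not part of this patch: append the trailer CRC to
 * an unsigned image laid out as header followed by code.
 */
static void
example_append_trailer(uint8_t *image)
{
	const efx_image_header_t *hdr = (const efx_image_header_t *)image;
	efx_image_trailer_t *trailer;
	uint32_t body_size = hdr->eih_size + hdr->eih_code_size;

	trailer = (efx_image_trailer_t *)(image + body_size);
	trailer->eit_crc = efx_crc32_calculate(0, image, body_size);
}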
+
+ __checkReturn efx_rc_t
+efx_check_reflash_image(
+ __in void *bufferp,
+ __in uint32_t buffer_size,
+ __out efx_image_info_t *infop)
+{
+ efx_image_format_t format = EFX_IMAGE_FORMAT_NO_IMAGE;
+ uint32_t image_offset;
+ uint32_t image_size;
+ void *imagep;
+ efx_rc_t rc;
+
+
+ EFSYS_ASSERT(infop != NULL);
+ if (infop == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ memset(infop, 0, sizeof (*infop));
+
+ if (bufferp == NULL || buffer_size == 0) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ /*
+ * Check if the buffer contains an image in signed format, and if so,
+ * locate the image header.
+ */
+ rc = efx_check_signed_image_header(bufferp, buffer_size,
+ &image_offset, &image_size);
+ if (rc == 0) {
+ /*
+ * Buffer holds signed image format. Check that the encapsulated
+ * content is in unsigned image format.
+ */
+ format = EFX_IMAGE_FORMAT_SIGNED;
+ } else {
+ /* Check if the buffer holds image in unsigned image format */
+ format = EFX_IMAGE_FORMAT_UNSIGNED;
+ image_offset = 0;
+ image_size = buffer_size;
+ }
+ if (image_offset + image_size > buffer_size) {
+ rc = E2BIG;
+ goto fail3;
+ }
+ imagep = (uint8_t *)bufferp + image_offset;
+
+ /* Check unsigned image layout (image header, code, image trailer) */
+ rc = efx_check_unsigned_image(imagep, image_size);
+ if (rc != 0)
+ goto fail4;
+
+ /* Return image details */
+ infop->eii_format = format;
+ infop->eii_imagep = bufferp;
+ infop->eii_image_size = buffer_size;
+ infop->eii_headerp = (efx_image_header_t *)imagep;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+ infop->eii_format = EFX_IMAGE_FORMAT_INVALID;
+ infop->eii_imagep = NULL;
+ infop->eii_image_size = 0;
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_build_signed_image_write_buffer(
+ __out uint8_t *bufferp,
+ __in uint32_t buffer_size,
+ __in efx_image_info_t *infop,
+ __out efx_image_header_t **headerpp)
+{
+ signed_image_chunk_hdr_t chunk_hdr;
+ uint32_t hdr_offset;
+ struct {
+ uint32_t offset;
+ uint32_t size;
+ } cms_header, image_header, code, image_trailer, signature;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT((infop != NULL) && (headerpp != NULL));
+
+ if ((bufferp == NULL) || (buffer_size == 0) ||
+ (infop == NULL) || (headerpp == NULL)) {
+ /* Invalid arguments */
+ rc = EINVAL;
+ goto fail1;
+ }
+ if ((infop->eii_format != EFX_IMAGE_FORMAT_SIGNED) ||
+ (infop->eii_imagep == NULL) ||
+ (infop->eii_headerp == NULL) ||
+ ((uint8_t *)infop->eii_headerp < (uint8_t *)infop->eii_imagep) ||
+ (infop->eii_image_size < EFX_IMAGE_HEADER_SIZE) ||
+ ((size_t)((uint8_t *)infop->eii_headerp - infop->eii_imagep) >
+ (infop->eii_image_size - EFX_IMAGE_HEADER_SIZE))) {
+ /* Invalid image info */
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ /* Locate image chunks in original signed image */
+ cms_header.offset = 0;
+ cms_header.size =
+ (uint32_t)((uint8_t *)infop->eii_headerp - infop->eii_imagep);
+ if ((cms_header.size > buffer_size) ||
+ (cms_header.offset > (buffer_size - cms_header.size))) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ image_header.offset = cms_header.offset + cms_header.size;
+ image_header.size = infop->eii_headerp->eih_size;
+ if ((image_header.size > buffer_size) ||
+ (image_header.offset > (buffer_size - image_header.size))) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ code.offset = image_header.offset + image_header.size;
+ code.size = infop->eii_headerp->eih_code_size;
+ if ((code.size > buffer_size) ||
+ (code.offset > (buffer_size - code.size))) {
+ rc = EINVAL;
+ goto fail5;
+ }
+
+ image_trailer.offset = code.offset + code.size;
+ image_trailer.size = EFX_IMAGE_TRAILER_SIZE;
+ if ((image_trailer.size > buffer_size) ||
+ (image_trailer.offset > (buffer_size - image_trailer.size))) {
+ rc = EINVAL;
+ goto fail6;
+ }
+
+ signature.offset = image_trailer.offset + image_trailer.size;
+ signature.size = (uint32_t)(infop->eii_image_size - signature.offset);
+ if ((signature.size > buffer_size) ||
+ (signature.offset > (buffer_size - signature.size))) {
+ rc = EINVAL;
+ goto fail7;
+ }
+
+ EFSYS_ASSERT3U(infop->eii_image_size, ==, cms_header.size +
+ image_header.size + code.size + image_trailer.size +
+ signature.size);
+
+ /* BEGIN CSTYLED */
+ /*
+ * Build signed image partition, inserting chunk headers.
+ *
+ * Signed Image: Image in NVRAM partition:
+ *
+ * +-----------------+ +-----------------+
+ * | CMS header | | mcfw.update |<----+
+ * +-----------------+ | | |
+ * | reflash header | +-----------------+ |
+ * +-----------------+ | chunk header: |-->--|-+
+ * | mcfw.update | | REFLASH_TRAILER | | |
+ * | | +-----------------+ | |
+ * +-----------------+ +-->| CMS header | | |
+ * | reflash trailer | | +-----------------+ | |
+ * +-----------------+ | | chunk header: |->-+ | |
+ * | signature | | | REFLASH_HEADER | | | |
+ * +-----------------+ | +-----------------+ | | |
+ * | | reflash header |<--+ | |
+ * | +-----------------+ | |
+ * | | chunk header: |-->--+ |
+ * | | IMAGE | |
+ * | +-----------------+ |
+ * | | reflash trailer |<------+
+ * | +-----------------+
+ * | | chunk header: |
+ * | | SIGNATURE |->-+
+ * | +-----------------+ |
+ * | | signature |<--+
+ * | +-----------------+
+ * | | ...unused... |
+ * | +-----------------+
+ * +-<-| chunk header: |
+ * >-->| CMS_HEADER |
+ * +-----------------+
+ *
+ * Each chunk header gives the partition offset and length of the image
+ * chunk's data. The image chunk data is immediately followed by the
+ * chunk header for the next chunk.
+ *
+ * The data chunk for the firmware code must be at the start of the
+ * partition (needed for the bootloader). The first chunk header in the
+ * chain (for the CMS header) is stored at the end of the partition. The
+ * chain of chunk headers maintains the same logical order of image
+ * chunks as the original signed image file. This set of constraints
+ * results in the layout used for the data chunks and chunk headers.
+ */
+ /* END CSTYLED */
+ memset(bufferp, 0xFF, buffer_size);
+
+ EFX_STATIC_ASSERT(sizeof (chunk_hdr) == SIGNED_IMAGE_CHUNK_HDR_LEN);
+ memset(&chunk_hdr, 0, SIGNED_IMAGE_CHUNK_HDR_LEN);
+
+ /*
+ * CMS header
+ */
+ if (buffer_size < SIGNED_IMAGE_CHUNK_HDR_LEN) {
+ rc = ENOSPC;
+ goto fail8;
+ }
+ hdr_offset = buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN;
+
+ chunk_hdr.magic = SIGNED_IMAGE_CHUNK_HDR_MAGIC;
+ chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION;
+ chunk_hdr.id = SIGNED_IMAGE_CHUNK_CMS_HEADER;
+ chunk_hdr.offset = code.size + SIGNED_IMAGE_CHUNK_HDR_LEN;
+ chunk_hdr.len = cms_header.size;
+
+ memcpy(bufferp + hdr_offset, &chunk_hdr, sizeof (chunk_hdr));
+
+ if ((chunk_hdr.len > buffer_size) ||
+ (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) {
+ rc = ENOSPC;
+ goto fail9;
+ }
+ memcpy(bufferp + chunk_hdr.offset,
+ infop->eii_imagep + cms_header.offset,
+ cms_header.size);
+
+ /*
+ * Image header
+ */
+ hdr_offset = chunk_hdr.offset + chunk_hdr.len;
+ if (hdr_offset > (buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN)) {
+ rc = ENOSPC;
+ goto fail10;
+ }
+ chunk_hdr.magic = SIGNED_IMAGE_CHUNK_HDR_MAGIC;
+ chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION;
+ chunk_hdr.id = SIGNED_IMAGE_CHUNK_REFLASH_HEADER;
+ chunk_hdr.offset = hdr_offset + SIGNED_IMAGE_CHUNK_HDR_LEN;
+ chunk_hdr.len = image_header.size;
+
+ memcpy(bufferp + hdr_offset, &chunk_hdr, SIGNED_IMAGE_CHUNK_HDR_LEN);
+
+ if ((chunk_hdr.len > buffer_size) ||
+ (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) {
+ rc = ENOSPC;
+ goto fail11;
+ }
+ memcpy(bufferp + chunk_hdr.offset,
+ infop->eii_imagep + image_header.offset,
+ image_header.size);
+
+ *headerpp = (efx_image_header_t *)(bufferp + chunk_hdr.offset);
+
+ /*
+ * Firmware code
+ */
+ hdr_offset = chunk_hdr.offset + chunk_hdr.len;
+ if (hdr_offset > (buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN)) {
+ rc = ENOSPC;
+ goto fail12;
+ }
+ chunk_hdr.magic = SIGNED_IMAGE_CHUNK_HDR_MAGIC;
+ chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION;
+ chunk_hdr.id = SIGNED_IMAGE_CHUNK_IMAGE;
+ chunk_hdr.offset = 0;
+ chunk_hdr.len = code.size;
+
+ memcpy(bufferp + hdr_offset, &chunk_hdr, SIGNED_IMAGE_CHUNK_HDR_LEN);
+
+ if ((chunk_hdr.len > buffer_size) ||
+ (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) {
+ rc = ENOSPC;
+ goto fail13;
+ }
+ memcpy(bufferp + chunk_hdr.offset,
+ infop->eii_imagep + code.offset,
+ code.size);
+
+ /*
+ * Image trailer (CRC)
+ */
+ chunk_hdr.magic = SIGNED_IMAGE_CHUNK_HDR_MAGIC;
+ chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION;
+ chunk_hdr.id = SIGNED_IMAGE_CHUNK_REFLASH_TRAILER;
+ chunk_hdr.offset = hdr_offset + SIGNED_IMAGE_CHUNK_HDR_LEN;
+ chunk_hdr.len = image_trailer.size;
+
+ hdr_offset = code.size;
+ if (hdr_offset > (buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN)) {
+ rc = ENOSPC;
+ goto fail14;
+ }
+
+ memcpy(bufferp + hdr_offset, &chunk_hdr, SIGNED_IMAGE_CHUNK_HDR_LEN);
+
+ if ((chunk_hdr.len > buffer_size) ||
+ (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) {
+ rc = ENOSPC;
+ goto fail15;
+ }
+ memcpy((uint8_t *)bufferp + chunk_hdr.offset,
+ infop->eii_imagep + image_trailer.offset,
+ image_trailer.size);
+
+ /*
+ * Signature
+ */
+ hdr_offset = chunk_hdr.offset + chunk_hdr.len;
+ if (hdr_offset > (buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN)) {
+ rc = ENOSPC;
+ goto fail16;
+ }
+ chunk_hdr.magic = SIGNED_IMAGE_CHUNK_HDR_MAGIC;
+ chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION;
+ chunk_hdr.id = SIGNED_IMAGE_CHUNK_SIGNATURE;
+ chunk_hdr.offset = hdr_offset + SIGNED_IMAGE_CHUNK_HDR_LEN;
+ chunk_hdr.len = signature.size;
+
+ memcpy(bufferp + hdr_offset, &chunk_hdr, SIGNED_IMAGE_CHUNK_HDR_LEN);
+
+ if ((chunk_hdr.len > buffer_size) ||
+ (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) {
+ rc = ENOSPC;
+ goto fail17;
+ }
+ memcpy(bufferp + chunk_hdr.offset,
+ infop->eii_imagep + signature.offset,
+ signature.size);
+
+ return (0);
+
+fail17:
+ EFSYS_PROBE(fail17);
+fail16:
+ EFSYS_PROBE(fail16);
+fail15:
+ EFSYS_PROBE(fail15);
+fail14:
+ EFSYS_PROBE(fail14);
+fail13:
+ EFSYS_PROBE(fail13);
+fail12:
+ EFSYS_PROBE(fail12);
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+
+#endif /* EFSYS_OPT_IMAGE_LAYOUT */
+
+#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
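
To make the partition layout in the diagram above concrete, here is a hedged sketch of a reader that follows the chunk-header chain built by efx_build_signed_image_write_buffer(); the function itself is hypothetical, while the signed_image_chunk_hdr_t fields and SIGNED_IMAGE_CHUNK_* constants are those used in the code above.

/* Illustrative sketch, not part of this patch: walk the five chunk
 * headers of an in-partition signed image. The chain starts with the
 * CMS_HEADER chunk header stored at the end of the partition, and each
 * chunk's data is immediately followed by the next chunk header.
 */
static __checkReturn efx_rc_t
example_walk_chunks(const uint8_t *partn, uint32_t partn_size)
{
	signed_image_chunk_hdr_t hdr;
	uint32_t hdr_offset = partn_size - SIGNED_IMAGE_CHUNK_HDR_LEN;
	unsigned int n;

	for (n = 0; n < 5; n++) {
		memcpy(&hdr, partn + hdr_offset, sizeof (hdr));
		if (hdr.magic != SIGNED_IMAGE_CHUNK_HDR_MAGIC)
			return (EINVAL);
		if (hdr.offset > partn_size ||
		    hdr.len > partn_size - hdr.offset)
			return (EINVAL);
		/* hdr.id identifies the chunk (CMS_HEADER, REFLASH_HEADER,
		 * IMAGE, REFLASH_TRAILER, SIGNATURE); hdr.offset/hdr.len
		 * locate its data within the partition.
		 */
		hdr_offset = hdr.offset + hdr.len;
	}
	return (0);
}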
diff --git a/drivers/net/sfc/base/ef10_impl.h b/drivers/net/sfc/base/ef10_impl.h
index e79f4d53..4751faf1 100644
--- a/drivers/net/sfc/base/ef10_impl.h
+++ b/drivers/net/sfc/base/ef10_impl.h
@@ -11,13 +11,27 @@
extern "C" {
#endif
-#if (EFSYS_OPT_HUNTINGTON && EFSYS_OPT_MEDFORD)
-#define EF10_MAX_PIOBUF_NBUFS MAX(HUNT_PIOBUF_NBUFS, MEDFORD_PIOBUF_NBUFS)
-#elif EFSYS_OPT_HUNTINGTON
-#define EF10_MAX_PIOBUF_NBUFS HUNT_PIOBUF_NBUFS
-#elif EFSYS_OPT_MEDFORD
-#define EF10_MAX_PIOBUF_NBUFS MEDFORD_PIOBUF_NBUFS
-#endif
+
+/* Number of hardware PIO buffers (for compile-time resource dimensions) */
+#define EF10_MAX_PIOBUF_NBUFS (16)
+
+#if EFSYS_OPT_HUNTINGTON
+# if (EF10_MAX_PIOBUF_NBUFS < HUNT_PIOBUF_NBUFS)
+# error "EF10_MAX_PIOBUF_NBUFS too small"
+# endif
+#endif /* EFSYS_OPT_HUNTINGTON */
+#if EFSYS_OPT_MEDFORD
+# if (EF10_MAX_PIOBUF_NBUFS < MEDFORD_PIOBUF_NBUFS)
+# error "EF10_MAX_PIOBUF_NBUFS too small"
+# endif
+#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+# if (EF10_MAX_PIOBUF_NBUFS < MEDFORD2_PIOBUF_NBUFS)
+# error "EF10_MAX_PIOBUF_NBUFS too small"
+# endif
+#endif /* EFSYS_OPT_MEDFORD2 */
+
+
/*
* FIXME: This is just a power of 2 which fits in an MCDI v1 message, and could
@@ -742,6 +756,7 @@ extern void
ef10_tx_qdesc_tso2_create(
__in efx_txq_t *etp,
__in uint16_t ipv4_id,
+ __in uint16_t outer_ipv4_id,
__in uint32_t tcp_seq,
__in uint16_t tcp_mss,
__out_ecount(count) efx_desc_t *edp,
@@ -753,6 +768,11 @@ ef10_tx_qdesc_vlantci_create(
__in uint16_t vlan_tci,
__out efx_desc_t *edp);
+extern void
+ef10_tx_qdesc_checksum_create(
+ __in efx_txq_t *etp,
+ __in uint16_t flags,
+ __out efx_desc_t *edp);
#if EFSYS_OPT_QSTATS
@@ -947,13 +967,15 @@ extern void
ef10_rx_qenable(
__in efx_rxq_t *erp);
+union efx_rxq_type_data_u;
+
extern __checkReturn efx_rc_t
ef10_rx_qcreate(
__in efx_nic_t *enp,
__in unsigned int index,
__in unsigned int label,
__in efx_rxq_type_t type,
- __in uint32_t type_data,
+ __in const union efx_rxq_type_data_u *type_data,
__in efsys_mem_t *esmp,
__in size_t ndescs,
__in uint32_t id,
@@ -1131,26 +1153,38 @@ efx_mcdi_get_clock(
extern __checkReturn efx_rc_t
+efx_mcdi_get_rxdp_config(
+ __in efx_nic_t *enp,
+ __out uint32_t *end_paddingp);
+
+extern __checkReturn efx_rc_t
efx_mcdi_get_vector_cfg(
__in efx_nic_t *enp,
__out_opt uint32_t *vec_basep,
__out_opt uint32_t *pf_nvecp,
__out_opt uint32_t *vf_nvecp);
-extern __checkReturn efx_rc_t
-ef10_get_datapath_caps(
- __in efx_nic_t *enp);
-
extern __checkReturn efx_rc_t
ef10_get_privilege_mask(
__in efx_nic_t *enp,
__out uint32_t *maskp);
+#if EFSYS_OPT_FW_SUBVARIANT_AWARE
+
extern __checkReturn efx_rc_t
-ef10_external_port_mapping(
+efx_mcdi_get_nic_global(
__in efx_nic_t *enp,
- __in uint32_t port,
- __out uint8_t *external_portp);
+ __in uint32_t key,
+ __out uint32_t *valuep);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_set_nic_global(
+ __in efx_nic_t *enp,
+ __in uint32_t key,
+ __in uint32_t value);
+
+#endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */
+
#if EFSYS_OPT_RX_PACKED_STREAM
@@ -1182,6 +1216,16 @@ ef10_external_port_mapping(
#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+#if EFSYS_OPT_RX_ES_SUPER_BUFFER
+
+/*
+ * Maximum DMA length and buffer stride alignment.
+ * (see SF-119419-TC, 3.2)
+ */
+#define EFX_RX_ES_SUPER_BUFFER_BUF_ALIGNMENT 64
+
+#endif
+
#ifdef __cplusplus
}
#endif
diff --git a/drivers/net/sfc/base/ef10_intr.c b/drivers/net/sfc/base/ef10_intr.c
index f79c44e3..1ffe266b 100644
--- a/drivers/net/sfc/base/ef10_intr.c
+++ b/drivers/net/sfc/base/ef10_intr.c
@@ -8,7 +8,7 @@
#include "efx_impl.h"
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
__checkReturn efx_rc_t
ef10_intr_init(
@@ -56,7 +56,8 @@ efx_mcdi_trigger_interrupt(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
if (level >= enp->en_nic_cfg.enc_intr_limit) {
rc = EINVAL;
@@ -129,7 +130,8 @@ ef10_intr_status_line(
efx_dword_t dword;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
/* Read the queue mask and implicitly acknowledge the interrupt. */
EFX_BAR_READD(enp, ER_DZ_BIU_INT_ISR_REG, &dword, B_FALSE);
@@ -147,7 +149,8 @@ ef10_intr_status_message(
__out boolean_t *fatalp)
{
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
_NOTE(ARGUNUSED(enp, message))
@@ -170,4 +173,4 @@ ef10_intr_fini(
_NOTE(ARGUNUSED(enp))
}
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
diff --git a/drivers/net/sfc/base/ef10_mac.c b/drivers/net/sfc/base/ef10_mac.c
index db7692ee..1031e836 100644
--- a/drivers/net/sfc/base/ef10_mac.c
+++ b/drivers/net/sfc/base/ef10_mac.c
@@ -8,7 +8,7 @@
#include "efx_impl.h"
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
__checkReturn efx_rc_t
ef10_mac_poll(
@@ -356,7 +356,8 @@ ef10_mac_multicast_list_set(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
if ((rc = emop->emo_reconfigure(enp)) != 0)
goto fail1;
@@ -522,8 +523,44 @@ ef10_mac_stats_get_mask(
goto fail6;
}
+ if (encp->enc_fec_counters) {
+ const struct efx_mac_stats_range ef10_fec[] = {
+ { EFX_MAC_FEC_UNCORRECTED_ERRORS,
+ EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE3 },
+ };
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ ef10_fec, EFX_ARRAY_SIZE(ef10_fec))) != 0)
+ goto fail7;
+ }
+
+ if (encp->enc_mac_stats_nstats >= MC_CMD_MAC_NSTATS_V4) {
+ const struct efx_mac_stats_range ef10_rxdp_sdt[] = {
+ { EFX_MAC_RXDP_SCATTER_DISABLED_TRUNC,
+ EFX_MAC_RXDP_SCATTER_DISABLED_TRUNC },
+ };
+
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ ef10_rxdp_sdt, EFX_ARRAY_SIZE(ef10_rxdp_sdt))) != 0)
+ goto fail8;
+ }
+
+ if (encp->enc_hlb_counters) {
+ const struct efx_mac_stats_range ef10_hlb[] = {
+ { EFX_MAC_RXDP_HLB_IDLE, EFX_MAC_RXDP_HLB_TIMEOUT },
+ };
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ ef10_hlb, EFX_ARRAY_SIZE(ef10_hlb))) != 0)
+ goto fail9;
+ }
+
return (0);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
fail6:
EFSYS_PROBE(fail6);
fail5:
@@ -551,16 +588,45 @@ ef10_mac_stats_update(
__inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
__inout_opt uint32_t *generationp)
{
- efx_qword_t value;
+ const efx_nic_cfg_t *encp = &enp->en_nic_cfg;
efx_qword_t generation_start;
efx_qword_t generation_end;
+ efx_qword_t value;
+ efx_rc_t rc;
- _NOTE(ARGUNUSED(enp))
+ /*
+ * The MAC_STATS contain start and end generation counters used to
+ * detect when the DMA buffer has been updated during stats decode.
+ * All stats counters are 64bit unsigned values.
+ *
+ * Siena-compatible MAC stats contain MC_CMD_MAC_NSTATS 64bit counters.
+ * The generation end counter is at index MC_CMD_MAC_GENERATION_END
+ * (same as MC_CMD_MAC_NSTATS-1).
+ *
+ * Medford2 and later use a larger DMA buffer: MAC_STATS_NUM_STATS from
+ * MC_CMD_GET_CAPABILITIES_V4_OUT reports the number of 64bit counters.
+ *
+ * Firmware writes the generation end counter as the last counter in the
+ * DMA buffer. Do not use MC_CMD_MAC_GENERATION_END, as that is only
+ * correct for legacy Siena-compatible MAC stats.
+ */
+
+ if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS) {
+ /* MAC stats count too small for legacy MAC stats */
+ rc = ENOSPC;
+ goto fail1;
+ }
+ if (EFSYS_MEM_SIZE(esmp) <
+ (encp->enc_mac_stats_nstats * sizeof (efx_qword_t))) {
+ /* DMA buffer too small */
+ rc = ENOSPC;
+ goto fail2;
+ }
/* Read END first so we don't race with the MC */
- EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE);
- EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_END,
- &generation_end);
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFSYS_MEM_SIZE(esmp));
+ EF10_MAC_STAT_READ(esmp, (encp->enc_mac_stats_nstats - 1),
+ &generation_end);
EFSYS_MEM_READ_BARRIER();
/* TX */
@@ -851,7 +917,109 @@ ef10_mac_stats_update(
EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_OVERFLOW]), &value);
- EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE);
+ if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS_V2)
+ goto done;
+
+ /* FEC */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_UNCORRECTED_ERRORS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_UNCORRECTED_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_CORRECTED_ERRORS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE0,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE0]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE1,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE1]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE2,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE2]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE3,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE3]),
+ &value);
+
+ if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS_V3)
+ goto done;
+
+ /* CTPIO exceptions */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_VI_BUSY_FALLBACK, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_VI_BUSY_FALLBACK]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_LONG_WRITE_SUCCESS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_LONG_WRITE_SUCCESS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_MISSING_DBELL_FAIL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_MISSING_DBELL_FAIL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_OVERFLOW_FAIL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_OVERFLOW_FAIL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_UNDERFLOW_FAIL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_UNDERFLOW_FAIL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_TIMEOUT_FAIL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_TIMEOUT_FAIL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_NONCONTIG_WR_FAIL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_NONCONTIG_WR_FAIL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_FRM_CLOBBER_FAIL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_FRM_CLOBBER_FAIL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_INVALID_WR_FAIL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_INVALID_WR_FAIL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_VI_CLOBBER_FALLBACK, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_VI_CLOBBER_FALLBACK]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_UNQUALIFIED_FALLBACK, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_UNQUALIFIED_FALLBACK]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_RUNT_FALLBACK, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_RUNT_FALLBACK]), &value);
+
+ /* CTPIO per-port stats */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_SUCCESS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_SUCCESS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_FALLBACK, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_FALLBACK]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_POISON, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_POISON]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_ERASE, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_ERASE]), &value);
+
+ if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS_V4)
+ goto done;
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_SCATTER_DISABLED_TRUNC,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_SCATTER_DISABLED_TRUNC]),
+ &value);
+
+ /* Head-of-line blocking */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_HLB_IDLE, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_HLB_IDLE]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_HLB_TIMEOUT, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_HLB_TIMEOUT]), &value);
+
+done:
+ /* Read START generation counter */
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFSYS_MEM_SIZE(esmp));
EFSYS_MEM_READ_BARRIER();
EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_START,
&generation_start);
@@ -866,8 +1034,15 @@ ef10_mac_stats_update(
*generationp = EFX_QWORD_FIELD(generation_start, EFX_DWORD_0);
return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
}
#endif /* EFSYS_OPT_MAC_STATS */
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
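
The start/end generation counters described in the comment above give readers a seqlock-style consistency check: if the two values differ after decoding, firmware rewrote the DMA buffer mid-read and the decode must be retried. A hedged consumer-side sketch follows; the EAGAIN return on a generation mismatch and the exact leading parameters of ef10_mac_stats_update() are assumptions based on the surrounding base-driver conventions, not lines shown in this diff.

/* Hypothetical consumer sketch (not from this patch): retry the decode
 * until the generation counters indicate a consistent snapshot.
 */
static efx_rc_t
example_mac_stats_read(efx_nic_t *enp, efsys_mem_t *esmp,
	efsys_stat_t *stat, uint32_t *generationp)
{
	unsigned int attempts = 10;
	efx_rc_t rc;

	do {
		/* Assumed to report EAGAIN when the start and end
		 * generation counters do not match.
		 */
		rc = ef10_mac_stats_update(enp, esmp, stat, generationp);
	} while (rc == EAGAIN && --attempts > 0);

	return (rc);
}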
diff --git a/drivers/net/sfc/base/ef10_mcdi.c b/drivers/net/sfc/base/ef10_mcdi.c
index 1f9e573f..8a3fc3b4 100644
--- a/drivers/net/sfc/base/ef10_mcdi.c
+++ b/drivers/net/sfc/base/ef10_mcdi.c
@@ -8,7 +8,7 @@
#include "efx_impl.h"
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
#if EFSYS_OPT_MCDI
@@ -28,7 +28,8 @@ ef10_mcdi_init(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
EFSYS_ASSERT(enp->en_features & EFX_FEATURE_MCDI_DMA);
/*
@@ -135,7 +136,8 @@ ef10_mcdi_send_request(
unsigned int pos;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
/* Write the header */
for (pos = 0; pos < hdr_len; pos += sizeof (efx_dword_t)) {
@@ -186,13 +188,17 @@ ef10_mcdi_read_response(
{
const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
efsys_mem_t *esmp = emtp->emt_dma_mem;
- unsigned int pos;
+ unsigned int pos = 0;
efx_dword_t data;
+ size_t remaining = length;
+
+ while (remaining > 0) {
+ size_t chunk = MIN(remaining, sizeof (data));
- for (pos = 0; pos < length; pos += sizeof (efx_dword_t)) {
EFSYS_MEM_READD(esmp, offset + pos, &data);
- memcpy((uint8_t *)bufferp + pos, &data,
- MIN(sizeof (data), length - pos));
+ memcpy((uint8_t *)bufferp + pos, &data, chunk);
+ pos += chunk;
+ remaining -= chunk;
}
}
@@ -254,7 +260,8 @@ ef10_mcdi_feature_supported(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
/*
* Use privilege mask state at MCDI attach.
@@ -315,4 +322,4 @@ fail1:
#endif /* EFSYS_OPT_MCDI */
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
diff --git a/drivers/net/sfc/base/ef10_nic.c b/drivers/net/sfc/base/ef10_nic.c
index eb9ec2be..7dbf843b 100644
--- a/drivers/net/sfc/base/ef10_nic.c
+++ b/drivers/net/sfc/base/ef10_nic.c
@@ -10,7 +10,7 @@
#include "mcdi_mon.h"
#endif
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
#include "ef10_tlv_layout.h"
@@ -25,7 +25,8 @@ efx_mcdi_get_port_assignment(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
(void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT;
@@ -70,7 +71,8 @@ efx_mcdi_get_port_modes(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
(void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_PORT_MODES;
@@ -250,7 +252,8 @@ efx_mcdi_get_mac_address_pf(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
(void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES;
@@ -308,7 +311,8 @@ efx_mcdi_get_mac_address_vf(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
(void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES;
@@ -372,7 +376,8 @@ efx_mcdi_get_clock(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
(void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_CLOCK;
@@ -419,6 +424,64 @@ fail1:
}
__checkReturn efx_rc_t
+efx_mcdi_get_rxdp_config(
+ __in efx_nic_t *enp,
+ __out uint32_t *end_paddingp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_RXDP_CONFIG_IN_LEN,
+ MC_CMD_GET_RXDP_CONFIG_OUT_LEN)];
+ uint32_t end_padding;
+ efx_rc_t rc;
+
+ memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_RXDP_CONFIG;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_RXDP_CONFIG_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_RXDP_CONFIG_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
+ GET_RXDP_CONFIG_OUT_PAD_HOST_DMA) == 0) {
+ /* RX DMA end padding is disabled */
+ end_padding = 0;
+ } else {
+ switch (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
+ GET_RXDP_CONFIG_OUT_PAD_HOST_LEN)) {
+ case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64:
+ end_padding = 64;
+ break;
+ case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128:
+ end_padding = 128;
+ break;
+ case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256:
+ end_padding = 256;
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail2;
+ }
+ }
+
+ *end_paddingp = end_padding;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
efx_mcdi_get_vector_cfg(
__in efx_nic_t *enp,
__out_opt uint32_t *vec_basep,
@@ -783,7 +846,8 @@ ef10_nic_pio_alloc(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
EFSYS_ASSERT(bufnump);
EFSYS_ASSERT(handlep);
EFSYS_ASSERT(blknump);
@@ -925,62 +989,103 @@ fail1:
return (rc);
}
- __checkReturn efx_rc_t
+static __checkReturn efx_rc_t
ef10_get_datapath_caps(
__in efx_nic_t *enp)
{
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
- uint32_t flags;
- uint32_t flags2;
- uint32_t tso2nc;
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_CAPABILITIES_IN_LEN,
+ MC_CMD_GET_CAPABILITIES_V5_OUT_LEN)];
efx_rc_t rc;
- if ((rc = efx_mcdi_get_capabilities(enp, &flags, NULL, NULL,
- &flags2, &tso2nc)) != 0)
- goto fail1;
-
if ((rc = ef10_mcdi_get_pf_count(enp, &encp->enc_hw_pf_count)) != 0)
goto fail1;
-#define CAP_FLAG(flags1, field) \
- ((flags1) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN)))
-#define CAP_FLAG2(flags2, field) \
- ((flags2) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN)))
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_CAPABILITIES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_CAPABILITIES_V5_OUT_LEN;
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+#define CAP_FLAGS1(_req, _flag) \
+ (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_OUT_FLAGS1) & \
+ (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN)))
+
+#define CAP_FLAGS2(_req, _flag) \
+ (((_req).emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) && \
+ (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_V2_OUT_FLAGS2) & \
+ (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN))))
/*
* Huntington RXDP firmware inserts a 0 or 14 byte prefix.
* We only support the 14 byte prefix here.
*/
- if (CAP_FLAG(flags, RX_PREFIX_LEN_14) == 0) {
+ if (CAP_FLAGS1(req, RX_PREFIX_LEN_14) == 0) {
rc = ENOTSUP;
- goto fail2;
+ goto fail4;
}
encp->enc_rx_prefix_size = 14;
+ /* Check if the firmware supports additional RSS modes */
+ if (CAP_FLAGS1(req, ADDITIONAL_RSS_MODES))
+ encp->enc_rx_scale_additional_modes_supported = B_TRUE;
+ else
+ encp->enc_rx_scale_additional_modes_supported = B_FALSE;
+
/* Check if the firmware supports TSO */
- encp->enc_fw_assisted_tso_enabled =
- CAP_FLAG(flags, TX_TSO) ? B_TRUE : B_FALSE;
+ if (CAP_FLAGS1(req, TX_TSO))
+ encp->enc_fw_assisted_tso_enabled = B_TRUE;
+ else
+ encp->enc_fw_assisted_tso_enabled = B_FALSE;
/* Check if the firmware supports FATSOv2 */
- encp->enc_fw_assisted_tso_v2_enabled =
- CAP_FLAG2(flags2, TX_TSO_V2) ? B_TRUE : B_FALSE;
+ if (CAP_FLAGS2(req, TX_TSO_V2)) {
+ encp->enc_fw_assisted_tso_v2_enabled = B_TRUE;
+ encp->enc_fw_assisted_tso_v2_n_contexts = MCDI_OUT_WORD(req,
+ GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS);
+ } else {
+ encp->enc_fw_assisted_tso_v2_enabled = B_FALSE;
+ encp->enc_fw_assisted_tso_v2_n_contexts = 0;
+ }
- /* Get the number of TSO contexts (FATSOv2) */
- encp->enc_fw_assisted_tso_v2_n_contexts =
- CAP_FLAG2(flags2, TX_TSO_V2) ? tso2nc : 0;
+ /* Check if the firmware supports FATSOv2 encap */
+ if (CAP_FLAGS2(req, TX_TSO_V2_ENCAP))
+ encp->enc_fw_assisted_tso_v2_encap_enabled = B_TRUE;
+ else
+ encp->enc_fw_assisted_tso_v2_encap_enabled = B_FALSE;
/* Check if the firmware has vadapter/vport/vswitch support */
- encp->enc_datapath_cap_evb =
- CAP_FLAG(flags, EVB) ? B_TRUE : B_FALSE;
+ if (CAP_FLAGS1(req, EVB))
+ encp->enc_datapath_cap_evb = B_TRUE;
+ else
+ encp->enc_datapath_cap_evb = B_FALSE;
/* Check if the firmware supports VLAN insertion */
- encp->enc_hw_tx_insert_vlan_enabled =
- CAP_FLAG(flags, TX_VLAN_INSERTION) ? B_TRUE : B_FALSE;
+ if (CAP_FLAGS1(req, TX_VLAN_INSERTION))
+ encp->enc_hw_tx_insert_vlan_enabled = B_TRUE;
+ else
+ encp->enc_hw_tx_insert_vlan_enabled = B_FALSE;
/* Check if the firmware supports RX event batching */
- encp->enc_rx_batching_enabled =
- CAP_FLAG(flags, RX_BATCHING) ? B_TRUE : B_FALSE;
+ if (CAP_FLAGS1(req, RX_BATCHING))
+ encp->enc_rx_batching_enabled = B_TRUE;
+ else
+ encp->enc_rx_batching_enabled = B_FALSE;
/*
* Even if batching isn't reported as supported, we may still get
@@ -989,38 +1094,61 @@ ef10_get_datapath_caps(
encp->enc_rx_batch_max = 16;
/* Check if the firmware supports disabling scatter on RXQs */
- encp->enc_rx_disable_scatter_supported =
- CAP_FLAG(flags, RX_DISABLE_SCATTER) ? B_TRUE : B_FALSE;
+ if (CAP_FLAGS1(req, RX_DISABLE_SCATTER))
+ encp->enc_rx_disable_scatter_supported = B_TRUE;
+ else
+ encp->enc_rx_disable_scatter_supported = B_FALSE;
/* Check if the firmware supports packed stream mode */
- encp->enc_rx_packed_stream_supported =
- CAP_FLAG(flags, RX_PACKED_STREAM) ? B_TRUE : B_FALSE;
+ if (CAP_FLAGS1(req, RX_PACKED_STREAM))
+ encp->enc_rx_packed_stream_supported = B_TRUE;
+ else
+ encp->enc_rx_packed_stream_supported = B_FALSE;
/*
* Check if the firmware supports configurable buffer sizes
* for packed stream mode (otherwise buffer size is 1Mbyte)
*/
- encp->enc_rx_var_packed_stream_supported =
- CAP_FLAG(flags, RX_PACKED_STREAM_VAR_BUFFERS) ? B_TRUE : B_FALSE;
+ if (CAP_FLAGS1(req, RX_PACKED_STREAM_VAR_BUFFERS))
+ encp->enc_rx_var_packed_stream_supported = B_TRUE;
+ else
+ encp->enc_rx_var_packed_stream_supported = B_FALSE;
+
+ /* Check if the firmware supports equal stride super-buffer mode */
+ if (CAP_FLAGS2(req, EQUAL_STRIDE_SUPER_BUFFER))
+ encp->enc_rx_es_super_buffer_supported = B_TRUE;
+ else
+ encp->enc_rx_es_super_buffer_supported = B_FALSE;
+
+ /* Check if the firmware supports FW subvariant w/o Tx checksumming */
+ if (CAP_FLAGS2(req, FW_SUBVARIANT_NO_TX_CSUM))
+ encp->enc_fw_subvariant_no_tx_csum_supported = B_TRUE;
+ else
+ encp->enc_fw_subvariant_no_tx_csum_supported = B_FALSE;
/* Check if the firmware supports set mac with running filters */
- encp->enc_allow_set_mac_with_installed_filters =
- CAP_FLAG(flags, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED) ?
- B_TRUE : B_FALSE;
+ if (CAP_FLAGS1(req, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED))
+ encp->enc_allow_set_mac_with_installed_filters = B_TRUE;
+ else
+ encp->enc_allow_set_mac_with_installed_filters = B_FALSE;
/*
* Check if firmware supports the extended MC_CMD_SET_MAC, which allows
* specifying which parameters to configure.
*/
- encp->enc_enhanced_set_mac_supported =
- CAP_FLAG(flags, SET_MAC_ENHANCED) ? B_TRUE : B_FALSE;
+ if (CAP_FLAGS1(req, SET_MAC_ENHANCED))
+ encp->enc_enhanced_set_mac_supported = B_TRUE;
+ else
+ encp->enc_enhanced_set_mac_supported = B_FALSE;
/*
* Check if firmware supports version 2 of MC_CMD_INIT_EVQ, which allows
* us to let the firmware choose the settings to use on an EVQ.
*/
- encp->enc_init_evq_v2_supported =
- CAP_FLAG2(flags2, INIT_EVQ_V2) ? B_TRUE : B_FALSE;
+ if (CAP_FLAGS2(req, INIT_EVQ_V2))
+ encp->enc_init_evq_v2_supported = B_TRUE;
+ else
+ encp->enc_init_evq_v2_supported = B_FALSE;
/*
* Check if firmware-verified NVRAM updates must be used.
@@ -1030,29 +1158,34 @@ ef10_get_datapath_caps(
* and version 2 of MC_CMD_NVRAM_UPDATE_FINISH (to verify the updated
* partition and report the result).
*/
- encp->enc_nvram_update_verify_result_supported =
- CAP_FLAG2(flags2, NVRAM_UPDATE_REPORT_VERIFY_RESULT) ?
- B_TRUE : B_FALSE;
+ if (CAP_FLAGS2(req, NVRAM_UPDATE_REPORT_VERIFY_RESULT))
+ encp->enc_nvram_update_verify_result_supported = B_TRUE;
+ else
+ encp->enc_nvram_update_verify_result_supported = B_FALSE;
/*
* Check if firmware provides packet memory and Rx datapath
* counters.
*/
- encp->enc_pm_and_rxdp_counters =
- CAP_FLAG(flags, PM_AND_RXDP_COUNTERS) ? B_TRUE : B_FALSE;
+ if (CAP_FLAGS1(req, PM_AND_RXDP_COUNTERS))
+ encp->enc_pm_and_rxdp_counters = B_TRUE;
+ else
+ encp->enc_pm_and_rxdp_counters = B_FALSE;
/*
* Check if the 40G MAC hardware is capable of reporting
* statistics for Tx size bins.
*/
- encp->enc_mac_stats_40g_tx_size_bins =
- CAP_FLAG2(flags2, MAC_STATS_40G_TX_SIZE_BINS) ? B_TRUE : B_FALSE;
+ if (CAP_FLAGS2(req, MAC_STATS_40G_TX_SIZE_BINS))
+ encp->enc_mac_stats_40g_tx_size_bins = B_TRUE;
+ else
+ encp->enc_mac_stats_40g_tx_size_bins = B_FALSE;
/*
* Check if firmware supports VXLAN and NVGRE tunnels.
* The capability indicates Geneve protocol support as well.
*/
- if (CAP_FLAG(flags, VXLAN_NVGRE)) {
+ if (CAP_FLAGS1(req, VXLAN_NVGRE)) {
encp->enc_tunnel_encapsulations_supported =
(1u << EFX_TUNNEL_PROTOCOL_VXLAN) |
(1u << EFX_TUNNEL_PROTOCOL_GENEVE) |
@@ -1066,11 +1199,136 @@ ef10_get_datapath_caps(
encp->enc_tunnel_config_udp_entries_max = 0;
}
-#undef CAP_FLAG
-#undef CAP_FLAG2
+ /*
+ * Check if firmware reports the VI window mode.
+ * Medford2 has a variable VI window size (8K, 16K or 64K).
+ * Medford and Huntington have a fixed 8K VI window size.
+ */
+ if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) {
+ uint8_t mode =
+ MCDI_OUT_BYTE(req, GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);
+
+ switch (mode) {
+ case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
+ encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;
+ break;
+ case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
+ encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_16K;
+ break;
+ case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
+ encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_64K;
+ break;
+ default:
+ encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID;
+ break;
+ }
+ } else if ((enp->en_family == EFX_FAMILY_HUNTINGTON) ||
+ (enp->en_family == EFX_FAMILY_MEDFORD)) {
+ /* Huntington and Medford have fixed 8K window size */
+ encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;
+ } else {
+ encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID;
+ }
+
+ /* Check if firmware supports extended MAC stats. */
+ if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
+ /* Extended stats buffer supported */
+ encp->enc_mac_stats_nstats = MCDI_OUT_WORD(req,
+ GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
+ } else {
+ /* Use Siena-compatible legacy MAC stats */
+ encp->enc_mac_stats_nstats = MC_CMD_MAC_NSTATS;
+ }
+
+ if (encp->enc_mac_stats_nstats >= MC_CMD_MAC_NSTATS_V2)
+ encp->enc_fec_counters = B_TRUE;
+ else
+ encp->enc_fec_counters = B_FALSE;
+
+ /* Check if the firmware provides head-of-line blocking counters */
+ if (CAP_FLAGS2(req, RXDP_HLB_IDLE))
+ encp->enc_hlb_counters = B_TRUE;
+ else
+ encp->enc_hlb_counters = B_FALSE;
+
+ if (CAP_FLAGS1(req, RX_RSS_LIMITED)) {
+ /* Only one exclusive RSS context is available per port. */
+ encp->enc_rx_scale_max_exclusive_contexts = 1;
+
+ switch (enp->en_family) {
+ case EFX_FAMILY_MEDFORD2:
+ encp->enc_rx_scale_hash_alg_mask =
+ (1U << EFX_RX_HASHALG_TOEPLITZ);
+ break;
+
+ case EFX_FAMILY_MEDFORD:
+ case EFX_FAMILY_HUNTINGTON:
+ /*
+			 * The packed stream firmware variant uses a
+			 * non-standard hash algorithm: it XORs together
+			 * the source and destination IP addresses (or
+			 * the last four bytes of each for IPv6) and uses
+			 * the result as the input to a Toeplitz hash.
+ */
+ encp->enc_rx_scale_hash_alg_mask =
+ (1U << EFX_RX_HASHALG_PACKED_STREAM);
+ break;
+
+ default:
+ rc = EINVAL;
+ goto fail5;
+ }
+
+ /* Port numbers cannot contribute to the hash value */
+ encp->enc_rx_scale_l4_hash_supported = B_FALSE;
+ } else {
+ /*
+ * Maximum number of exclusive RSS contexts.
+ * EF10 hardware supports 64 in total, but 6 are reserved
+ * for shared contexts. They are a global resource so
+ * not all may be available.
+ */
+ encp->enc_rx_scale_max_exclusive_contexts = 64 - 6;
+
+ encp->enc_rx_scale_hash_alg_mask =
+ (1U << EFX_RX_HASHALG_TOEPLITZ);
+
+ /*
+ * It is possible to use port numbers as
+ * the input data for hash computation.
+ */
+ encp->enc_rx_scale_l4_hash_supported = B_TRUE;
+ }
+ /* Check if the firmware supports "FLAG" and "MARK" filter actions */
+ if (CAP_FLAGS2(req, FILTER_ACTION_FLAG))
+ encp->enc_filter_action_flag_supported = B_TRUE;
+ else
+ encp->enc_filter_action_flag_supported = B_FALSE;
+
+ if (CAP_FLAGS2(req, FILTER_ACTION_MARK))
+ encp->enc_filter_action_mark_supported = B_TRUE;
+ else
+ encp->enc_filter_action_mark_supported = B_FALSE;
+
+ /* Get maximum supported value for "MARK" filter action */
+ if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V5_OUT_LEN)
+ encp->enc_filter_action_mark_max = MCDI_OUT_DWORD(req,
+ GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX);
+ else
+ encp->enc_filter_action_mark_max = 0;
+
+#undef CAP_FLAGS1
+#undef CAP_FLAGS2
return (0);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
fail2:
EFSYS_PROBE(fail2);
fail1:
@@ -1132,78 +1390,238 @@ fail1:
/*
- * Table of mapping schemes from port number to the number of the external
- * connector on the board. The external numbering does not distinguish
- * off-board separated outputs such as from multi-headed cables.
+ * Table of mapping schemes from port number to external number.
+ *
+ * Each port number ultimately corresponds to a connector: either as part of
+ * a cable assembly attached to a module inserted in an SFP+/QSFP+ cage on
+ * the board, or fixed to the board (e.g. 10GBASE-T magjack on SFN5121T
+ * "Salina"). In general:
*
- * The count of adjacent port numbers that map to each external port
+ * Port number (0-based)
+ * |
+ * port mapping (n:1)
+ * |
+ * v
+ * External port number (normally 1-based)
+ * |
+ * fixed (1:1) or cable assembly (1:m)
+ * |
+ * v
+ * Connector
+ *
+ * The external numbering refers to the cages or magjacks on the board,
+ * as visibly annotated on the board or back panel. This table describes
+ * how to determine which external cage/magjack corresponds to the port
+ * numbers used by the driver.
+ *
+ * The count of adjacent port numbers that map to each external number,
 * and the offset in the numbering, are determined by the chip family and
* current port mode.
*
* For the Huntington family, the current port mode cannot be discovered,
+ * but a single mapping is used by all modes for a given chip variant,
* so the mapping used is instead the last match in the table to the full
* set of port modes to which the NIC can be configured. Therefore the
* ordering of entries in the mapping table is significant.
*/
-static struct {
+static struct ef10_external_port_map_s {
efx_family_t family;
uint32_t modes_mask;
int32_t count;
int32_t offset;
} __ef10_external_port_mappings[] = {
- /* Supported modes with 1 output per external port */
+ /*
+ * Modes used by Huntington family controllers where each port
+ * number maps to a separate cage.
+ * SFN7x22F (Torino):
+ * port 0 -> cage 1
+ * port 1 -> cage 2
+ * SFN7xx4F (Pavia):
+ * port 0 -> cage 1
+ * port 1 -> cage 2
+ * port 2 -> cage 3
+ * port 3 -> cage 4
+ */
{
EFX_FAMILY_HUNTINGTON,
- (1 << TLV_PORT_MODE_10G) |
- (1 << TLV_PORT_MODE_10G_10G) |
- (1 << TLV_PORT_MODE_10G_10G_10G_10G),
- 1,
- 1
+ (1U << TLV_PORT_MODE_10G) | /* mode 0 */
+ (1U << TLV_PORT_MODE_10G_10G) | /* mode 2 */
+ (1U << TLV_PORT_MODE_10G_10G_10G_10G), /* mode 4 */
+ 1, /* ports per cage */
+ 1 /* first cage */
},
+ /*
+ * Modes which for Huntington identify a chip variant where 2
+ * adjacent port numbers map to each cage.
+ * SFN7x42Q (Monza):
+ * port 0 -> cage 1
+ * port 1 -> cage 1
+ * port 2 -> cage 2
+ * port 3 -> cage 2
+ */
{
- EFX_FAMILY_MEDFORD,
- (1 << TLV_PORT_MODE_10G) |
- (1 << TLV_PORT_MODE_10G_10G),
- 1,
- 1
+ EFX_FAMILY_HUNTINGTON,
+ (1U << TLV_PORT_MODE_40G) | /* mode 1 */
+ (1U << TLV_PORT_MODE_40G_40G) | /* mode 3 */
+ (1U << TLV_PORT_MODE_40G_10G_10G) | /* mode 6 */
+ (1U << TLV_PORT_MODE_10G_10G_40G), /* mode 7 */
+ 2, /* ports per cage */
+ 1 /* first cage */
},
- /* Supported modes with 2 outputs per external port */
+ /*
+ * Modes that on Medford allocate each port number to a separate
+ * cage.
+ * port 0 -> cage 1
+ * port 1 -> cage 2
+ * port 2 -> cage 3
+ * port 3 -> cage 4
+ */
{
- EFX_FAMILY_HUNTINGTON,
- (1 << TLV_PORT_MODE_40G) |
- (1 << TLV_PORT_MODE_40G_40G) |
- (1 << TLV_PORT_MODE_40G_10G_10G) |
- (1 << TLV_PORT_MODE_10G_10G_40G),
- 2,
- 1
+ EFX_FAMILY_MEDFORD,
+ (1U << TLV_PORT_MODE_10G) | /* mode 0 */
+ (1U << TLV_PORT_MODE_10G_10G), /* mode 2 */
+ 1, /* ports per cage */
+ 1 /* first cage */
},
+ /*
+ * Modes that on Medford allocate 2 adjacent port numbers to each
+ * cage.
+ * port 0 -> cage 1
+ * port 1 -> cage 1
+ * port 2 -> cage 2
+ * port 3 -> cage 2
+ */
{
EFX_FAMILY_MEDFORD,
- (1 << TLV_PORT_MODE_40G) |
- (1 << TLV_PORT_MODE_40G_40G) |
- (1 << TLV_PORT_MODE_40G_10G_10G) |
- (1 << TLV_PORT_MODE_10G_10G_40G) |
- (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2),
- 2,
- 1
+ (1U << TLV_PORT_MODE_40G) | /* mode 1 */
+ (1U << TLV_PORT_MODE_40G_40G) | /* mode 3 */
+ (1U << TLV_PORT_MODE_40G_10G_10G) | /* mode 6 */
+ (1U << TLV_PORT_MODE_10G_10G_40G) | /* mode 7 */
+ /* Do not use 10G_10G_10G_10G_Q1_Q2 (see bug63270) */
+ (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2), /* mode 9 */
+ 2, /* ports per cage */
+ 1 /* first cage */
},
- /* Supported modes with 4 outputs per external port */
+ /*
+ * Modes that on Medford allocate 4 adjacent port numbers to each
+ * connector, starting on cage 1.
+ * port 0 -> cage 1
+ * port 1 -> cage 1
+ * port 2 -> cage 1
+ * port 3 -> cage 1
+ */
{
EFX_FAMILY_MEDFORD,
- (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q) |
- (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q1),
- 4,
- 1,
+ (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q) | /* mode 5 */
+ /* Do not use 10G_10G_10G_10G_Q1 (see bug63270) */
+ (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q1), /* mode 4 */
+ 4, /* ports per cage */
+ 1 /* first cage */
},
+ /*
+ * Modes that on Medford allocate 4 adjacent port numbers to each
+ * connector, starting on cage 2.
+ * port 0 -> cage 2
+ * port 1 -> cage 2
+ * port 2 -> cage 2
+ * port 3 -> cage 2
+ */
{
EFX_FAMILY_MEDFORD,
- (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q2),
- 4,
- 2
+ (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q2), /* mode 8 */
+ 4, /* ports per cage */
+ 2 /* first cage */
+ },
+ /*
+ * Modes that on Medford2 allocate each port number to a separate
+ * cage.
+ * port 0 -> cage 1
+ * port 1 -> cage 2
+ * port 2 -> cage 3
+ * port 3 -> cage 4
+ */
+ {
+ EFX_FAMILY_MEDFORD2,
+ (1U << TLV_PORT_MODE_1x1_NA) | /* mode 0 */
+ (1U << TLV_PORT_MODE_1x4_NA) | /* mode 1 */
+ (1U << TLV_PORT_MODE_1x1_1x1) | /* mode 2 */
+ (1U << TLV_PORT_MODE_1x2_NA) | /* mode 10 */
+ (1U << TLV_PORT_MODE_1x2_1x2) | /* mode 12 */
+ (1U << TLV_PORT_MODE_1x4_1x2) | /* mode 15 */
+ (1U << TLV_PORT_MODE_1x2_1x4), /* mode 16 */
+ 1, /* ports per cage */
+ 1 /* first cage */
+ },
+ /*
+ * FIXME: Some port modes are not representable in this mapping:
+ * - TLV_PORT_MODE_1x2_2x1 (mode 17):
+ * port 0 -> cage 1
+ * port 1 -> cage 2
+ * port 2 -> cage 2
+ */
+ /*
+ * Modes that on Medford2 allocate 2 adjacent port numbers to each
+ * cage, starting on cage 1.
+ * port 0 -> cage 1
+ * port 1 -> cage 1
+ * port 2 -> cage 2
+ * port 3 -> cage 2
+ */
+ {
+ EFX_FAMILY_MEDFORD2,
+ (1U << TLV_PORT_MODE_1x4_1x4) | /* mode 3 */
+ (1U << TLV_PORT_MODE_2x1_2x1) | /* mode 4 */
+ (1U << TLV_PORT_MODE_1x4_2x1) | /* mode 6 */
+ (1U << TLV_PORT_MODE_2x1_1x4) | /* mode 7 */
+ (1U << TLV_PORT_MODE_2x2_NA) | /* mode 13 */
+ (1U << TLV_PORT_MODE_2x1_1x2), /* mode 18 */
+ 2, /* ports per cage */
+ 1 /* first cage */
+ },
+ /*
+ * Modes that on Medford2 allocate 2 adjacent port numbers to each
+ * cage, starting on cage 2.
+ * port 0 -> cage 2
+ * port 1 -> cage 2
+ */
+ {
+ EFX_FAMILY_MEDFORD2,
+ (1U << TLV_PORT_MODE_NA_2x2), /* mode 14 */
+ 2, /* ports per cage */
+ 2 /* first cage */
+ },
+ /*
+ * Modes that on Medford2 allocate 4 adjacent port numbers to each
+ * connector, starting on cage 1.
+ * port 0 -> cage 1
+ * port 1 -> cage 1
+ * port 2 -> cage 1
+ * port 3 -> cage 1
+ */
+ {
+ EFX_FAMILY_MEDFORD2,
+ (1U << TLV_PORT_MODE_4x1_NA), /* mode 5 */
+ 4, /* ports per cage */
+ 1 /* first cage */
+ },
+ /*
+ * Modes that on Medford2 allocate 4 adjacent port numbers to each
+ * connector, starting on cage 2.
+ * port 0 -> cage 2
+ * port 1 -> cage 2
+ * port 2 -> cage 2
+ * port 3 -> cage 2
+ */
+ {
+ EFX_FAMILY_MEDFORD2,
+ (1U << TLV_PORT_MODE_NA_4x1) | /* mode 8 */
+ (1U << TLV_PORT_MODE_NA_1x2), /* mode 11 */
+ 4, /* ports per cage */
+ 2 /* first cage */
},
};
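
The count/offset pairs above fully determine the translation from a zero-based port number to the external cage number. A minimal illustrative sketch (not part of the patch), consistent with the per-entry comments and assuming integer division as the mapping rule:

static uint32_t
example_port_to_cage(uint32_t port, int32_t count, int32_t offset)
{
	/* e.g. count = 2, offset = 1: ports 0,1 -> cage 1; ports 2,3 -> cage 2 */
	return (port / (uint32_t)count + (uint32_t)offset);
}
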
- __checkReturn efx_rc_t
+static __checkReturn efx_rc_t
ef10_external_port_mapping(
__in efx_nic_t *enp,
__in uint32_t port,
@@ -1219,7 +1637,7 @@ ef10_external_port_mapping(
if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, &current)) != 0) {
/*
- * No current port mode information
+ * No current port mode information (i.e. Huntington)
* - infer mapping from available modes
*/
if ((rc = efx_mcdi_get_port_modes(enp,
@@ -1236,18 +1654,23 @@ ef10_external_port_mapping(
}
/*
- * Infer the internal port -> external port mapping from
+ * Infer the internal port -> external number mapping from
* the possible port modes for this NIC.
*/
for (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) {
- if (__ef10_external_port_mappings[i].family !=
- enp->en_family)
+ struct ef10_external_port_map_s *eepmp =
+ &__ef10_external_port_mappings[i];
+ if (eepmp->family != enp->en_family)
continue;
- matches = (__ef10_external_port_mappings[i].modes_mask &
- port_modes);
+ matches = (eepmp->modes_mask & port_modes);
if (matches != 0) {
- count = __ef10_external_port_mappings[i].count;
- offset = __ef10_external_port_mappings[i].offset;
+ /*
+ * Some modes match. For some Huntington boards
+ * there will be multiple matches. The mapping on the
+ * last match is used.
+ */
+ count = eepmp->count;
+ offset = eepmp->offset;
port_modes &= ~matches;
}
}
@@ -1272,18 +1695,193 @@ fail1:
return (rc);
}
+static __checkReturn efx_rc_t
+ef10_nic_board_cfg(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ ef10_link_state_t els;
+ efx_port_t *epp = &(enp->en_port);
+ uint32_t board_type = 0;
+ uint32_t base, nvec;
+ uint32_t port;
+ uint32_t mask;
+ uint32_t pf;
+ uint32_t vf;
+ uint8_t mac_addr[6] = { 0 };
+ efx_rc_t rc;
+
+ /* Get the (zero-based) MCDI port number */
+ if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)
+ goto fail1;
+
+ /* EFX MCDI interface uses one-based port numbers */
+ emip->emi_port = port + 1;
+
+ if ((rc = ef10_external_port_mapping(enp, port,
+ &encp->enc_external_port)) != 0)
+ goto fail2;
+
+ /*
+ * Get PCIe function number from firmware (used for
+ * per-function privilege and dynamic config info).
+ * - PCIe PF: pf = PF number, vf = 0xffff.
+ * - PCIe VF: pf = parent PF, vf = VF number.
+ */
+ if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0)
+ goto fail3;
+
+ encp->enc_pf = pf;
+ encp->enc_vf = vf;
+
+ /* MAC address for this function */
+ if (EFX_PCI_FUNCTION_IS_PF(encp)) {
+ rc = efx_mcdi_get_mac_address_pf(enp, mac_addr);
+#if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC
+ /*
+ * Disable static config checking, ONLY for manufacturing test
+ * and setup at the factory, to allow the static config to be
+ * installed.
+ */
+#else /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
+ if ((rc == 0) && (mac_addr[0] & 0x02)) {
+ /*
+ * If the static config does not include a global MAC
+ * address pool then the board may return a locally
+ * administered MAC address (this should only happen on
+ * incorrectly programmed boards).
+ */
+ rc = EINVAL;
+ }
+#endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
+ } else {
+ rc = efx_mcdi_get_mac_address_vf(enp, mac_addr);
+ }
+ if (rc != 0)
+ goto fail4;
+
+ EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
+
+ /* Board configuration (legacy) */
+ rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL);
+ if (rc != 0) {
+ /* Unprivileged functions may not be able to read board cfg */
+ if (rc == EACCES)
+ board_type = 0;
+ else
+ goto fail5;
+ }
+
+ encp->enc_board_type = board_type;
+ encp->enc_clk_mult = 1; /* not used for EF10 */
+
+ /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
+ if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
+ goto fail6;
+
+ /* Obtain the default PHY advertised capabilities */
+ if ((rc = ef10_phy_get_link(enp, &els)) != 0)
+ goto fail7;
+ epp->ep_default_adv_cap_mask = els.els_adv_cap_mask;
+ epp->ep_adv_cap_mask = els.els_adv_cap_mask;
+
+ /* Check capabilities of running datapath firmware */
+ if ((rc = ef10_get_datapath_caps(enp)) != 0)
+ goto fail8;
+
+ /* Alignment for WPTR updates */
+ encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;
+
+ encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT);
+ /* No boundary crossing limits */
+ encp->enc_tx_dma_desc_boundary = 0;
+
+ /*
+ * Maximum number of bytes into the frame the TCP header can start for
+ * firmware assisted TSO to work.
+ */
+ encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;
+
+ /*
+ * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
+ * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
+ * resources (allocated to this PCIe function), which is zero until
+ * after we have allocated VIs.
+ */
+ encp->enc_evq_limit = 1024;
+ encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
+ encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;
+
+ encp->enc_buftbl_limit = 0xFFFFFFFF;
+
+ /* Get interrupt vector limits */
+ if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) {
+ if (EFX_PCI_FUNCTION_IS_PF(encp))
+ goto fail9;
+
+ /* Ignore error (cannot query vector limits from a VF). */
+ base = 0;
+ nvec = 1024;
+ }
+ encp->enc_intr_vec_base = base;
+ encp->enc_intr_limit = nvec;
+
+ /*
+ * Get the current privilege mask. Note that this may be modified
+ * dynamically, so this value is informational only. DO NOT use
+ * the privilege mask to check for sufficient privileges, as that
+ * can result in time-of-check/time-of-use bugs.
+ */
+ if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0)
+ goto fail10;
+ encp->enc_privilege_mask = mask;
+
+ /* Get remaining controller-specific board config */
+ if ((rc = enop->eno_board_cfg(enp)) != 0)
+ if (rc != EACCES)
+ goto fail11;
+
+ return (0);
+
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
__checkReturn efx_rc_t
ef10_nic_probe(
__in efx_nic_t *enp)
{
- const efx_nic_ops_t *enop = enp->en_enop;
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
/* Read and clear any assertion state */
if ((rc = efx_mcdi_read_assertion(enp)) != 0)
@@ -1297,9 +1895,8 @@ ef10_nic_probe(
if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)
goto fail3;
- if ((rc = enop->eno_board_cfg(enp)) != 0)
- if (rc != EACCES)
- goto fail4;
+ if ((rc = ef10_nic_board_cfg(enp)) != 0)
+ goto fail4;
/*
* Set default driver config limits (based on board config).
@@ -1494,10 +2091,12 @@ ef10_nic_init(
uint32_t i;
uint32_t retry;
uint32_t delay_us;
+ uint32_t vi_window_size;
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
/* Enable reporting of some events (e.g. link change) */
if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
@@ -1555,15 +2154,21 @@ ef10_nic_init(
enp->en_arch.ef10.ena_pio_write_vi_base =
vi_count - enp->en_arch.ef10.ena_piobuf_count;
+ EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, !=,
+ EFX_VI_WINDOW_SHIFT_INVALID);
+ EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, <=,
+ EFX_VI_WINDOW_SHIFT_64K);
+ vi_window_size = 1U << enp->en_nic_cfg.enc_vi_window_shift;
+
/* Save UC memory mapping details */
enp->en_arch.ef10.ena_uc_mem_map_offset = 0;
if (enp->en_arch.ef10.ena_piobuf_count > 0) {
enp->en_arch.ef10.ena_uc_mem_map_size =
- (ER_DZ_TX_PIOBUF_STEP *
+ (vi_window_size *
enp->en_arch.ef10.ena_pio_write_vi_base);
} else {
enp->en_arch.ef10.ena_uc_mem_map_size =
- (ER_DZ_TX_PIOBUF_STEP *
+ (vi_window_size *
enp->en_arch.ef10.ena_vi_count);
}
@@ -1573,7 +2178,7 @@ ef10_nic_init(
enp->en_arch.ef10.ena_uc_mem_map_size;
enp->en_arch.ef10.ena_wc_mem_map_size =
- (ER_DZ_TX_PIOBUF_STEP *
+ (vi_window_size *
enp->en_arch.ef10.ena_piobuf_count);
/* Link piobufs to extra VIs in WC mapping */
@@ -1653,7 +2258,8 @@ ef10_nic_get_vi_pool(
__out uint32_t *vi_countp)
{
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
/*
* Report VIs that the client driver can use.
@@ -1674,7 +2280,8 @@ ef10_nic_get_bar_region(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
/*
* TODO: Specify host memory mapping alignment and granularity
@@ -1770,5 +2377,87 @@ fail1:
#endif /* EFSYS_OPT_DIAG */
+#if EFSYS_OPT_FW_SUBVARIANT_AWARE
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_nic_global(
+ __in efx_nic_t *enp,
+ __in uint32_t key,
+ __out uint32_t *valuep)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_NIC_GLOBAL_IN_LEN,
+ MC_CMD_GET_NIC_GLOBAL_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_NIC_GLOBAL;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_NIC_GLOBAL_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_NIC_GLOBAL_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, GET_NIC_GLOBAL_IN_KEY, key);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used != MC_CMD_GET_NIC_GLOBAL_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *valuep = MCDI_OUT_DWORD(req, GET_NIC_GLOBAL_OUT_VALUE);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_set_nic_global(
+ __in efx_nic_t *enp,
+ __in uint32_t key,
+ __in uint32_t value)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_SET_NIC_GLOBAL_IN_LEN];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_NIC_GLOBAL;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_NIC_GLOBAL_IN_LEN;
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_KEY, key);
+ MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_VALUE, value);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
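
The GET_CAPABILITIES rework earlier in this file keys every newer field off the response length instead of assuming it is present. A condensed, illustrative helper showing the same pattern for a FLAGS2 bit (names as used in the hunk above; the helper itself is not part of the patch):

static boolean_t
example_cap_flag2(
	__in		efx_mcdi_req_t req,
	__in		unsigned int lbn)
{
	/* Old firmware: response too short to contain the FLAGS2 word */
	if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)
		return (B_FALSE);

	return ((MCDI_OUT_DWORD(req, GET_CAPABILITIES_V2_OUT_FLAGS2) &
	    (1u << lbn)) != 0) ? B_TRUE : B_FALSE;
}
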
diff --git a/drivers/net/sfc/base/ef10_nvram.c b/drivers/net/sfc/base/ef10_nvram.c
index 1904597c..2883ec8f 100644
--- a/drivers/net/sfc/base/ef10_nvram.c
+++ b/drivers/net/sfc/base/ef10_nvram.c
@@ -7,7 +7,7 @@
#include "efx.h"
#include "efx_impl.h"
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
@@ -1349,12 +1349,16 @@ ef10_nvram_partn_read_tlv(
*/
retry = 10;
do {
- rc = ef10_nvram_read_tlv_segment(enp, partn, 0,
- seg_data, partn_size);
- } while ((rc == EAGAIN) && (--retry > 0));
+ if ((rc = ef10_nvram_read_tlv_segment(enp, partn, 0,
+ seg_data, partn_size)) != 0)
+ --retry;
+ } while ((rc == EAGAIN) && (retry > 0));
if (rc != 0) {
/* Failed to obtain consistent segment data */
+ if (rc == EAGAIN)
+ rc = EIO;
+
goto fail4;
}
@@ -2152,6 +2156,20 @@ static ef10_parttbl_entry_t medford_parttbl[] = {
PARTN_MAP_ENTRY(MUM_FIRMWARE, ALL, MUM_FIRMWARE),
};
+static ef10_parttbl_entry_t medford2_parttbl[] = {
+ /* partn ports nvtype */
+ PARTN_MAP_ENTRY(MC_FIRMWARE, ALL, MC_FIRMWARE),
+ PARTN_MAP_ENTRY(MC_FIRMWARE_BACKUP, ALL, MC_GOLDEN),
+ PARTN_MAP_ENTRY(EXPANSION_ROM, ALL, BOOTROM),
+ PARTN_MAP_ENTRY(EXPROM_CONFIG, ALL, BOOTROM_CFG),
+ PARTN_MAP_ENTRY(DYNAMIC_CONFIG, ALL, DYNAMIC_CFG),
+ PARTN_MAP_ENTRY(FPGA, ALL, FPGA),
+ PARTN_MAP_ENTRY(FPGA_BACKUP, ALL, FPGA_BACKUP),
+ PARTN_MAP_ENTRY(LICENSE, ALL, LICENSE),
+ PARTN_MAP_ENTRY(EXPANSION_UEFI, ALL, UEFIROM),
+ PARTN_MAP_ENTRY(MUM_FIRMWARE, ALL, MUM_FIRMWARE),
+};
+
static __checkReturn efx_rc_t
ef10_parttbl_get(
__in efx_nic_t *enp,
@@ -2169,6 +2187,11 @@ ef10_parttbl_get(
*parttbl_rowsp = EFX_ARRAY_SIZE(medford_parttbl);
break;
+ case EFX_FAMILY_MEDFORD2:
+ *parttblp = medford2_parttbl;
+ *parttbl_rowsp = EFX_ARRAY_SIZE(medford2_parttbl);
+ break;
+
default:
EFSYS_ASSERT(B_FALSE);
return (EINVAL);
@@ -2362,4 +2385,4 @@ fail1:
#endif /* EFSYS_OPT_NVRAM */
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
diff --git a/drivers/net/sfc/base/ef10_phy.c b/drivers/net/sfc/base/ef10_phy.c
index aa8d6a2b..84acb70a 100644
--- a/drivers/net/sfc/base/ef10_phy.c
+++ b/drivers/net/sfc/base/ef10_phy.c
@@ -7,7 +7,7 @@
#include "efx.h"
#include "efx_impl.h"
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static void
mcdi_phy_decode_cap(
@@ -16,6 +16,32 @@ mcdi_phy_decode_cap(
{
uint32_t mask;
+#define CHECK_CAP(_cap) \
+ EFX_STATIC_ASSERT(EFX_PHY_CAP_##_cap == MC_CMD_PHY_CAP_##_cap##_LBN)
+
+ CHECK_CAP(10HDX);
+ CHECK_CAP(10FDX);
+ CHECK_CAP(100HDX);
+ CHECK_CAP(100FDX);
+ CHECK_CAP(1000HDX);
+ CHECK_CAP(1000FDX);
+ CHECK_CAP(10000FDX);
+ CHECK_CAP(25000FDX);
+ CHECK_CAP(40000FDX);
+ CHECK_CAP(50000FDX);
+ CHECK_CAP(100000FDX);
+ CHECK_CAP(PAUSE);
+ CHECK_CAP(ASYM);
+ CHECK_CAP(AN);
+ CHECK_CAP(DDM);
+ CHECK_CAP(BASER_FEC);
+ CHECK_CAP(BASER_FEC_REQUESTED);
+ CHECK_CAP(RS_FEC);
+ CHECK_CAP(RS_FEC_REQUESTED);
+ CHECK_CAP(25G_BASER_FEC);
+ CHECK_CAP(25G_BASER_FEC_REQUESTED);
+#undef CHECK_CAP
+
mask = 0;
if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
mask |= (1 << EFX_PHY_CAP_10HDX);
@@ -31,8 +57,15 @@ mcdi_phy_decode_cap(
mask |= (1 << EFX_PHY_CAP_1000FDX);
if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
mask |= (1 << EFX_PHY_CAP_10000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_25000FDX);
if (mcdi_cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
mask |= (1 << EFX_PHY_CAP_40000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_50000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_100000FDX);
+
if (mcdi_cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
mask |= (1 << EFX_PHY_CAP_PAUSE);
if (mcdi_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
@@ -40,6 +73,22 @@ mcdi_phy_decode_cap(
if (mcdi_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
mask |= (1 << EFX_PHY_CAP_AN);
+ /* FEC caps (supported on Medford2 and later) */
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN))
+ mask |= (1 << EFX_PHY_CAP_BASER_FEC);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN))
+ mask |= (1 << EFX_PHY_CAP_BASER_FEC_REQUESTED);
+
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN))
+ mask |= (1 << EFX_PHY_CAP_RS_FEC);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN))
+ mask |= (1 << EFX_PHY_CAP_RS_FEC_REQUESTED);
+
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN))
+ mask |= (1 << EFX_PHY_CAP_25G_BASER_FEC);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN))
+ mask |= (1 << EFX_PHY_CAP_25G_BASER_FEC_REQUESTED);
+
*maskp = mask;
}
@@ -61,8 +110,14 @@ mcdi_phy_decode_link_mode(
if (!up)
*link_modep = EFX_LINK_DOWN;
+ else if (speed == 100000 && fd)
+ *link_modep = EFX_LINK_100000FDX;
+ else if (speed == 50000 && fd)
+ *link_modep = EFX_LINK_50000FDX;
else if (speed == 40000 && fd)
*link_modep = EFX_LINK_40000FDX;
+ else if (speed == 25000 && fd)
+ *link_modep = EFX_LINK_25000FDX;
else if (speed == 10000 && fd)
*link_modep = EFX_LINK_10000FDX;
else if (speed == 1000)
@@ -116,9 +171,18 @@ ef10_phy_link_ev(
case MCDI_EVENT_LINKCHANGE_SPEED_10G:
speed = 10000;
break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_25G:
+ speed = 25000;
+ break;
case MCDI_EVENT_LINKCHANGE_SPEED_40G:
speed = 40000;
break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_50G:
+ speed = 50000;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_100G:
+ speed = 100000;
+ break;
default:
speed = 0;
break;
@@ -212,26 +276,10 @@ ef10_phy_get_link(
&elsp->els_link_mode, &elsp->els_fcntl);
#if EFSYS_OPT_LOOPBACK
- /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespace agree */
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD);
-
+ /*
+ * MC_CMD_LOOPBACK and EFX_LOOPBACK names are equivalent, so use the
+ * MCDI value directly. Agreement is checked in efx_loopback_mask().
+ */
elsp->els_loopback = MCDI_OUT_DWORD(req, GET_LINK_OUT_LOOPBACK_MODE);
#endif /* EFSYS_OPT_LOOPBACK */
@@ -289,7 +337,32 @@ ef10_phy_reconfigure(
PHY_CAP_AN, (cap_mask >> EFX_PHY_CAP_AN) & 0x1);
	/* Too many fields for the POPULATE macros, so insert this afterwards */
MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_25000FDX, (cap_mask >> EFX_PHY_CAP_25000FDX) & 0x1);
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
PHY_CAP_40000FDX, (cap_mask >> EFX_PHY_CAP_40000FDX) & 0x1);
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_50000FDX, (cap_mask >> EFX_PHY_CAP_50000FDX) & 0x1);
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_100000FDX, (cap_mask >> EFX_PHY_CAP_100000FDX) & 0x1);
+
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_BASER_FEC, (cap_mask >> EFX_PHY_CAP_BASER_FEC) & 0x1);
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_BASER_FEC_REQUESTED,
+ (cap_mask >> EFX_PHY_CAP_BASER_FEC_REQUESTED) & 0x1);
+
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_RS_FEC, (cap_mask >> EFX_PHY_CAP_RS_FEC) & 0x1);
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_RS_FEC_REQUESTED,
+ (cap_mask >> EFX_PHY_CAP_RS_FEC_REQUESTED) & 0x1);
+
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_25G_BASER_FEC,
+ (cap_mask >> EFX_PHY_CAP_25G_BASER_FEC) & 0x1);
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_25G_BASER_FEC_REQUESTED,
+ (cap_mask >> EFX_PHY_CAP_25G_BASER_FEC_REQUESTED) & 0x1);
#if EFSYS_OPT_LOOPBACK
MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE,
@@ -304,9 +377,18 @@ ef10_phy_reconfigure(
case EFX_LINK_10000FDX:
speed = 10000;
break;
+ case EFX_LINK_25000FDX:
+ speed = 25000;
+ break;
case EFX_LINK_40000FDX:
speed = 40000;
break;
+ case EFX_LINK_50000FDX:
+ speed = 50000;
+ break;
+ case EFX_LINK_100000FDX:
+ speed = 100000;
+ break;
default:
speed = 0;
}
@@ -606,4 +688,4 @@ ef10_bist_stop(
#endif /* EFSYS_OPT_BIST */
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
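
The SET_LINK additions mirror bits of the EFX capability mask directly into MCDI PHY_CAP fields. A hedged sketch of how a caller might advertise 25G with RS-FEC using the capability bits added above (routing the mask through efx_phy_adv_cap_set() is an assumption about the caller and is not shown in this patch):

static efx_rc_t
example_advertise_25g_rs_fec(efx_nic_t *enp)
{
	uint32_t cap_mask;

	/* Autonegotiate 25G full duplex and request RS-FEC */
	cap_mask = (1u << EFX_PHY_CAP_AN) |
	    (1u << EFX_PHY_CAP_25000FDX) |
	    (1u << EFX_PHY_CAP_RS_FEC) |
	    (1u << EFX_PHY_CAP_RS_FEC_REQUESTED);

	return (efx_phy_adv_cap_set(enp, cap_mask));
}
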
diff --git a/drivers/net/sfc/base/ef10_rx.c b/drivers/net/sfc/base/ef10_rx.c
index 2bb6705d..313a3691 100644
--- a/drivers/net/sfc/base/ef10_rx.c
+++ b/drivers/net/sfc/base/ef10_rx.c
@@ -8,7 +8,7 @@
#include "efx_impl.h"
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static __checkReturn efx_rc_t
@@ -21,12 +21,16 @@ efx_mcdi_init_rxq(
__in efsys_mem_t *esmp,
__in boolean_t disable_scatter,
__in boolean_t want_inner_classes,
- __in uint32_t ps_bufsize)
+ __in uint32_t ps_bufsize,
+ __in uint32_t es_bufs_per_desc,
+ __in uint32_t es_max_dma_len,
+ __in uint32_t es_buf_stride,
+ __in uint32_t hol_block_timeout)
{
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_INIT_RXQ_EXT_IN_LEN,
- MC_CMD_INIT_RXQ_EXT_OUT_LEN)];
+ uint8_t payload[MAX(MC_CMD_INIT_RXQ_V3_IN_LEN,
+ MC_CMD_INIT_RXQ_V3_OUT_LEN)];
int npages = EFX_RXQ_NBUFS(ndescs);
int i;
efx_qword_t *dma_addr;
@@ -37,8 +41,15 @@ efx_mcdi_init_rxq(
EFSYS_ASSERT3U(ndescs, <=, EFX_RXQ_MAXNDESCS);
+ if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_RXQ_SIZE(ndescs))) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
if (ps_bufsize > 0)
dma_mode = MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM;
+ else if (es_bufs_per_desc > 0)
+ dma_mode = MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER;
else
dma_mode = MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET;
@@ -65,9 +76,9 @@ efx_mcdi_init_rxq(
(void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_INIT_RXQ;
req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_INIT_RXQ_EXT_IN_LEN;
+ req.emr_in_length = MC_CMD_INIT_RXQ_V3_IN_LEN;
req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_INIT_RXQ_EXT_OUT_LEN;
+ req.emr_out_length = MC_CMD_INIT_RXQ_V3_OUT_LEN;
MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_SIZE, ndescs);
MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_TARGET_EVQ, target_evq);
@@ -87,6 +98,19 @@ efx_mcdi_init_rxq(
MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_OWNER_ID, 0);
MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+ if (es_bufs_per_desc > 0) {
+ MCDI_IN_SET_DWORD(req,
+ INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET,
+ es_bufs_per_desc);
+ MCDI_IN_SET_DWORD(req,
+ INIT_RXQ_V3_IN_ES_MAX_DMA_LEN, es_max_dma_len);
+ MCDI_IN_SET_DWORD(req,
+ INIT_RXQ_V3_IN_ES_PACKET_STRIDE, es_buf_stride);
+ MCDI_IN_SET_DWORD(req,
+ INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT,
+ hol_block_timeout);
+ }
+
dma_addr = MCDI_IN2(req, efx_qword_t, INIT_RXQ_IN_DMA_ADDR);
addr = EFSYS_MEM_ADDR(esmp);
@@ -103,11 +127,13 @@ efx_mcdi_init_rxq(
if (req.emr_rc != 0) {
rc = req.emr_rc;
- goto fail1;
+ goto fail2;
}
return (0);
+fail2:
+ EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
@@ -291,11 +317,34 @@ efx_mcdi_rss_context_set_flags(
__in uint32_t rss_context,
__in efx_rx_hash_type_t type)
{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ efx_rx_hash_type_t type_ipv4;
+ efx_rx_hash_type_t type_ipv4_tcp;
+ efx_rx_hash_type_t type_ipv6;
+ efx_rx_hash_type_t type_ipv6_tcp;
+ efx_rx_hash_type_t modes;
efx_mcdi_req_t req;
uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN,
MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN)];
efx_rc_t rc;
+ EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_TCP_LBN ==
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN);
+ EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_TCP_WIDTH ==
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH);
+ EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_LBN ==
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN);
+ EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_WIDTH ==
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH);
+ EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_TCP_LBN ==
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN);
+ EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_TCP_WIDTH ==
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH);
+ EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_LBN ==
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN);
+ EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_WIDTH ==
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH);
+
if (rss_context == EF10_RSS_CONTEXT_INVALID) {
rc = EINVAL;
goto fail1;
@@ -311,15 +360,57 @@ efx_mcdi_rss_context_set_flags(
MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID,
rss_context);
- MCDI_IN_POPULATE_DWORD_4(req, RSS_CONTEXT_SET_FLAGS_IN_FLAGS,
+ type_ipv4 = EFX_RX_HASH(IPV4, 2TUPLE) | EFX_RX_HASH(IPV4_TCP, 2TUPLE) |
+ EFX_RX_HASH(IPV4_UDP, 2TUPLE);
+ type_ipv4_tcp = EFX_RX_HASH(IPV4_TCP, 4TUPLE);
+ type_ipv6 = EFX_RX_HASH(IPV6, 2TUPLE) | EFX_RX_HASH(IPV6_TCP, 2TUPLE) |
+ EFX_RX_HASH(IPV6_UDP, 2TUPLE);
+ type_ipv6_tcp = EFX_RX_HASH(IPV6_TCP, 4TUPLE);
+
+ /*
+ * Create a copy of the original hash type.
+ * The copy will be used to fill in RSS_MODE bits and
+ * may be cleared beforehand. The original variable
+ * and, thus, EN bits will remain unaffected.
+ */
+ modes = type;
+
+ /*
+ * If the firmware lacks support for additional modes, RSS_MODE
+ * fields must contain zeros, otherwise the operation will fail.
+ */
+ if (encp->enc_rx_scale_additional_modes_supported == B_FALSE)
+ modes = 0;
+
+#define EXTRACT_RSS_MODE(_type, _class) \
+ (EFX_EXTRACT_NATIVE(_type, 0, 31, \
+ EFX_LOW_BIT(EFX_RX_CLASS_##_class), \
+ EFX_HIGH_BIT(EFX_RX_CLASS_##_class)) & \
+ EFX_MASK32(EFX_RX_CLASS_##_class))
+
+ MCDI_IN_POPULATE_DWORD_10(req, RSS_CONTEXT_SET_FLAGS_IN_FLAGS,
RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN,
- (type & EFX_RX_HASH_IPV4) ? 1 : 0,
+ ((type & type_ipv4) == type_ipv4) ? 1 : 0,
RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN,
- (type & EFX_RX_HASH_TCPIPV4) ? 1 : 0,
+ ((type & type_ipv4_tcp) == type_ipv4_tcp) ? 1 : 0,
RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN,
- (type & EFX_RX_HASH_IPV6) ? 1 : 0,
+ ((type & type_ipv6) == type_ipv6) ? 1 : 0,
RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN,
- (type & EFX_RX_HASH_TCPIPV6) ? 1 : 0);
+ ((type & type_ipv6_tcp) == type_ipv6_tcp) ? 1 : 0,
+ RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE,
+ EXTRACT_RSS_MODE(modes, IPV4_TCP),
+ RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE,
+ EXTRACT_RSS_MODE(modes, IPV4_UDP),
+ RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE,
+ EXTRACT_RSS_MODE(modes, IPV4),
+ RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE,
+ EXTRACT_RSS_MODE(modes, IPV6_TCP),
+ RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE,
+ EXTRACT_RSS_MODE(modes, IPV6_UDP),
+ RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE,
+ EXTRACT_RSS_MODE(modes, IPV6));
+
+#undef EXTRACT_RSS_MODE
efx_mcdi_execute(enp, &req);
@@ -544,12 +635,13 @@ ef10_rx_scale_mode_set(
__in efx_rx_hash_type_t type,
__in boolean_t insert)
{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
efx_rc_t rc;
- EFSYS_ASSERT3U(alg, ==, EFX_RX_HASHALG_TOEPLITZ);
EFSYS_ASSERT3U(insert, ==, B_TRUE);
- if ((alg != EFX_RX_HASHALG_TOEPLITZ) || (insert == B_FALSE)) {
+ if ((encp->enc_rx_scale_hash_alg_mask & (1U << alg)) == 0 ||
+ insert == B_FALSE) {
rc = EINVAL;
goto fail1;
}
@@ -698,6 +790,7 @@ ef10_rx_prefix_hash(
_NOTE(ARGUNUSED(enp))
switch (func) {
+ case EFX_RX_HASHALG_PACKED_STREAM:
case EFX_RX_HASHALG_TOEPLITZ:
return (buffer[0] |
(buffer[1] << 8) |
@@ -795,8 +888,8 @@ ef10_rx_qpush(
EFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1,
wptr, pushed & erp->er_mask);
EFSYS_PIO_WRITE_BARRIER();
- EFX_BAR_TBL_WRITED(enp, ER_DZ_RX_DESC_UPD_REG,
- erp->er_index, &dword, B_FALSE);
+ EFX_BAR_VI_WRITED(enp, ER_DZ_RX_DESC_UPD_REG,
+ erp->er_index, &dword, B_FALSE);
}
#if EFSYS_OPT_RX_PACKED_STREAM
@@ -827,7 +920,7 @@ ef10_rx_qpush_ps_credits(
ERF_DZ_RX_DESC_MAGIC_CMD,
ERE_DZ_RX_DESC_MAGIC_CMD_PS_CREDITS,
ERF_DZ_RX_DESC_MAGIC_DATA, credits);
- EFX_BAR_TBL_WRITED(enp, ER_DZ_RX_DESC_UPD_REG,
+ EFX_BAR_VI_WRITED(enp, ER_DZ_RX_DESC_UPD_REG,
erp->er_index, &dword, B_FALSE);
rxq_state->eers_rx_packed_stream_credits = 0;
@@ -926,7 +1019,7 @@ ef10_rx_qcreate(
__in unsigned int index,
__in unsigned int label,
__in efx_rxq_type_t type,
- __in uint32_t type_data,
+ __in const efx_rxq_type_data_t *type_data,
__in efsys_mem_t *esmp,
__in size_t ndescs,
__in uint32_t id,
@@ -939,6 +1032,10 @@ ef10_rx_qcreate(
boolean_t disable_scatter;
boolean_t want_inner_classes;
unsigned int ps_buf_size;
+ uint32_t es_bufs_per_desc = 0;
+ uint32_t es_max_dma_len = 0;
+ uint32_t es_buf_stride = 0;
+ uint32_t hol_block_timeout = 0;
_NOTE(ARGUNUSED(id, erp, type_data))
@@ -965,7 +1062,7 @@ ef10_rx_qcreate(
break;
#if EFSYS_OPT_RX_PACKED_STREAM
case EFX_RXQ_TYPE_PACKED_STREAM:
- switch (type_data) {
+ switch (type_data->ertd_packed_stream.eps_buf_size) {
case EFX_RXQ_PACKED_STREAM_BUF_SIZE_1M:
ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M;
break;
@@ -987,6 +1084,19 @@ ef10_rx_qcreate(
}
break;
#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+#if EFSYS_OPT_RX_ES_SUPER_BUFFER
+ case EFX_RXQ_TYPE_ES_SUPER_BUFFER:
+ ps_buf_size = 0;
+ es_bufs_per_desc =
+ type_data->ertd_es_super_buffer.eessb_bufs_per_desc;
+ es_max_dma_len =
+ type_data->ertd_es_super_buffer.eessb_max_dma_len;
+ es_buf_stride =
+ type_data->ertd_es_super_buffer.eessb_buf_stride;
+ hol_block_timeout =
+ type_data->ertd_es_super_buffer.eessb_hol_block_timeout;
+ break;
+#endif /* EFSYS_OPT_RX_ES_SUPER_BUFFER */
default:
rc = ENOTSUP;
goto fail4;
@@ -1010,6 +1120,27 @@ ef10_rx_qcreate(
EFSYS_ASSERT(ps_buf_size == 0);
#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+#if EFSYS_OPT_RX_ES_SUPER_BUFFER
+ if (es_bufs_per_desc > 0) {
+ if (encp->enc_rx_es_super_buffer_supported == B_FALSE) {
+ rc = ENOTSUP;
+ goto fail7;
+ }
+ if (!IS_P2ALIGNED(es_max_dma_len,
+ EFX_RX_ES_SUPER_BUFFER_BUF_ALIGNMENT)) {
+ rc = EINVAL;
+ goto fail8;
+ }
+ if (!IS_P2ALIGNED(es_buf_stride,
+ EFX_RX_ES_SUPER_BUFFER_BUF_ALIGNMENT)) {
+ rc = EINVAL;
+ goto fail9;
+ }
+ }
+#else /* EFSYS_OPT_RX_ES_SUPER_BUFFER */
+ EFSYS_ASSERT(es_bufs_per_desc == 0);
+#endif /* EFSYS_OPT_RX_ES_SUPER_BUFFER */
+
/* Scatter can only be disabled if the firmware supports doing so */
if (flags & EFX_RXQ_FLAG_SCATTER)
disable_scatter = B_FALSE;
@@ -1023,8 +1154,9 @@ ef10_rx_qcreate(
if ((rc = efx_mcdi_init_rxq(enp, ndescs, eep->ee_index, label, index,
esmp, disable_scatter, want_inner_classes,
- ps_buf_size)) != 0)
- goto fail7;
+ ps_buf_size, es_bufs_per_desc, es_max_dma_len,
+ es_buf_stride, hol_block_timeout)) != 0)
+ goto fail10;
erp->er_eep = eep;
erp->er_label = label;
@@ -1035,8 +1167,16 @@ ef10_rx_qcreate(
return (0);
+fail10:
+ EFSYS_PROBE(fail10);
+#if EFSYS_OPT_RX_ES_SUPER_BUFFER
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
fail7:
EFSYS_PROBE(fail7);
+#endif /* EFSYS_OPT_RX_ES_SUPER_BUFFER */
#if EFSYS_OPT_RX_PACKED_STREAM
fail6:
EFSYS_PROBE(fail6);
@@ -1087,4 +1227,4 @@ ef10_rx_fini(
#endif /* EFSYS_OPT_RX_SCALE */
}
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
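
ef10_rx_qcreate() now receives its per-type parameters through the efx_rxq_type_data_t union. An illustrative sketch of filling it for an equal stride super-buffer queue, using the eessb_* fields consumed above (the numeric values are arbitrary examples, and the public wrapper that passes this structure down is not shown in this hunk):

static void
example_fill_es_super_buffer(efx_rxq_type_data_t *tdp)
{
	(void) memset(tdp, 0, sizeof (*tdp));
	tdp->ertd_es_super_buffer.eessb_bufs_per_desc = 32;
	/* Both lengths must satisfy the IS_P2ALIGNED() checks above */
	tdp->ertd_es_super_buffer.eessb_max_dma_len = 2048;
	tdp->ertd_es_super_buffer.eessb_buf_stride = 2048;
	/* Head-of-line blocking timeout: placeholder value for this sketch */
	tdp->ertd_es_super_buffer.eessb_hol_block_timeout = 0;
}
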
diff --git a/drivers/net/sfc/base/ef10_signed_image_layout.h b/drivers/net/sfc/base/ef10_signed_image_layout.h
new file mode 100644
index 00000000..a35d1601
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_signed_image_layout.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+/* These structures define the layouts for the signed firmware image binary
+ * saved in NVRAM. The original image is in the Cryptographic Message
+ * Syntax (CMS) format, which contains the bootable firmware binary plus
+ * the signatures. The entire image is written into NVRAM to enable the
+ * firmware to validate the signatures. However, the bootrom still requires
+ * the bootable image to start at offset 0 of the NVRAM partition. Hence the
+ * image is parsed upfront by host utilities (sfupdate) and written into
+ * NVRAM as 'signed_image_chunks' described by a header.
+ *
+ * This file is used by the MC as well as host-utilities (sfupdate).
+ */
+
+
+#ifndef CI_MGMT_SIGNED_IMAGE_LAYOUT_H
+#define CI_MGMT_SIGNED_IMAGE_LAYOUT_H
+
+/* Signed image chunk type identifiers */
+enum {
+ SIGNED_IMAGE_CHUNK_CMS_HEADER, /* CMS header describing the signed data */
+ SIGNED_IMAGE_CHUNK_REFLASH_HEADER, /* Reflash header */
+ SIGNED_IMAGE_CHUNK_IMAGE, /* Bootable binary image */
+ SIGNED_IMAGE_CHUNK_REFLASH_TRAILER, /* Reflash trailer */
+ SIGNED_IMAGE_CHUNK_SIGNATURE, /* Remaining contents of the signed image,
+					 * including the certificates and signature */
+ NUM_SIGNED_IMAGE_CHUNKS,
+};
+
+/* Magic */
+#define SIGNED_IMAGE_CHUNK_HDR_MAGIC 0xEF105161 /* EF10 SIGned Image */
+
+/* Initial version definition - version 1 */
+#define SIGNED_IMAGE_CHUNK_HDR_VERSION 0x1
+
+/* Header length is 32 bytes */
+#define SIGNED_IMAGE_CHUNK_HDR_LEN 32
+/* Structure describing the header of each chunk of the signed image
+ * as stored in NVRAM
+ */
+typedef struct signed_image_chunk_hdr_e {
+	/* Magic field to recognise a valid entry;
+	 * should match SIGNED_IMAGE_CHUNK_HDR_MAGIC
+ */
+ uint32_t magic;
+ /* Version number of this header */
+ uint32_t version;
+ /* Chunk type identifier */
+ uint32_t id;
+ /* Chunk offset */
+ uint32_t offset;
+ /* Chunk length */
+ uint32_t len;
+ /* Reserved for future expansion of this structure - always set to zeros */
+ uint32_t reserved[3];
+} signed_image_chunk_hdr_t;
+
+#endif /* CI_MGMT_SIGNED_IMAGE_LAYOUT_H */
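
A brief illustrative check of a chunk header read back from NVRAM, using only the constants and fields defined above (not part of the header file; endianness handling is omitted):

static int
example_chunk_hdr_valid(const signed_image_chunk_hdr_t *hdrp, size_t partn_len)
{
	if (hdrp->magic != SIGNED_IMAGE_CHUNK_HDR_MAGIC)
		return (0);
	if (hdrp->version != SIGNED_IMAGE_CHUNK_HDR_VERSION)
		return (0);
	if (hdrp->id >= NUM_SIGNED_IMAGE_CHUNKS)
		return (0);
	/* Chunk must lie entirely within the partition */
	if (hdrp->offset > partn_len || hdrp->len > partn_len - hdrp->offset)
		return (0);
	return (1);
}
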
diff --git a/drivers/net/sfc/base/ef10_tlv_layout.h b/drivers/net/sfc/base/ef10_tlv_layout.h
index 2473a66a..56cffaee 100644
--- a/drivers/net/sfc/base/ef10_tlv_layout.h
+++ b/drivers/net/sfc/base/ef10_tlv_layout.h
@@ -4,6 +4,14 @@
* All rights reserved.
*/
+/*
+ * This is NOT the original source file. Do NOT edit it.
+ * To update the tlv layout, please edit the copy in
+ * the sfregistry repo and then, in that repo,
+ * "make tlv_headers" or "make export" to
+ * regenerate and export all types of headers.
+ */
+
/* These structures define the layouts for the TLV items stored in static and
* dynamic configuration partitions in NVRAM for EF10 (Huntington etc.).
*
@@ -32,6 +40,7 @@
* 1: dynamic configuration
* 2: firmware internal use
* 3: license partition
+ * 4: tsa configuration
*
* - TTT is a type, which is just a unique value. The same type value
* might appear in both locations, indicating a relationship between
@@ -407,6 +416,8 @@ struct tlv_firmware_options {
#define TLV_FIRMWARE_VARIANT_PACKED_STREAM_HASH_MODE_1 \
MC_CMD_FW_PACKED_STREAM_HASH_MODE_1
#define TLV_FIRMWARE_VARIANT_RULES_ENGINE MC_CMD_FW_RULES_ENGINE
+#define TLV_FIRMWARE_VARIANT_DPDK MC_CMD_FW_DPDK
+#define TLV_FIRMWARE_VARIANT_L3XUDP MC_CMD_FW_L3XUDP
};
/* Voltage settings
@@ -525,6 +536,17 @@ struct tlv_pcie_config_r2 {
* number of externally visible ports (and, hence, PF to port mapping), so must
* be done at boot time.
*
+ * Port mode naming convention is
+ *
+ * [nports_on_cage0]x[port_lane_width]_[nports_on_cage1]x[port_lane_width]
+ *
+ * Port lane width determines the capabilities (speeds) of the ports, subject
+ * to architecture capabilities (e.g. 25G support) and switch bandwidth
+ * constraints:
+ * - single lane ports can do 25G/10G/1G
+ * - dual lane ports can do 50G/25G/10G/1G (with fallback to 1 lane)
+ * - quad lane ports can do 100G/40G/50G/25G/10G/1G (with fallback to 2 or 1 lanes)
+ *
* This tag supersedes tlv_global_port_config.
*/
@@ -535,18 +557,58 @@ struct tlv_global_port_mode {
uint32_t length;
uint32_t port_mode;
#define TLV_PORT_MODE_DEFAULT (0xffffffff) /* Default for given platform */
-#define TLV_PORT_MODE_10G (0) /* 10G, single SFP/10G-KR */
-#define TLV_PORT_MODE_40G (1) /* 40G, single QSFP/40G-KR */
-#define TLV_PORT_MODE_10G_10G (2) /* 2x10G, dual SFP/10G-KR or single QSFP */
-#define TLV_PORT_MODE_40G_40G (3) /* 40G + 40G, dual QSFP/40G-KR (Greenport, Medford) */
-#define TLV_PORT_MODE_10G_10G_10G_10G (4) /* 2x10G + 2x10G, quad SFP/10G-KR or dual QSFP (Greenport) */
-#define TLV_PORT_MODE_10G_10G_10G_10G_Q1 (4) /* 4x10G, single QSFP, cage 0 (Medford) */
-#define TLV_PORT_MODE_10G_10G_10G_10G_Q (5) /* 4x10G, single QSFP, cage 0 (Medford) OBSOLETE DO NOT USE */
-#define TLV_PORT_MODE_40G_10G_10G (6) /* 1x40G + 2x10G, dual QSFP (Greenport, Medford) */
-#define TLV_PORT_MODE_10G_10G_40G (7) /* 2x10G + 1x40G, dual QSFP (Greenport, Medford) */
-#define TLV_PORT_MODE_10G_10G_10G_10G_Q2 (8) /* 4x10G, single QSFP, cage 1 (Medford) */
-#define TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2 (9) /* 2x10G + 2x10G, dual QSFP (Medford) */
-#define TLV_PORT_MODE_MAX TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2
+
+/* Huntington port modes */
+#define TLV_PORT_MODE_10G (0)
+#define TLV_PORT_MODE_40G (1)
+#define TLV_PORT_MODE_10G_10G (2)
+#define TLV_PORT_MODE_40G_40G (3)
+#define TLV_PORT_MODE_10G_10G_10G_10G (4)
+#define TLV_PORT_MODE_40G_10G_10G (6)
+#define TLV_PORT_MODE_10G_10G_40G (7)
+
+/* Medford (and later) port modes */
+#define TLV_PORT_MODE_1x1_NA (0) /* Single 10G/25G on mdi0 */
+#define TLV_PORT_MODE_1x4_NA (1) /* Single 100G/40G on mdi0 */
+#define TLV_PORT_MODE_NA_1x4 (22) /* Single 100G/40G on mdi1 */
+#define TLV_PORT_MODE_1x2_NA (10) /* Single 50G on mdi0 */
+#define TLV_PORT_MODE_NA_1x2 (11) /* Single 50G on mdi1 */
+#define TLV_PORT_MODE_1x1_1x1 (2) /* Single 10G/25G on mdi0, single 10G/25G on mdi1 */
+#define TLV_PORT_MODE_1x4_1x4 (3) /* Single 40G on mdi0, single 40G on mdi1 */
+#define TLV_PORT_MODE_2x1_2x1 (5) /* Dual 10G/25G on mdi0, dual 10G/25G on mdi1 */
+#define TLV_PORT_MODE_4x1_NA (4) /* Quad 10G/25G on mdi0 */
+#define TLV_PORT_MODE_NA_4x1 (8) /* Quad 10G/25G on mdi1 */
+#define TLV_PORT_MODE_1x4_2x1 (6) /* Single 40G on mdi0, dual 10G/25G on mdi1 */
+#define TLV_PORT_MODE_2x1_1x4 (7) /* Dual 10G/25G on mdi0, single 40G on mdi1 */
+#define TLV_PORT_MODE_1x2_1x2 (12) /* Single 50G on mdi0, single 50G on mdi1 */
+#define TLV_PORT_MODE_2x2_NA (13) /* Dual 50G on mdi0 */
+#define TLV_PORT_MODE_NA_2x2 (14) /* Dual 50G on mdi1 */
+#define TLV_PORT_MODE_1x4_1x2 (15) /* Single 40G on mdi0, single 50G on mdi1 */
+#define TLV_PORT_MODE_1x2_1x4 (16) /* Single 50G on mdi0, single 40G on mdi1 */
+#define TLV_PORT_MODE_1x2_2x1 (17) /* Single 50G on mdi0, dual 10G/25G on mdi1 */
+#define TLV_PORT_MODE_2x1_1x2 (18) /* Dual 10G/25G on mdi0, single 50G on mdi1 */
+
+/* Snapper-only Medford2 port modes.
+ * These modes are for eftest use only, to allow explicit snapper
+ * selection between multi-channel and LLPCS. In production, this
+ * selection is automatic and the outside world should not
+ * care about LLPCS.
+ */
+#define TLV_PORT_MODE_2x1_2x1_LL (19) /* Dual 10G/25G on mdi0, dual 10G/25G on mdi1, low-latency PCS */
+#define TLV_PORT_MODE_4x1_NA_LL (20) /* Quad 10G/25G on mdi0, low-latency PCS */
+#define TLV_PORT_MODE_NA_4x1_LL (21) /* Quad 10G/25G on mdi1, low-latency PCS */
+#define TLV_PORT_MODE_1x1_NA_LL (23) /* Single 10G/25G on mdi0, low-latency PCS */
+#define TLV_PORT_MODE_1x1_1x1_LL (24) /* Single 10G/25G on mdi0, single 10G/25G on mdi1, low-latency PCS */
+#define TLV_PORT_MODE_BUG63720_DO_NOT_USE (9) /* bug63720: Do not use */
+#define TLV_PORT_MODE_MAX TLV_PORT_MODE_1x1_1x1_LL
+
+/* Deprecated Medford aliases - DO NOT USE IN NEW CODE */
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q (5)
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q1 (4)
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q2 (8)
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2 (9)
+
};
/* Type of the v-switch created implicitly by the firmware */
@@ -791,7 +853,7 @@ typedef struct tlv_license {
uint8_t data[];
} tlv_license_t;
-/* TSA NIC IP address configuration
+/* TSA NIC IP address configuration (DEPRECATED)
*
* Sets the TSA NIC IP address statically via configuration tool or dynamically
* via DHCP via snooping based on the mode selection (0=Static, 1=DHCP, 2=Snoop)
@@ -801,7 +863,7 @@ typedef struct tlv_license {
* released code yet.
*/
-#define TLV_TAG_TMP_TSAN_CONFIG (0x10220000)
+#define TLV_TAG_TMP_TSAN_CONFIG (0x10220000) /* DEPRECATED */
#define TLV_TSAN_IP_MODE_STATIC (0)
#define TLV_TSAN_IP_MODE_DHCP (1)
@@ -818,7 +880,7 @@ typedef struct tlv_tsan_config {
uint32_t bind_bkout; /* DEPRECATED */
} tlv_tsan_config_t;
-/* TSA Controller IP address configuration
+/* TSA Controller IP address configuration (DEPRECATED)
*
* Sets the TSA Controller IP address statically via configuration tool
*
@@ -827,7 +889,7 @@ typedef struct tlv_tsan_config {
* released code yet.
*/
-#define TLV_TAG_TMP_TSAC_CONFIG (0x10230000)
+#define TLV_TAG_TMP_TSAC_CONFIG (0x10230000) /* DEPRECATED */
#define TLV_MAX_TSACS (4)
typedef struct tlv_tsac_config {
@@ -838,7 +900,7 @@ typedef struct tlv_tsac_config {
uint32_t port[TLV_MAX_TSACS];
} tlv_tsac_config_t;
-/* Binding ticket
+/* Binding ticket (DEPRECATED)
*
* Sets the TSA NIC binding ticket used for binding process between the TSA NIC
* and the TSA Controller
@@ -848,7 +910,7 @@ typedef struct tlv_tsac_config {
* released code yet.
*/
-#define TLV_TAG_TMP_BINDING_TICKET (0x10240000)
+#define TLV_TAG_TMP_BINDING_TICKET (0x10240000) /* DEPRECATED */
typedef struct tlv_binding_ticket {
uint32_t tag;
@@ -873,7 +935,7 @@ typedef struct tlv_pik_sf {
uint8_t bytes[];
} tlv_pik_sf_t;
-/* CA root certificate
+/* CA root certificate (DEPRECATED)
*
* Sets the CA root certificate used for TSA Controller verification during
* TLS connection setup between the TSA NIC and the TSA Controller
@@ -883,7 +945,7 @@ typedef struct tlv_pik_sf {
* released code yet.
*/
-#define TLV_TAG_TMP_CA_ROOT_CERT (0x10260000)
+#define TLV_TAG_TMP_CA_ROOT_CERT (0x10260000) /* DEPRECATED */
typedef struct tlv_ca_root_cert {
uint32_t tag;
@@ -933,4 +995,17 @@ struct tlv_fastpd_mode {
#define TLV_FASTPD_MODE_FAST_SUPPORTED 2 /* Supported packet types to the FastPD; everything else to the SoftPD */
};
+/* L3xUDP datapath firmware UDP port configuration
+ *
+ * Sets the list of UDP ports on which the encapsulation will be handled.
+ * The number of ports in the list is implied by the length of the TLV item.
+ */
+#define TLV_TAG_L3XUDP_PORTS (0x102a0000)
+struct tlv_l3xudp_ports {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t ports[];
+#define TLV_TAG_L3XUDP_PORTS_MAX_NUM_PORTS 16
+};
+
#endif /* CI_MGMT_TLV_LAYOUT_H */
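Purely as an illustration, and assuming the length field counts only the value bytes that follow it (as for the other TLV items in this file), the number of configured L3xUDP ports can be recovered from the item length:

	/* Sketch: derive the port count from a tlv_l3xudp_ports item,
	 * assuming 'length' covers only the bytes after the length field. */
	static unsigned int
	l3xudp_port_count(const struct tlv_l3xudp_ports *item)
	{
		unsigned int n = item->length / sizeof (item->ports[0]);

		return (n > TLV_TAG_L3XUDP_PORTS_MAX_NUM_PORTS ?
		    TLV_TAG_L3XUDP_PORTS_MAX_NUM_PORTS : n);
	}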
diff --git a/drivers/net/sfc/base/ef10_tx.c b/drivers/net/sfc/base/ef10_tx.c
index 69c75700..7d27f710 100644
--- a/drivers/net/sfc/base/ef10_tx.c
+++ b/drivers/net/sfc/base/ef10_tx.c
@@ -8,7 +8,7 @@
#include "efx_impl.h"
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
#if EFSYS_OPT_QSTATS
#define EFX_TX_QSTAT_INCR(_etp, _stat) \
@@ -42,10 +42,15 @@ efx_mcdi_init_txq(
EFSYS_ASSERT(EFX_TXQ_MAX_BUFS >=
EFX_TXQ_NBUFS(enp->en_nic_cfg.enc_txq_max_ndescs));
+ if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_TXQ_SIZE(ndescs))) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
npages = EFX_TXQ_NBUFS(ndescs);
if (MC_CMD_INIT_TXQ_IN_LEN(npages) > sizeof (payload)) {
rc = EINVAL;
- goto fail1;
+ goto fail2;
}
(void) memset(payload, 0, sizeof (payload));
@@ -94,11 +99,13 @@ efx_mcdi_init_txq(
if (req.emr_rc != 0) {
rc = req.emr_rc;
- goto fail2;
+ goto fail3;
}
return (0);
+fail3:
+ EFSYS_PROBE(fail3);
fail2:
EFSYS_PROBE(fail2);
fail1:
@@ -176,7 +183,7 @@ ef10_tx_qcreate(
{
efx_nic_cfg_t *encp = &enp->en_nic_cfg;
uint16_t inner_csum;
- efx_qword_t desc;
+ efx_desc_t desc;
efx_rc_t rc;
_NOTE(ARGUNUSED(id))
@@ -201,19 +208,9 @@ ef10_tx_qcreate(
* a no-op TX option descriptor. See bug29981 for details.
*/
*addedp = 1;
- EFX_POPULATE_QWORD_6(desc,
- ESF_DZ_TX_DESC_IS_OPT, 1,
- ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
- ESF_DZ_TX_OPTION_UDP_TCP_CSUM,
- (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0,
- ESF_DZ_TX_OPTION_IP_CSUM,
- (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0,
- ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM,
- (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0,
- ESF_DZ_TX_OPTION_INNER_IP_CSUM,
- (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0);
+ ef10_tx_qdesc_checksum_create(etp, flags, &desc);
- EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc);
+ EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc.ed_eq);
ef10_tx_qpush(etp, *addedp, 0);
return (0);
@@ -511,8 +508,8 @@ ef10_tx_qpush(
EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
wptr, id);
EFSYS_PIO_WRITE_BARRIER();
- EFX_BAR_TBL_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG,
- etp->et_index, &oword);
+ EFX_BAR_VI_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG,
+ etp->et_index, &oword);
} else {
efx_dword_t dword;
@@ -527,8 +524,8 @@ ef10_tx_qpush(
EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
wptr, id);
EFSYS_PIO_WRITE_BARRIER();
- EFX_BAR_TBL_WRITED2(enp, ER_DZ_TX_DESC_UPD_REG,
- etp->et_index, &dword, B_FALSE);
+ EFX_BAR_VI_WRITED2(enp, ER_DZ_TX_DESC_UPD_REG,
+ etp->et_index, &dword, B_FALSE);
}
}
@@ -626,6 +623,7 @@ ef10_tx_qdesc_tso_create(
ef10_tx_qdesc_tso2_create(
__in efx_txq_t *etp,
__in uint16_t ipv4_id,
+ __in uint16_t outer_ipv4_id,
__in uint32_t tcp_seq,
__in uint16_t tcp_mss,
__out_ecount(count) efx_desc_t *edp,
@@ -639,13 +637,14 @@ ef10_tx_qdesc_tso2_create(
EFSYS_ASSERT(count >= EFX_TX_FATSOV2_OPT_NDESCS);
- EFX_POPULATE_QWORD_5(edp[0].ed_eq,
+ EFX_POPULATE_QWORD_6(edp[0].ed_eq,
ESF_DZ_TX_DESC_IS_OPT, 1,
ESF_DZ_TX_OPTION_TYPE,
ESE_DZ_TX_OPTION_DESC_TSO,
ESF_DZ_TX_TSO_OPTION_TYPE,
ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
ESF_DZ_TX_TSO_IP_ID, ipv4_id,
+ ESF_DZ_TX_TSO_OUTER_IPID, outer_ipv4_id,
ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
EFX_POPULATE_QWORD_4(edp[1].ed_eq,
ESF_DZ_TX_DESC_IS_OPT, 1,
@@ -675,6 +674,30 @@ ef10_tx_qdesc_vlantci_create(
ESF_DZ_TX_VLAN_TAG1, tci);
}
+ void
+ef10_tx_qdesc_checksum_create(
+ __in efx_txq_t *etp,
+ __in uint16_t flags,
+ __out efx_desc_t *edp)
+{
+ _NOTE(ARGUNUSED(etp));
+
+ EFSYS_PROBE2(tx_desc_checksum_create, unsigned int, etp->et_index,
+ uint32_t, flags);
+
+ EFX_POPULATE_QWORD_6(edp->ed_eq,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
+ ESF_DZ_TX_OPTION_UDP_TCP_CSUM,
+ (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0,
+ ESF_DZ_TX_OPTION_IP_CSUM,
+ (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0,
+ ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM,
+ (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0,
+ ESF_DZ_TX_OPTION_INNER_IP_CSUM,
+ (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0);
+}
+
__checkReturn efx_rc_t
ef10_tx_qpace(
@@ -752,4 +775,4 @@ ef10_tx_qstats_update(
#endif /* EFSYS_OPT_QSTATS */
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
diff --git a/drivers/net/sfc/base/ef10_vpd.c b/drivers/net/sfc/base/ef10_vpd.c
index ad522e82..097fe1d4 100644
--- a/drivers/net/sfc/base/ef10_vpd.c
+++ b/drivers/net/sfc/base/ef10_vpd.c
@@ -10,7 +10,7 @@
#if EFSYS_OPT_VPD
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
#include "ef10_tlv_layout.h"
@@ -26,7 +26,8 @@ ef10_vpd_init(
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
if (enp->en_nic_cfg.enc_vpd_is_global) {
tag = TLV_TAG_GLOBAL_STATIC_VPD;
@@ -82,7 +83,8 @@ ef10_vpd_size(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
/*
* This function returns the total size the user should allocate
@@ -115,7 +117,8 @@ ef10_vpd_read(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
if (enp->en_nic_cfg.enc_vpd_is_global) {
tag = TLV_TAG_GLOBAL_DYNAMIC_VPD;
@@ -133,19 +136,22 @@ ef10_vpd_read(
rc = ENOSPC;
goto fail2;
}
- memcpy(data, dvpd, dvpd_size);
+ if (dvpd != NULL)
+ memcpy(data, dvpd, dvpd_size);
/* Pad data with all-1s, consistent with update operations */
memset(data + dvpd_size, 0xff, size - dvpd_size);
- EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd);
+ if (dvpd != NULL)
+ EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd);
return (0);
fail2:
EFSYS_PROBE(fail2);
- EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd);
+ if (dvpd != NULL)
+ EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
@@ -167,7 +173,8 @@ ef10_vpd_verify(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
/*
* Strictly you could take the view that dynamic vpd is optional.
@@ -288,7 +295,8 @@ ef10_vpd_get(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
/* Attempt to satisfy the request from svpd first */
if (enp->en_arch.ef10.ena_svpd_length > 0) {
@@ -334,7 +342,8 @@ ef10_vpd_set(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
/* If the provided (tag,keyword) exists in svpd, then it is readonly */
if (enp->en_arch.ef10.ena_svpd_length > 0) {
@@ -387,7 +396,8 @@ ef10_vpd_write(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
if (enp->en_nic_cfg.enc_vpd_is_global) {
tag = TLV_TAG_GLOBAL_DYNAMIC_VPD;
@@ -423,7 +433,8 @@ ef10_vpd_fini(
__in efx_nic_t *enp)
{
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
if (enp->en_arch.ef10.ena_svpd_length > 0) {
EFSYS_KMEM_FREE(enp->en_esip, enp->en_arch.ef10.ena_svpd_length,
@@ -434,6 +445,6 @@ ef10_vpd_fini(
}
}
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
#endif /* EFSYS_OPT_VPD */
diff --git a/drivers/net/sfc/base/efx.h b/drivers/net/sfc/base/efx.h
index fe996e7c..5108b9b1 100644
--- a/drivers/net/sfc/base/efx.h
+++ b/drivers/net/sfc/base/efx.h
@@ -40,6 +40,7 @@ typedef enum efx_family_e {
EFX_FAMILY_SIENA,
EFX_FAMILY_HUNTINGTON,
EFX_FAMILY_MEDFORD,
+ EFX_FAMILY_MEDFORD2,
EFX_FAMILY_NTYPES
} efx_family_t;
@@ -47,7 +48,8 @@ extern __checkReturn efx_rc_t
efx_family(
__in uint16_t venid,
__in uint16_t devid,
- __out efx_family_t *efp);
+ __out efx_family_t *efp,
+ __out unsigned int *membarp);
#define EFX_PCI_VENID_SFC 0x1924
@@ -69,7 +71,21 @@ efx_family(
#define EFX_PCI_DEVID_MEDFORD 0x0A03 /* SFC9240 PF */
#define EFX_PCI_DEVID_MEDFORD_VF 0x1A03 /* SFC9240 VF */
-#define EFX_MEM_BAR 2
+#define EFX_PCI_DEVID_MEDFORD2_PF_UNINIT 0x0B13
+#define EFX_PCI_DEVID_MEDFORD2 0x0B03 /* SFC9250 PF */
+#define EFX_PCI_DEVID_MEDFORD2_VF 0x1B03 /* SFC9250 VF */
+
+
+#define EFX_MEM_BAR_SIENA 2
+
+#define EFX_MEM_BAR_HUNTINGTON_PF 2
+#define EFX_MEM_BAR_HUNTINGTON_VF 0
+
+#define EFX_MEM_BAR_MEDFORD_PF 2
+#define EFX_MEM_BAR_MEDFORD_VF 0
+
+#define EFX_MEM_BAR_MEDFORD2 0
+
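With the widened efx_family() prototype above, a caller obtains the controller family and the memory BAR index in one step. A minimal sketch follows; the device ID used is only an example and the expected results are noted as assumptions:

	efx_family_t family;
	unsigned int membar;
	efx_rc_t rc;

	rc = efx_family(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2,
	    &family, &membar);
	if (rc == 0) {
		/* Expected: family == EFX_FAMILY_MEDFORD2 and
		 * membar == EFX_MEM_BAR_MEDFORD2; map that PCI BAR
		 * before calling efx_nic_create(). */
	}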
/* Error codes */
@@ -113,9 +129,22 @@ efx_nic_create(
__in efsys_lock_t *eslp,
__deref_out efx_nic_t **enpp);
+/* EFX_FW_VARIANT codes map one to one on MC_CMD_FW codes */
+typedef enum efx_fw_variant_e {
+ EFX_FW_VARIANT_FULL_FEATURED,
+ EFX_FW_VARIANT_LOW_LATENCY,
+ EFX_FW_VARIANT_PACKED_STREAM,
+ EFX_FW_VARIANT_HIGH_TX_RATE,
+ EFX_FW_VARIANT_PACKED_STREAM_HASH_MODE_1,
+ EFX_FW_VARIANT_RULES_ENGINE,
+ EFX_FW_VARIANT_DPDK,
+ EFX_FW_VARIANT_DONT_CARE = 0xffffffff
+} efx_fw_variant_t;
+
extern __checkReturn efx_rc_t
efx_nic_probe(
- __in efx_nic_t *enp);
+ __in efx_nic_t *enp,
+ __in efx_fw_variant_t efv);
extern __checkReturn efx_rc_t
efx_nic_init(
@@ -171,7 +200,7 @@ efx_nic_check_pcie_link_speed(
#if EFSYS_OPT_MCDI
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
/* Huntington and Medford require MCDIv2 commands */
#define WITH_MCDI_V2 1
#endif
@@ -307,7 +336,7 @@ efx_intr_fini(
#if EFSYS_OPT_MAC_STATS
-/* START MKCONFIG GENERATED EfxHeaderMacBlock e323546097fd7c65 */
+/* START MKCONFIG GENERATED EfxHeaderMacBlock ea466a9bc8789994 */
typedef enum efx_mac_stat_e {
EFX_MAC_RX_OCTETS,
EFX_MAC_RX_PKTS,
@@ -390,6 +419,31 @@ typedef enum efx_mac_stat_e {
EFX_MAC_VADAPTER_TX_BAD_PACKETS,
EFX_MAC_VADAPTER_TX_BAD_BYTES,
EFX_MAC_VADAPTER_TX_OVERFLOW,
+ EFX_MAC_FEC_UNCORRECTED_ERRORS,
+ EFX_MAC_FEC_CORRECTED_ERRORS,
+ EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE0,
+ EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE1,
+ EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE2,
+ EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE3,
+ EFX_MAC_CTPIO_VI_BUSY_FALLBACK,
+ EFX_MAC_CTPIO_LONG_WRITE_SUCCESS,
+ EFX_MAC_CTPIO_MISSING_DBELL_FAIL,
+ EFX_MAC_CTPIO_OVERFLOW_FAIL,
+ EFX_MAC_CTPIO_UNDERFLOW_FAIL,
+ EFX_MAC_CTPIO_TIMEOUT_FAIL,
+ EFX_MAC_CTPIO_NONCONTIG_WR_FAIL,
+ EFX_MAC_CTPIO_FRM_CLOBBER_FAIL,
+ EFX_MAC_CTPIO_INVALID_WR_FAIL,
+ EFX_MAC_CTPIO_VI_CLOBBER_FALLBACK,
+ EFX_MAC_CTPIO_UNQUALIFIED_FALLBACK,
+ EFX_MAC_CTPIO_RUNT_FALLBACK,
+ EFX_MAC_CTPIO_SUCCESS,
+ EFX_MAC_CTPIO_FALLBACK,
+ EFX_MAC_CTPIO_POISON,
+ EFX_MAC_CTPIO_ERASE,
+ EFX_MAC_RXDP_SCATTER_DISABLED_TRUNC,
+ EFX_MAC_RXDP_HLB_IDLE,
+ EFX_MAC_RXDP_HLB_TIMEOUT,
EFX_MAC_NSTATS
} efx_mac_stat_t;
@@ -408,11 +462,16 @@ typedef enum efx_link_mode_e {
EFX_LINK_1000FDX,
EFX_LINK_10000FDX,
EFX_LINK_40000FDX,
+ EFX_LINK_25000FDX,
+ EFX_LINK_50000FDX,
+ EFX_LINK_100000FDX,
EFX_LINK_NMODES
} efx_link_mode_t;
#define EFX_MAC_ADDR_LEN 6
+#define EFX_VNI_OR_VSID_LEN 3
+
#define EFX_MAC_ADDR_IS_MULTICAST(_address) (((uint8_t *)_address)[0] & 0x01)
#define EFX_MAC_MULTICAST_LIST_MAX 256
@@ -536,7 +595,6 @@ efx_mac_stats_get_mask(
((_mask)[(_stat) / EFX_MAC_STATS_MASK_BITS_PER_PAGE] & \
(1ULL << ((_stat) & (EFX_MAC_STATS_MASK_BITS_PER_PAGE - 1))))
-#define EFX_MAC_STATS_SIZE 0x400
extern __checkReturn efx_rc_t
efx_mac_stats_clear(
@@ -545,8 +603,8 @@ efx_mac_stats_clear(
/*
* Upload mac statistics supported by the hardware into the given buffer.
*
- * The reference buffer must be at least %EFX_MAC_STATS_SIZE bytes,
- * and page aligned.
+ * The DMA buffer must be 4KByte aligned and sized to hold at least
+ * efx_nic_cfg_t::enc_mac_stats_nstats 64-bit counters.
*
* The hardware will only DMA statistics that it understands (of course).
* Drivers should not make any assumptions about which statistics are
@@ -603,7 +661,7 @@ efx_mon_init(
#define EFX_MON_STATS_PAGE_SIZE 0x100
#define EFX_MON_MASK_ELEMENT_SIZE 32
-/* START MKCONFIG GENERATED MonitorHeaderStatsBlock aa0233c80156308e */
+/* START MKCONFIG GENERATED MonitorHeaderStatsBlock 400fdb0517af1fca */
typedef enum efx_mon_stat_e {
EFX_MON_STAT_2_5V,
EFX_MON_STAT_VCCP1,
@@ -684,6 +742,10 @@ typedef enum efx_mon_stat_e {
EFX_MON_STAT_BOARD_BACK_TEMP,
EFX_MON_STAT_I1V8,
EFX_MON_STAT_I2V5,
+ EFX_MON_STAT_I3V3,
+ EFX_MON_STAT_I12V0,
+ EFX_MON_STAT_1_3V,
+ EFX_MON_STAT_I1V3,
EFX_MON_NSTATS
} efx_mon_stat_t;
@@ -788,6 +850,9 @@ typedef enum efx_loopback_type_e {
EFX_LOOPBACK_SD_FEP1_5_WS = 32,
EFX_LOOPBACK_SD_FEP_WS = 33,
EFX_LOOPBACK_SD_FES_WS = 34,
+ EFX_LOOPBACK_AOE_INT_NEAR = 35,
+ EFX_LOOPBACK_DATA_WS = 36,
+ EFX_LOOPBACK_FORCE_EXT_LINK = 37,
EFX_LOOPBACK_NTYPES
} efx_loopback_type_t;
@@ -843,6 +908,16 @@ typedef enum efx_phy_cap_type_e {
EFX_PHY_CAP_ASYM,
EFX_PHY_CAP_AN,
EFX_PHY_CAP_40000FDX,
+ EFX_PHY_CAP_DDM,
+ EFX_PHY_CAP_100000FDX,
+ EFX_PHY_CAP_25000FDX,
+ EFX_PHY_CAP_50000FDX,
+ EFX_PHY_CAP_BASER_FEC,
+ EFX_PHY_CAP_BASER_FEC_REQUESTED,
+ EFX_PHY_CAP_RS_FEC,
+ EFX_PHY_CAP_RS_FEC_REQUESTED,
+ EFX_PHY_CAP_25G_BASER_FEC,
+ EFX_PHY_CAP_25G_BASER_FEC_REQUESTED,
EFX_PHY_CAP_NTYPES
} efx_phy_cap_type_t;
@@ -1080,6 +1155,13 @@ typedef enum efx_tunnel_protocol_e {
EFX_TUNNEL_NPROTOS
} efx_tunnel_protocol_t;
+typedef enum efx_vi_window_shift_e {
+ EFX_VI_WINDOW_SHIFT_INVALID = 0,
+ EFX_VI_WINDOW_SHIFT_8K = 13,
+ EFX_VI_WINDOW_SHIFT_16K = 14,
+ EFX_VI_WINDOW_SHIFT_64K = 16,
+} efx_vi_window_shift_t;
+
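The enum values are the log2 of the per-VI window size, so both the window size and a per-VI register address follow directly. A small sketch, assuming an efx_nic_cfg_t pointer 'encp', a register offset 'reg_ofst' and a VI index 'vi_index':

	/* Sketch: 8K/16K/64K window size from the shift */
	size_t vi_window_size = (size_t)1 << encp->enc_vi_window_shift;

	/* Per-VI register address, as used by the EFX_BAR_VI_*() accessors */
	uint32_t reg_addr = reg_ofst + (vi_index << encp->enc_vi_window_shift);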
typedef struct efx_nic_cfg_s {
uint32_t enc_board_type;
uint32_t enc_phy_type;
@@ -1093,6 +1175,7 @@ typedef struct efx_nic_cfg_s {
uint32_t enc_mon_stat_mask[(EFX_MON_NSTATS + 31) / 32];
#endif
unsigned int enc_features;
+ efx_vi_window_shift_t enc_vi_window_shift;
uint8_t enc_mac_addr[6];
uint8_t enc_port; /* PHY port number */
uint32_t enc_intr_vec_base;
@@ -1112,6 +1195,17 @@ typedef struct efx_nic_cfg_s {
uint32_t enc_rx_buf_align_start;
uint32_t enc_rx_buf_align_end;
uint32_t enc_rx_scale_max_exclusive_contexts;
+ /*
+ * Mask of supported hash algorithms.
+ * Hash algorithm types are used as the bit indices.
+ */
+ uint32_t enc_rx_scale_hash_alg_mask;
+ /*
+ * Indicates whether port numbers can be included in the
+ * input data for hash computation.
+ */
+ boolean_t enc_rx_scale_l4_hash_supported;
+ boolean_t enc_rx_scale_additional_modes_supported;
#if EFSYS_OPT_LOOPBACK
efx_qword_t enc_loopback_types[EFX_LINK_NMODES];
#endif /* EFSYS_OPT_LOOPBACK */
@@ -1137,11 +1231,11 @@ typedef struct efx_nic_cfg_s {
#if EFSYS_OPT_BIST
uint32_t enc_bist_mask;
#endif /* EFSYS_OPT_BIST */
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
uint32_t enc_pf;
uint32_t enc_vf;
uint32_t enc_privilege_mask;
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
boolean_t enc_bug26807_workaround;
boolean_t enc_bug35388_workaround;
boolean_t enc_bug41750_workaround;
@@ -1165,6 +1259,7 @@ typedef struct efx_nic_cfg_s {
uint32_t enc_tx_tso_tcp_header_offset_limit;
boolean_t enc_fw_assisted_tso_enabled;
boolean_t enc_fw_assisted_tso_v2_enabled;
+ boolean_t enc_fw_assisted_tso_v2_encap_enabled;
/* Number of TSO contexts on the NIC (FATSOv2) */
uint32_t enc_fw_assisted_tso_v2_n_contexts;
boolean_t enc_hw_tx_insert_vlan_enabled;
@@ -1178,6 +1273,8 @@ typedef struct efx_nic_cfg_s {
boolean_t enc_init_evq_v2_supported;
boolean_t enc_rx_packed_stream_supported;
boolean_t enc_rx_var_packed_stream_supported;
+ boolean_t enc_rx_es_super_buffer_supported;
+ boolean_t enc_fw_subvariant_no_tx_csum_supported;
boolean_t enc_pm_and_rxdp_counters;
boolean_t enc_mac_stats_40g_tx_size_bins;
uint32_t enc_tunnel_encapsulations_supported;
@@ -1196,6 +1293,14 @@ typedef struct efx_nic_cfg_s {
uint32_t enc_max_pcie_link_gen;
/* Firmware verifies integrity of NVRAM updates */
uint32_t enc_nvram_update_verify_result_supported;
+ /* Firmware support for extended MAC_STATS buffer */
+ uint32_t enc_mac_stats_nstats;
+ boolean_t enc_fec_counters;
+ boolean_t enc_hlb_counters;
+ /* Firmware support for "FLAG" and "MARK" filter actions */
+ boolean_t enc_filter_action_flag_supported;
+ boolean_t enc_filter_action_mark_supported;
+ uint32_t enc_filter_action_mark_max;
} efx_nic_cfg_t;
#define EFX_PCI_FUNCTION_IS_PF(_encp) ((_encp)->enc_vf == 0xffff)
@@ -1210,6 +1315,13 @@ extern const efx_nic_cfg_t *
efx_nic_cfg_get(
__in efx_nic_t *enp);
+/* RxDPCPU firmware id values by which FW variant can be identified */
+#define EFX_RXDP_FULL_FEATURED_FW_ID 0x0
+#define EFX_RXDP_LOW_LATENCY_FW_ID 0x1
+#define EFX_RXDP_PACKED_STREAM_FW_ID 0x2
+#define EFX_RXDP_RULES_ENGINE_FW_ID 0x5
+#define EFX_RXDP_DPDK_FW_ID 0x6
+
typedef struct efx_nic_fw_info_s {
/* Basic FW version information */
uint16_t enfi_mc_fw_version[4];
@@ -1498,6 +1610,92 @@ efx_bootcfg_write(
#endif /* EFSYS_OPT_BOOTCFG */
+#if EFSYS_OPT_IMAGE_LAYOUT
+
+#include "ef10_signed_image_layout.h"
+
+/*
+ * Image header used in unsigned and signed image layouts (see SF-102785-PS).
+ *
+ * NOTE:
+ * The image header format is extensible. However, older drivers require an
+ * exact match of image header version and header length when validating and
+ * writing firmware images.
+ *
+ * To avoid breaking backward compatibility, we use the upper bits of the
+ * controller version fields to contain an extra version number used for
+ * combined bootROM and UEFI ROM images on EF10 and later (to hold the UEFI ROM
+ * version). See bug39254 and SF-102785-PS for details.
+ */
+typedef struct efx_image_header_s {
+ uint32_t eih_magic;
+ uint32_t eih_version;
+ uint32_t eih_type;
+ uint32_t eih_subtype;
+ uint32_t eih_code_size;
+ uint32_t eih_size;
+ union {
+ uint32_t eih_controller_version_min;
+ struct {
+ uint16_t eih_controller_version_min_short;
+ uint8_t eih_extra_version_a;
+ uint8_t eih_extra_version_b;
+ };
+ };
+ union {
+ uint32_t eih_controller_version_max;
+ struct {
+ uint16_t eih_controller_version_max_short;
+ uint8_t eih_extra_version_c;
+ uint8_t eih_extra_version_d;
+ };
+ };
+ uint16_t eih_code_version_a;
+ uint16_t eih_code_version_b;
+ uint16_t eih_code_version_c;
+ uint16_t eih_code_version_d;
+} efx_image_header_t;
+
+#define EFX_IMAGE_HEADER_SIZE (40)
+#define EFX_IMAGE_HEADER_VERSION (4)
+#define EFX_IMAGE_HEADER_MAGIC (0x106F1A5)
+
+
+typedef struct efx_image_trailer_s {
+ uint32_t eit_crc;
+} efx_image_trailer_t;
+
+#define EFX_IMAGE_TRAILER_SIZE (4)
+
+typedef enum efx_image_format_e {
+ EFX_IMAGE_FORMAT_NO_IMAGE,
+ EFX_IMAGE_FORMAT_INVALID,
+ EFX_IMAGE_FORMAT_UNSIGNED,
+ EFX_IMAGE_FORMAT_SIGNED,
+} efx_image_format_t;
+
+typedef struct efx_image_info_s {
+ efx_image_format_t eii_format;
+ uint8_t * eii_imagep;
+ size_t eii_image_size;
+ efx_image_header_t * eii_headerp;
+} efx_image_info_t;
+
+extern __checkReturn efx_rc_t
+efx_check_reflash_image(
+ __in void *bufferp,
+ __in uint32_t buffer_size,
+ __out efx_image_info_t *infop);
+
+extern __checkReturn efx_rc_t
+efx_build_signed_image_write_buffer(
+ __out uint8_t *bufferp,
+ __in uint32_t buffer_size,
+ __in efx_image_info_t *infop,
+ __out efx_image_header_t **headerpp);
+
+#endif /* EFSYS_OPT_IMAGE_LAYOUT */
+
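A hedged sketch of how a driver might use the helpers above when preparing a firmware update; the buffer variables (image_buf, image_size, write_buf, write_buf_size) and the error handling are assumptions, not part of this patch:

	efx_image_info_t info;
	efx_image_header_t *write_hdrp;
	efx_rc_t rc;

	rc = efx_check_reflash_image(image_buf, image_size, &info);
	if (rc != 0)
		return (rc);

	if (info.eii_format == EFX_IMAGE_FORMAT_SIGNED) {
		/* Repack into the signed chunk layout before writing */
		rc = efx_build_signed_image_write_buffer(write_buf,
		    write_buf_size, &info, &write_hdrp);
	} else if (info.eii_format == EFX_IMAGE_FORMAT_UNSIGNED) {
		/* Unsigned image (with efx_image_header_t) is written as-is */
	} else {
		rc = EINVAL;
	}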
#if EFSYS_OPT_DIAG
typedef enum efx_pattern_type_t {
@@ -1674,7 +1872,7 @@ typedef __checkReturn boolean_t
__in uint32_t size,
__in uint16_t flags);
-#if EFSYS_OPT_RX_PACKED_STREAM
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
/*
* Packed stream mode is documented in SF-112241-TC.
@@ -1684,6 +1882,13 @@ typedef __checkReturn boolean_t
* packets are put there in a continuous stream.
* The main advantage of such an approach is that RX queue refilling
* happens much less frequently.
+ *
+ * Equal stride packed stream mode is documented in SF-119419-TC.
+ * The general idea is to utilize the advantages of packed stream mode,
+ * but avoid indirection in the packet representation.
+ * The main advantage of such an approach is that RX queue refilling
+ * happens much less frequently and packet buffers are independent
+ * from the upper layers' point of view.
*/
typedef __checkReturn boolean_t
@@ -1784,7 +1989,7 @@ typedef __checkReturn boolean_t
typedef struct efx_ev_callbacks_s {
efx_initialized_ev_t eec_initialized;
efx_rx_ev_t eec_rx;
-#if EFSYS_OPT_RX_PACKED_STREAM
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
efx_rx_ps_ev_t eec_rx_ps;
#endif
efx_tx_ev_t eec_tx;
@@ -1888,14 +2093,35 @@ efx_rx_scatter_enable(
typedef enum efx_rx_hash_alg_e {
EFX_RX_HASHALG_LFSR = 0,
- EFX_RX_HASHALG_TOEPLITZ
+ EFX_RX_HASHALG_TOEPLITZ,
+ EFX_RX_HASHALG_PACKED_STREAM,
+ EFX_RX_NHASHALGS
} efx_rx_hash_alg_t;
+/*
+ * Legacy hash type flags.
+ *
+ * They represent standard tuples for distinct traffic classes.
+ */
#define EFX_RX_HASH_IPV4 (1U << 0)
#define EFX_RX_HASH_TCPIPV4 (1U << 1)
#define EFX_RX_HASH_IPV6 (1U << 2)
#define EFX_RX_HASH_TCPIPV6 (1U << 3)
+#define EFX_RX_HASH_LEGACY_MASK \
+ (EFX_RX_HASH_IPV4 | \
+ EFX_RX_HASH_TCPIPV4 | \
+ EFX_RX_HASH_IPV6 | \
+ EFX_RX_HASH_TCPIPV6)
+
+/*
+ * The type of the argument used by efx_rx_scale_mode_set() to
+ * provide a means for the client drivers to configure hashing.
+ *
+ * A properly constructed value can either be:
+ * - a combination of legacy flags
+ * - a combination of EFX_RX_HASH() flags
+ */
typedef unsigned int efx_rx_hash_type_t;
typedef enum efx_rx_hash_support_e {
@@ -1914,6 +2140,92 @@ typedef enum efx_rx_scale_context_type_e {
EFX_RX_SCALE_SHARED /* Read-only key/indirection table */
} efx_rx_scale_context_type_t;
+/*
+ * Traffic classes eligible for hash computation.
+ *
+ * Select packet headers used in computing the receive hash.
+ * This uses the same encoding as the RSS_MODES field of
+ * MC_CMD_RSS_CONTEXT_SET_FLAGS.
+ */
+#define EFX_RX_CLASS_IPV4_TCP_LBN 8
+#define EFX_RX_CLASS_IPV4_TCP_WIDTH 4
+#define EFX_RX_CLASS_IPV4_UDP_LBN 12
+#define EFX_RX_CLASS_IPV4_UDP_WIDTH 4
+#define EFX_RX_CLASS_IPV4_LBN 16
+#define EFX_RX_CLASS_IPV4_WIDTH 4
+#define EFX_RX_CLASS_IPV6_TCP_LBN 20
+#define EFX_RX_CLASS_IPV6_TCP_WIDTH 4
+#define EFX_RX_CLASS_IPV6_UDP_LBN 24
+#define EFX_RX_CLASS_IPV6_UDP_WIDTH 4
+#define EFX_RX_CLASS_IPV6_LBN 28
+#define EFX_RX_CLASS_IPV6_WIDTH 4
+
+#define EFX_RX_NCLASSES 6
+
+/*
+ * Ancillary flags used to construct generic hash tuples.
+ * This uses the same encoding as RSS_MODE_HASH_SELECTOR.
+ */
+#define EFX_RX_CLASS_HASH_SRC_ADDR (1U << 0)
+#define EFX_RX_CLASS_HASH_DST_ADDR (1U << 1)
+#define EFX_RX_CLASS_HASH_SRC_PORT (1U << 2)
+#define EFX_RX_CLASS_HASH_DST_PORT (1U << 3)
+
+/*
+ * Generic hash tuples.
+ *
+ * They express combinations of packet fields
+ * which can contribute to the hash value for
+ * a particular traffic class.
+ */
+#define EFX_RX_CLASS_HASH_DISABLE 0
+
+#define EFX_RX_CLASS_HASH_1TUPLE_SRC EFX_RX_CLASS_HASH_SRC_ADDR
+#define EFX_RX_CLASS_HASH_1TUPLE_DST EFX_RX_CLASS_HASH_DST_ADDR
+
+#define EFX_RX_CLASS_HASH_2TUPLE \
+ (EFX_RX_CLASS_HASH_SRC_ADDR | \
+ EFX_RX_CLASS_HASH_DST_ADDR)
+
+#define EFX_RX_CLASS_HASH_2TUPLE_SRC \
+ (EFX_RX_CLASS_HASH_SRC_ADDR | \
+ EFX_RX_CLASS_HASH_SRC_PORT)
+
+#define EFX_RX_CLASS_HASH_2TUPLE_DST \
+ (EFX_RX_CLASS_HASH_DST_ADDR | \
+ EFX_RX_CLASS_HASH_DST_PORT)
+
+#define EFX_RX_CLASS_HASH_4TUPLE \
+ (EFX_RX_CLASS_HASH_SRC_ADDR | \
+ EFX_RX_CLASS_HASH_DST_ADDR | \
+ EFX_RX_CLASS_HASH_SRC_PORT | \
+ EFX_RX_CLASS_HASH_DST_PORT)
+
+#define EFX_RX_CLASS_HASH_NTUPLES 7
+
+/*
+ * Hash flag constructor.
+ *
+ * Resulting flags encode hash tuples for specific traffic classes.
+ * The client drivers are encouraged to use these flags to form
+ * a hash type value.
+ */
+#define EFX_RX_HASH(_class, _tuple) \
+ EFX_INSERT_FIELD_NATIVE32(0, 31, \
+ EFX_RX_CLASS_##_class, EFX_RX_CLASS_HASH_##_tuple)
+
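As an illustration only, a client driver could combine these constructors into an efx_rx_hash_type_t, for example 4-tuple hashing for TCP over IPv4 plus address-only hashing for other IPv4 traffic; the legacy flags defined above remain usable as well:

	efx_rx_hash_type_t hash_type =
	    EFX_RX_HASH(IPV4_TCP, 4TUPLE) |
	    EFX_RX_HASH(IPV4, 2TUPLE);

	/* A legacy-style request for TCP/IPv4 and IPv4 hashing */
	efx_rx_hash_type_t legacy_type = EFX_RX_HASH_TCPIPV4 | EFX_RX_HASH_IPV4;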
+/*
+ * The maximum number of EFX_RX_HASH() flags.
+ */
+#define EFX_RX_HASH_NFLAGS (EFX_RX_NCLASSES * EFX_RX_CLASS_HASH_NTUPLES)
+
+extern __checkReturn efx_rc_t
+efx_rx_scale_hash_flags_get(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t hash_alg,
+ __inout_ecount(EFX_RX_HASH_NFLAGS) unsigned int *flags,
+ __out unsigned int *nflagsp);
+
extern __checkReturn efx_rc_t
efx_rx_hash_default_support_get(
__in efx_nic_t *enp,
@@ -1984,6 +2296,7 @@ efx_pseudo_hdr_pkt_length_get(
typedef enum efx_rxq_type_e {
EFX_RXQ_TYPE_DEFAULT,
EFX_RXQ_TYPE_PACKED_STREAM,
+ EFX_RXQ_TYPE_ES_SUPER_BUFFER,
EFX_RXQ_NTYPES
} efx_rxq_type_t;
@@ -2037,6 +2350,28 @@ efx_rx_qcreate_packed_stream(
#endif
+#if EFSYS_OPT_RX_ES_SUPER_BUFFER
+
+/* Maximum head-of-line block timeout in nanoseconds */
+#define EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX (400U * 1000 * 1000)
+
+extern __checkReturn efx_rc_t
+efx_rx_qcreate_es_super_buffer(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in uint32_t n_bufs_per_desc,
+ __in uint32_t max_dma_len,
+ __in uint32_t buf_stride,
+ __in uint32_t hol_block_timeout,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in unsigned int flags,
+ __in efx_evq_t *eep,
+ __deref_out efx_rxq_t **erpp);
+
+#endif
+
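A hedged sketch of creating an equal-stride super-buffer RxQ; every numeric argument is purely illustrative, and the surrounding objects (enp, index, label, esmp, ndescs, eep) are assumed to exist in the caller:

	efx_rxq_t *erp;
	efx_rc_t rc;

	rc = efx_rx_qcreate_es_super_buffer(enp, index, label,
	    16,			/* n_bufs_per_desc */
	    2048,		/* max_dma_len */
	    2048,		/* buf_stride */
	    EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX,	/* hol_block_timeout */
	    esmp, ndescs, 0 /* flags */, eep, &erp);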
typedef struct efx_buffer_s {
efsys_dma_addr_t eb_addr;
size_t eb_size;
@@ -2226,6 +2561,7 @@ extern void
efx_tx_qdesc_tso2_create(
__in efx_txq_t *etp,
__in uint16_t ipv4_id,
+ __in uint16_t outer_ipv4_id,
__in uint32_t tcp_seq,
__in uint16_t tcp_mss,
__out_ecount(count) efx_desc_t *edp,
@@ -2237,6 +2573,12 @@ efx_tx_qdesc_vlantci_create(
__in uint16_t tci,
__out efx_desc_t *edp);
+extern void
+efx_tx_qdesc_checksum_create(
+ __in efx_txq_t *etp,
+ __in uint16_t flags,
+ __out efx_desc_t *edp);
+
#if EFSYS_OPT_QSTATS
#if EFSYS_OPT_NAMES
@@ -2285,6 +2627,10 @@ efx_tx_qdestroy(
#define EFX_FILTER_FLAG_RX 0x08
/* Filter is for TX */
#define EFX_FILTER_FLAG_TX 0x10
+/* Set match flag on the received packet */
+#define EFX_FILTER_FLAG_ACTION_FLAG 0x20
+/* Set match mark on the received packet */
+#define EFX_FILTER_FLAG_ACTION_MARK 0x40
typedef uint8_t efx_filter_flags_t;
@@ -2313,10 +2659,19 @@ typedef uint8_t efx_filter_flags_t;
#define EFX_FILTER_MATCH_OUTER_VID 0x00000100
/* Match by IP transport protocol */
#define EFX_FILTER_MATCH_IP_PROTO 0x00000200
+/* Match by VNI or VSID */
+#define EFX_FILTER_MATCH_VNI_OR_VSID 0x00000800
+/* For encapsulated packets, match by inner frame local MAC address */
+#define EFX_FILTER_MATCH_IFRM_LOC_MAC 0x00010000
/* For encapsulated packets, match all multicast inner frames */
#define EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST 0x01000000
/* For encapsulated packets, match all unicast inner frames */
#define EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST 0x02000000
+/*
+ * Match by encap type, this flag does not correspond to
+ * the MCDI match flags and any unoccupied value may be used
+ */
+#define EFX_FILTER_MATCH_ENCAP_TYPE 0x20000000
/* Match otherwise-unmatched multicast and broadcast packets */
#define EFX_FILTER_MATCH_UNKNOWN_MCAST_DST 0x40000000
/* Match otherwise-unmatched unicast packets */
@@ -2359,6 +2714,9 @@ typedef struct efx_filter_spec_s {
uint16_t efs_rem_port;
efx_oword_t efs_rem_host;
efx_oword_t efs_loc_host;
+ uint8_t efs_vni_or_vsid[EFX_VNI_OR_VSID_LEN];
+ uint8_t efs_ifrm_loc_mac[EFX_MAC_ADDR_LEN];
+ uint32_t efs_mark;
} efx_filter_spec_t;
@@ -2454,6 +2812,13 @@ efx_filter_spec_set_encap_type(
__in efx_tunnel_protocol_t encap_type,
__in efx_filter_inner_frame_match_t inner_frame_match);
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_vxlan_full(
+ __inout efx_filter_spec_t *spec,
+ __in const uint8_t *vxlan_id,
+ __in const uint8_t *inner_addr,
+ __in const uint8_t *outer_addr);
+
#if EFSYS_OPT_RX_SCALE
extern __checkReturn efx_rc_t
efx_filter_spec_set_rss_context(
@@ -2659,6 +3024,38 @@ efx_tunnel_reconfigure(
#endif /* EFSYS_OPT_TUNNEL */
+#if EFSYS_OPT_FW_SUBVARIANT_AWARE
+
+/**
+ * Firmware subvariant choice options.
+ *
+ * The subvariant may be switched to "no Tx checksum" if the attached
+ * drivers are either preboot or firmware subvariant aware and no VIs
+ * are allocated.
+ * It may always be switched back to default, either explicitly using a
+ * set request or implicitly when an unaware driver attaches. If the
+ * switch happens while a driver is attached, that driver gets an
+ * MC_REBOOT event and should recreate its datapath.
+ *
+ * See SF-119419-TC DPDK Firmware Driver Interface and
+ * SF-109306-TC EF10 for Driver Writers for details.
+ */
+typedef enum efx_nic_fw_subvariant_e {
+ EFX_NIC_FW_SUBVARIANT_DEFAULT = 0,
+ EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM = 1,
+ EFX_NIC_FW_SUBVARIANT_NTYPES
+} efx_nic_fw_subvariant_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_get_fw_subvariant(
+ __in efx_nic_t *enp,
+ __out efx_nic_fw_subvariant_t *subvariantp);
+
+extern __checkReturn efx_rc_t
+efx_nic_set_fw_subvariant(
+ __in efx_nic_t *enp,
+ __in efx_nic_fw_subvariant_t subvariant);
+
+#endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */
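A minimal sketch of a subvariant-aware driver requesting the no-Tx-checksum subvariant; when to do this is a driver policy decision and is not prescribed here, and 'enp' is assumed to be an initialised NIC handle:

	efx_nic_fw_subvariant_t subvariant;
	efx_rc_t rc;

	rc = efx_nic_get_fw_subvariant(enp, &subvariant);
	if (rc == 0 && subvariant != EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM) {
		/* Per the comment above, this is expected to succeed only
		 * if all attached drivers are subvariant aware and no VIs
		 * are allocated */
		rc = efx_nic_set_fw_subvariant(enp,
		    EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM);
	}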
#ifdef __cplusplus
}
diff --git a/drivers/net/sfc/base/efx_bootcfg.c b/drivers/net/sfc/base/efx_bootcfg.c
index 0f71936f..715e18e8 100644
--- a/drivers/net/sfc/base/efx_bootcfg.c
+++ b/drivers/net/sfc/base/efx_bootcfg.c
@@ -68,6 +68,20 @@ efx_bootcfg_sector_info(
}
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2: {
+ /* Shared partition (array indexed by PF) */
+ max_size = BOOTCFG_PER_PF;
+ count = BOOTCFG_PF_COUNT;
+ if (pf >= count) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ offset = max_size * pf;
+ break;
+ }
+#endif /* EFSYS_OPT_MEDFORD2 */
+
default:
EFSYS_ASSERT(0);
rc = ENOTSUP;
@@ -82,6 +96,10 @@ efx_bootcfg_sector_info(
return (0);
+#if EFSYS_OPT_MEDFORD2
+fail3:
+ EFSYS_PROBE(fail3);
+#endif
#if EFSYS_OPT_MEDFORD
fail2:
EFSYS_PROBE(fail2);
@@ -191,19 +209,25 @@ efx_bootcfg_copy_sector(
size_t used_bytes;
efx_rc_t rc;
+ /* Minimum buffer is checksum byte and DHCP_END terminator */
+ if (data_size < 2) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
/* Verify that the area is correctly formatted and checksummed */
rc = efx_bootcfg_verify(enp, sector, sector_length,
&used_bytes);
if (!handle_format_errors) {
if (rc != 0)
- goto fail1;
+ goto fail2;
if ((used_bytes < 2) ||
(sector[used_bytes - 1] != DHCP_END)) {
/* Block too short, or DHCP_END missing */
rc = ENOENT;
- goto fail2;
+ goto fail3;
}
}
@@ -237,9 +261,13 @@ efx_bootcfg_copy_sector(
*/
if (used_bytes > data_size) {
rc = ENOSPC;
- goto fail3;
+ goto fail4;
}
- memcpy(data, sector, used_bytes);
+
+ data[0] = 0; /* checksum, updated below */
+
+ /* Copy all after the checksum to the target buffer */
+ memcpy(data + 1, sector + 1, used_bytes - 1);
/* Zero out the unused portion of the target buffer */
if (used_bytes < data_size)
@@ -253,6 +281,8 @@ efx_bootcfg_copy_sector(
return (0);
+fail4:
+ EFSYS_PROBE(fail4);
fail3:
EFSYS_PROBE(fail3);
fail2:
@@ -277,20 +307,31 @@ efx_bootcfg_read(
efx_rc_t rc;
uint32_t sector_number;
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+ /* Minimum buffer is checksum byte and DHCP_END terminator */
+ if (size < 2) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
sector_number = enp->en_nic_cfg.enc_pf;
#else
sector_number = 0;
#endif
rc = efx_nvram_size(enp, EFX_NVRAM_BOOTROM_CFG, &partn_length);
if (rc != 0)
- goto fail1;
+ goto fail2;
/* The bootcfg sector may be stored in a (larger) shared partition */
rc = efx_bootcfg_sector_info(enp, sector_number,
NULL, &sector_offset, &sector_length);
if (rc != 0)
- goto fail2;
+ goto fail3;
+
+ if (sector_length < 2) {
+ rc = EINVAL;
+ goto fail4;
+ }
if (sector_length > BOOTCFG_MAX_SIZE)
sector_length = BOOTCFG_MAX_SIZE;
@@ -298,7 +339,7 @@ efx_bootcfg_read(
if (sector_offset + sector_length > partn_length) {
/* Partition is too small */
rc = EFBIG;
- goto fail3;
+ goto fail5;
}
/*
@@ -311,28 +352,28 @@ efx_bootcfg_read(
EFSYS_KMEM_ALLOC(enp->en_esip, sector_length, payload);
if (payload == NULL) {
rc = ENOMEM;
- goto fail4;
+ goto fail6;
}
} else
payload = (uint8_t *)data;
if ((rc = efx_nvram_rw_start(enp, EFX_NVRAM_BOOTROM_CFG, NULL)) != 0)
- goto fail5;
+ goto fail7;
if ((rc = efx_nvram_read_chunk(enp, EFX_NVRAM_BOOTROM_CFG,
sector_offset, (caddr_t)payload, sector_length)) != 0) {
(void) efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG, NULL);
- goto fail6;
+ goto fail8;
}
if ((rc = efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG, NULL)) != 0)
- goto fail7;
+ goto fail9;
/* Verify that the area is correctly formatted and checksummed */
rc = efx_bootcfg_verify(enp, payload, sector_length,
&used_bytes);
if (rc != 0 || used_bytes == 0) {
- payload[0] = (uint8_t)(~DHCP_END & 0xff);
+ payload[0] = 0;
payload[1] = DHCP_END;
used_bytes = 2;
}
@@ -347,10 +388,8 @@ efx_bootcfg_read(
* so reinitialise the sector if there isn't room for the character.
*/
if (payload[used_bytes - 1] != DHCP_END) {
- if (used_bytes + 1 > sector_length) {
- payload[0] = 0;
+ if (used_bytes >= sector_length)
used_bytes = 1;
- }
payload[used_bytes] = DHCP_END;
++used_bytes;
@@ -362,10 +401,14 @@ efx_bootcfg_read(
*/
if (used_bytes > size) {
rc = ENOSPC;
- goto fail8;
+ goto fail10;
}
+
+ data[0] = 0; /* checksum, updated below */
+
if (sector_length > size) {
- memcpy(data, payload, used_bytes);
+ /* Copy all after the checksum to the target buffer */
+ memcpy(data + 1, payload + 1, used_bytes - 1);
EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload);
}
@@ -381,16 +424,20 @@ efx_bootcfg_read(
return (0);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
fail8:
EFSYS_PROBE(fail8);
fail7:
EFSYS_PROBE(fail7);
+ if (sector_length > size)
+ EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload);
fail6:
EFSYS_PROBE(fail6);
fail5:
EFSYS_PROBE(fail5);
- if (sector_length > size)
- EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload);
fail4:
EFSYS_PROBE(fail4);
fail3:
@@ -418,7 +465,7 @@ efx_bootcfg_write(
efx_rc_t rc;
uint32_t sector_number;
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
sector_number = enp->en_nic_cfg.enc_pf;
#else
sector_number = 0;
diff --git a/drivers/net/sfc/base/efx_check.h b/drivers/net/sfc/base/efx_check.h
index 58377759..ef5eadc6 100644
--- a/drivers/net/sfc/base/efx_check.h
+++ b/drivers/net/sfc/base/efx_check.h
@@ -30,8 +30,9 @@
#if EFSYS_OPT_CHECK_REG
/* Verify chip implements accessed registers */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "CHECK_REG requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "CHECK_REG requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_CHECK_REG */
@@ -44,15 +45,17 @@
#if EFSYS_OPT_DIAG
/* Support diagnostic hardware tests */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "DIAG requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "DIAG requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_DIAG */
#if EFSYS_OPT_EV_PREFETCH
/* Support optimized EVQ data access */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "EV_PREFETCH requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "EV_PREFETCH requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_EV_PREFETCH */
@@ -62,21 +65,23 @@
#if EFSYS_OPT_FILTER
/* Support hardware packet filters */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "FILTER requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "FILTER requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_FILTER */
-#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
# if !EFSYS_OPT_FILTER
-# error "HUNTINGTON or MEDFORD requires FILTER"
+# error "HUNTINGTON or MEDFORD or MEDFORD2 requires FILTER"
# endif
#endif /* EFSYS_OPT_HUNTINGTON */
#if EFSYS_OPT_LOOPBACK
/* Support hardware loopback modes */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "LOOPBACK requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "LOOPBACK requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_LOOPBACK */
@@ -90,21 +95,24 @@
#if EFSYS_OPT_MAC_STATS
/* Support MAC statistics */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "MAC_STATS requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "MAC_STATS requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_MAC_STATS */
#if EFSYS_OPT_MCDI
/* Support management controller messages */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "MCDI requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "MCDI requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_MCDI */
-#if (EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+#if (EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
# if !EFSYS_OPT_MCDI
-# error "SIENA or HUNTINGTON or MEDFORD requires MCDI"
+# error "SIENA or HUNTINGTON or MEDFORD or MEDFORD2 requires MCDI"
# endif
#endif
@@ -144,15 +152,17 @@
#if EFSYS_OPT_MON_STATS
/* Support monitor statistics (voltage/temperature) */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "MON_STATS requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "MON_STATS requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_MON_STATS */
#if EFSYS_OPT_MON_MCDI
/* Support Monitor via mcdi */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "MON_MCDI requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "MON_MCDI requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_MON_MCDI*/
@@ -166,11 +176,19 @@
#if EFSYS_OPT_NVRAM
/* Support non volatile configuration */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "NVRAM requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "NVRAM requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_NVRAM */
+#if EFSYS_OPT_IMAGE_LAYOUT
+/* Support signed image layout handling */
+# if !(EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "IMAGE_LAYOUT requires MEDFORD or MEDFORD2"
+# endif
+#endif /* EFSYS_OPT_IMAGE_LAYOUT */
+
#ifdef EFSYS_OPT_NVRAM_FALCON_BOOTROM
# error "NVRAM_FALCON_BOOTROM is obsolete and is not supported."
#endif
@@ -200,8 +218,9 @@
#if EFSYS_OPT_PHY_LED_CONTROL
/* Support for PHY LED control */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "PHY_LED_CONTROL requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "PHY_LED_CONTROL requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_PHY_LED_CONTROL */
@@ -246,8 +265,9 @@
#if EFSYS_OPT_QSTATS
/* Support EVQ/RXQ/TXQ statistics */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "QSTATS requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "QSTATS requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_QSTATS */
@@ -257,15 +277,17 @@
#if EFSYS_OPT_RX_SCALE
/* Support receive scaling (RSS) */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "RX_SCALE requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "RX_SCALE requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_RX_SCALE */
#if EFSYS_OPT_RX_SCATTER
/* Support receive scatter DMA */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "RX_SCATTER requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "RX_SCATTER requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_RX_SCATTER */
@@ -275,8 +297,9 @@
#if EFSYS_OPT_VPD
/* Support PCI Vital Product Data (VPD) */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "VPD requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "VPD requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_VPD */
@@ -290,8 +313,9 @@
#if EFSYS_OPT_BIST
/* Support BIST */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "BIST requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "BIST requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_BIST */
@@ -307,23 +331,37 @@
#if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC
/* Support adapters with missing static config (for factory use only) */
-# if !EFSYS_OPT_MEDFORD
-# error "ALLOW_UNCONFIGURED_NIC requires MEDFORD"
+# if !(EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "ALLOW_UNCONFIGURED_NIC requires MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
#if EFSYS_OPT_RX_PACKED_STREAM
/* Support packed stream mode */
-# if !(EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "PACKED_STREAM requires HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "PACKED_STREAM requires HUNTINGTON or MEDFORD or MEDFORD2"
+# endif
+#endif
+
+#if EFSYS_OPT_RX_ES_SUPER_BUFFER
+/* Support equal stride super-buffer mode */
+# if !(EFSYS_OPT_MEDFORD2)
+# error "ES_SUPER_BUFFER requires MEDFORD2"
# endif
#endif
/* Support hardware assistance for tunnels */
#if EFSYS_OPT_TUNNEL
-# if !EFSYS_OPT_MEDFORD
-# error "TUNNEL requires MEDFORD"
+# if !(EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "TUNNEL requires MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_TUNNEL */
+#if EFSYS_OPT_FW_SUBVARIANT_AWARE
+/* Advertise that the driver is firmware subvariant aware */
+# if !(EFSYS_OPT_MEDFORD2)
+# error "FW_SUBVARIANT_AWARE requires MEDFORD2"
+# endif
+#endif
+
#endif /* _SYS_EFX_CHECK_H */
diff --git a/drivers/net/sfc/base/efx_ev.c b/drivers/net/sfc/base/efx_ev.c
index 949d352a..1139cc26 100644
--- a/drivers/net/sfc/base/efx_ev.c
+++ b/drivers/net/sfc/base/efx_ev.c
@@ -91,7 +91,7 @@ static const efx_ev_ops_t __efx_ev_siena_ops = {
};
#endif /* EFSYS_OPT_SIENA */
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static const efx_ev_ops_t __efx_ev_ef10_ops = {
ef10_ev_init, /* eevo_init */
ef10_ev_fini, /* eevo_fini */
@@ -104,7 +104,7 @@ static const efx_ev_ops_t __efx_ev_ef10_ops = {
ef10_ev_qstats_update, /* eevo_qstats_update */
#endif
};
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
__checkReturn efx_rc_t
@@ -141,6 +141,12 @@ efx_ev_init(
break;
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ eevop = &__efx_ev_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
default:
EFSYS_ASSERT(0);
rc = ENOTSUP;
diff --git a/drivers/net/sfc/base/efx_filter.c b/drivers/net/sfc/base/efx_filter.c
index b92541aa..412298ac 100644
--- a/drivers/net/sfc/base/efx_filter.c
+++ b/drivers/net/sfc/base/efx_filter.c
@@ -56,7 +56,7 @@ static const efx_filter_ops_t __efx_filter_siena_ops = {
};
#endif /* EFSYS_OPT_SIENA */
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static const efx_filter_ops_t __efx_filter_ef10_ops = {
ef10_filter_init, /* efo_init */
ef10_filter_fini, /* efo_fini */
@@ -66,7 +66,7 @@ static const efx_filter_ops_t __efx_filter_ef10_ops = {
ef10_filter_supported_filters, /* efo_supported_filters */
ef10_filter_reconfigure, /* efo_reconfigure */
};
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
__checkReturn efx_rc_t
efx_filter_insert(
@@ -74,12 +74,33 @@ efx_filter_insert(
__inout efx_filter_spec_t *spec)
{
const efx_filter_ops_t *efop = enp->en_efop;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
EFSYS_ASSERT3P(spec, !=, NULL);
EFSYS_ASSERT3U(spec->efs_flags, &, EFX_FILTER_FLAG_RX);
+ if ((spec->efs_flags & EFX_FILTER_FLAG_ACTION_MARK) &&
+ !encp->enc_filter_action_mark_supported) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((spec->efs_flags & EFX_FILTER_FLAG_ACTION_FLAG) &&
+ !encp->enc_filter_action_flag_supported) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
return (efop->efo_add(enp, spec, B_FALSE));
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
}
__checkReturn efx_rc_t
@@ -145,6 +166,12 @@ efx_filter_init(
break;
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ efop = &__efx_filter_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
default:
EFSYS_ASSERT(0);
rc = ENOTSUP;
@@ -412,7 +439,7 @@ efx_filter_spec_set_encap_type(
__in efx_tunnel_protocol_t encap_type,
__in efx_filter_inner_frame_match_t inner_frame_match)
{
- uint32_t match_flags = 0;
+ uint32_t match_flags = EFX_FILTER_MATCH_ENCAP_TYPE;
uint8_t ip_proto;
efx_rc_t rc;
@@ -462,6 +489,43 @@ fail1:
return (rc);
}
+/*
+ * Specify inner and outer Ethernet addresses and VXLAN ID in the filter
+ * specification.
+ */
+ __checkReturn efx_rc_t
+efx_filter_spec_set_vxlan_full(
+ __inout efx_filter_spec_t *spec,
+ __in const uint8_t *vxlan_id,
+ __in const uint8_t *inner_addr,
+ __in const uint8_t *outer_addr)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3P(vxlan_id, !=, NULL);
+ EFSYS_ASSERT3P(inner_addr, !=, NULL);
+ EFSYS_ASSERT3P(outer_addr, !=, NULL);
+
+ if ((inner_addr == NULL) && (outer_addr == NULL))
+ return (EINVAL);
+
+ if (vxlan_id != NULL) {
+ spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
+ memcpy(spec->efs_vni_or_vsid, vxlan_id, EFX_VNI_OR_VSID_LEN);
+ }
+ if (outer_addr != NULL) {
+ spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
+ memcpy(spec->efs_loc_mac, outer_addr, EFX_MAC_ADDR_LEN);
+ }
+ if (inner_addr != NULL) {
+ spec->efs_match_flags |= EFX_FILTER_MATCH_IFRM_LOC_MAC;
+ memcpy(spec->efs_ifrm_loc_mac, inner_addr, EFX_MAC_ADDR_LEN);
+ }
+ spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
+ spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
+
+ return (0);
+}
+
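For illustration, a caller could fill an already-initialised spec with a full VXLAN match as below; 'spec' and 'enp' are assumed to exist, and the VNI and MAC addresses are placeholders:

	const uint8_t vni[EFX_VNI_OR_VSID_LEN] = { 0x00, 0x12, 0x34 };
	const uint8_t inner_mac[EFX_MAC_ADDR_LEN] =
	    { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	const uint8_t outer_mac[EFX_MAC_ADDR_LEN] =
	    { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 };
	efx_rc_t rc;

	rc = efx_filter_spec_set_vxlan_full(&spec, vni, inner_mac, outer_mac);
	if (rc == 0)
		rc = efx_filter_insert(enp, &spec);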
#if EFSYS_OPT_RX_SCALE
__checkReturn efx_rc_t
efx_filter_spec_set_rss_context(
@@ -953,6 +1017,7 @@ siena_filter_build(
default:
EFSYS_ASSERT(B_FALSE);
+ EFX_ZERO_OWORD(*filter);
return (0);
}
diff --git a/drivers/net/sfc/base/efx_impl.h b/drivers/net/sfc/base/efx_impl.h
index ed685cba..548834f9 100644
--- a/drivers/net/sfc/base/efx_impl.h
+++ b/drivers/net/sfc/base/efx_impl.h
@@ -29,9 +29,13 @@
#include "medford_impl.h"
#endif /* EFSYS_OPT_MEDFORD */
-#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+#if EFSYS_OPT_MEDFORD2
+#include "medford2_impl.h"
+#endif /* EFSYS_OPT_MEDFORD2 */
+
+#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
#include "ef10_impl.h"
-#endif /* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) */
+#endif /* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) */
#ifdef __cplusplus
extern "C" {
@@ -61,6 +65,7 @@ typedef enum efx_mac_type_e {
EFX_MAC_SIENA,
EFX_MAC_HUNTINGTON,
EFX_MAC_MEDFORD,
+ EFX_MAC_MEDFORD2,
EFX_MAC_NTYPES
} efx_mac_type_t;
@@ -112,16 +117,36 @@ typedef struct efx_tx_ops_s {
uint32_t, uint8_t,
efx_desc_t *);
void (*etxo_qdesc_tso2_create)(efx_txq_t *, uint16_t,
- uint32_t, uint16_t,
+ uint16_t, uint32_t, uint16_t,
efx_desc_t *, int);
void (*etxo_qdesc_vlantci_create)(efx_txq_t *, uint16_t,
efx_desc_t *);
+ void (*etxo_qdesc_checksum_create)(efx_txq_t *, uint16_t,
+ efx_desc_t *);
#if EFSYS_OPT_QSTATS
void (*etxo_qstats_update)(efx_txq_t *,
efsys_stat_t *);
#endif
} efx_tx_ops_t;
+typedef union efx_rxq_type_data_u {
+ /* Dummy member to have non-empty union if no options are enabled */
+ uint32_t ertd_dummy;
+#if EFSYS_OPT_RX_PACKED_STREAM
+ struct {
+ uint32_t eps_buf_size;
+ } ertd_packed_stream;
+#endif
+#if EFSYS_OPT_RX_ES_SUPER_BUFFER
+ struct {
+ uint32_t eessb_bufs_per_desc;
+ uint32_t eessb_max_dma_len;
+ uint32_t eessb_buf_stride;
+ uint32_t eessb_hol_block_timeout;
+ } ertd_es_super_buffer;
+#endif
+} efx_rxq_type_data_t;
+
typedef struct efx_rx_ops_s {
efx_rc_t (*erxo_init)(efx_nic_t *);
void (*erxo_fini)(efx_nic_t *);
@@ -158,7 +183,8 @@ typedef struct efx_rx_ops_s {
efx_rc_t (*erxo_qflush)(efx_rxq_t *);
void (*erxo_qenable)(efx_rxq_t *);
efx_rc_t (*erxo_qcreate)(efx_nic_t *enp, unsigned int,
- unsigned int, efx_rxq_type_t, uint32_t,
+ unsigned int, efx_rxq_type_t,
+ const efx_rxq_type_data_t *,
efsys_mem_t *, size_t, uint32_t,
unsigned int,
efx_evq_t *, efx_rxq_t *);
@@ -398,9 +424,9 @@ typedef struct efx_filter_s {
#if EFSYS_OPT_SIENA
siena_filter_t *ef_siena_filter;
#endif /* EFSYS_OPT_SIENA */
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
ef10_filter_table_t *ef_ef10_filter_table;
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
} efx_filter_t;
#if EFSYS_OPT_SIENA
@@ -640,6 +666,7 @@ struct efx_nic_s {
const efx_ev_ops_t *en_eevop;
const efx_tx_ops_t *en_etxop;
const efx_rx_ops_t *en_erxop;
+ efx_fw_variant_t efv;
#if EFSYS_OPT_FILTER
efx_filter_t en_filter;
const efx_filter_ops_t *en_efop;
@@ -683,7 +710,7 @@ struct efx_nic_s {
#endif /* EFSYS_OPT_SIENA */
int enu_unused;
} en_u;
-#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
union en_arch {
struct {
int ena_vi_base;
@@ -704,7 +731,7 @@ struct efx_nic_s {
size_t ena_wc_mem_map_size;
} ef10;
} en_arch;
-#endif /* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) */
+#endif /* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) */
};
@@ -716,9 +743,11 @@ typedef boolean_t (*efx_ev_handler_t)(efx_evq_t *, efx_qword_t *,
typedef struct efx_evq_rxq_state_s {
unsigned int eers_rx_read_ptr;
unsigned int eers_rx_mask;
-#if EFSYS_OPT_RX_PACKED_STREAM
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
unsigned int eers_rx_stream_npackets;
boolean_t eers_rx_packed_stream;
+#endif
+#if EFSYS_OPT_RX_PACKED_STREAM
unsigned int eers_rx_packed_stream_credits;
#endif
} efx_evq_rxq_state_t;
@@ -825,6 +854,10 @@ struct efx_txq_s {
rev = 'E'; \
break; \
\
+ case EFX_FAMILY_MEDFORD2: \
+ rev = 'F'; \
+ break; \
+ \
default: \
rev = '?'; \
break; \
@@ -915,6 +948,15 @@ struct efx_txq_s {
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
+/*
+ * Accessors for memory BAR non-VI tables.
+ *
+ * Code used on EF10 *must* use EFX_BAR_VI_*() macros for per-VI registers,
+ * to ensure the correct runtime VI window size is used on Medford2.
+ *
+ * Siena-only code may continue using EFX_BAR_TBL_*() macros for VI registers.
+ */
+
#define EFX_BAR_TBL_READD(_enp, _reg, _index, _edp, _lock) \
do { \
EFX_CHECK_REG((_enp), (_reg)); \
@@ -941,21 +983,6 @@ struct efx_txq_s {
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
-#define EFX_BAR_TBL_WRITED2(_enp, _reg, _index, _edp, _lock) \
- do { \
- EFX_CHECK_REG((_enp), (_reg)); \
- EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \
- uint32_t, (_index), \
- uint32_t, _reg ## _OFST, \
- uint32_t, (_edp)->ed_u32[0]); \
- EFSYS_BAR_WRITED((_enp)->en_esbp, \
- (_reg ## _OFST + \
- (2 * sizeof (efx_dword_t)) + \
- ((_index) * _reg ## _STEP)), \
- (_edp), (_lock)); \
- _NOTE(CONSTANTCONDITION) \
- } while (B_FALSE)
-
#define EFX_BAR_TBL_WRITED3(_enp, _reg, _index, _edp, _lock) \
do { \
EFX_CHECK_REG((_enp), (_reg)); \
@@ -1032,16 +1059,66 @@ struct efx_txq_s {
} while (B_FALSE)
/*
- * Allow drivers to perform optimised 128-bit doorbell writes.
+ * Accessors for memory BAR per-VI registers.
+ *
+ * The VI window size is 8KB for Medford and all earlier controllers.
+ * For Medford2, the VI window size can be 8KB, 16KB or 64KB.
+ */
+
+#define EFX_BAR_VI_READD(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READD((_enp)->en_esbp, \
+ ((_reg ## _OFST) + \
+ ((_index) << (_enp)->en_nic_cfg.enc_vi_window_shift)), \
+ (_edp), (_lock)); \
+ EFSYS_PROBE4(efx_bar_vi_readd, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_VI_WRITED(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE4(efx_bar_vi_writed, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ EFSYS_BAR_WRITED((_enp)->en_esbp, \
+ ((_reg ## _OFST) + \
+ ((_index) << (_enp)->en_nic_cfg.enc_vi_window_shift)), \
+ (_edp), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_VI_WRITED2(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE4(efx_bar_vi_writed, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ EFSYS_BAR_WRITED((_enp)->en_esbp, \
+ ((_reg ## _OFST) + \
+ (2 * sizeof (efx_dword_t)) + \
+ ((_index) << (_enp)->en_nic_cfg.enc_vi_window_shift)), \
+ (_edp), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/*
+ * Allow drivers to perform optimised 128-bit VI doorbell writes.
* The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
* special-cased in the BIU on the Falcon/Siena and EF10 architectures to avoid
* the need for locking in the host, and are the only ones known to be safe to
 * use 128-bit writes with.
*/
-#define EFX_BAR_TBL_DOORBELL_WRITEO(_enp, _reg, _index, _eop) \
+#define EFX_BAR_VI_DOORBELL_WRITEO(_enp, _reg, _index, _eop) \
do { \
EFX_CHECK_REG((_enp), (_reg)); \
- EFSYS_PROBE7(efx_bar_tbl_doorbell_writeo, \
+ EFSYS_PROBE7(efx_bar_vi_doorbell_writeo, \
const char *, #_reg, \
uint32_t, (_index), \
uint32_t, _reg ## _OFST, \
@@ -1050,7 +1127,8 @@ struct efx_txq_s {
uint32_t, (_eop)->eo_u32[1], \
uint32_t, (_eop)->eo_u32[0]); \
EFSYS_BAR_DOORBELL_WRITEO((_enp)->en_esbp, \
- (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_reg ## _OFST + \
+ ((_index) << (_enp)->en_nic_cfg.enc_vi_window_shift)), \
(_eop)); \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
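To make the per-VI addressing concrete, a short hedged illustration of the arithmetic performed by the EFX_BAR_VI_*() macros above; enp and vi_index are assumed to be in scope and ER_DZ_EVQ_RPTR_REG is only an example register.

	/*
	 * Illustration only: a per-VI register is located by shifting the VI
	 * index by the runtime VI window shift (8KB, 16KB or 64KB on Medford2)
	 * rather than multiplying by a fixed _STEP as the table macros do.
	 */
	uint32_t tbl_offset = ER_DZ_EVQ_RPTR_REG_OFST +
	    (vi_index * ER_DZ_EVQ_RPTR_REG_STEP);	/* fixed 8KB window */
	uint32_t vi_offset = ER_DZ_EVQ_RPTR_REG_OFST +
	    (vi_index << enp->en_nic_cfg.enc_vi_window_shift); /* runtime window */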
diff --git a/drivers/net/sfc/base/efx_intr.c b/drivers/net/sfc/base/efx_intr.c
index 83ca177d..b518916d 100644
--- a/drivers/net/sfc/base/efx_intr.c
+++ b/drivers/net/sfc/base/efx_intr.c
@@ -75,7 +75,7 @@ static const efx_intr_ops_t __efx_intr_siena_ops = {
};
#endif /* EFSYS_OPT_SIENA */
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static const efx_intr_ops_t __efx_intr_ef10_ops = {
ef10_intr_init, /* eio_init */
ef10_intr_enable, /* eio_enable */
@@ -87,7 +87,7 @@ static const efx_intr_ops_t __efx_intr_ef10_ops = {
ef10_intr_fatal, /* eio_fatal */
ef10_intr_fini, /* eio_fini */
};
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
__checkReturn efx_rc_t
efx_intr_init(
@@ -132,6 +132,12 @@ efx_intr_init(
break;
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ eiop = &__efx_intr_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
default:
EFSYS_ASSERT(B_FALSE);
rc = ENOTSUP;
@@ -283,6 +289,12 @@ siena_intr_init(
{
efx_intr_t *eip = &(enp->en_intr);
efx_oword_t oword;
+ efx_rc_t rc;
+
+ if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_INTR_SIZE)) {
+ rc = EINVAL;
+ goto fail1;
+ }
/*
* bug17213 workaround.
@@ -314,6 +326,11 @@ siena_intr_init(
EFX_BAR_WRITEO(enp, FR_AZ_INT_ADR_REG_KER, &oword);
return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
}
static void
diff --git a/drivers/net/sfc/base/efx_lic.c b/drivers/net/sfc/base/efx_lic.c
index ad4d2211..49c00347 100644
--- a/drivers/net/sfc/base/efx_lic.c
+++ b/drivers/net/sfc/base/efx_lic.c
@@ -10,6 +10,9 @@
#if EFSYS_OPT_LICENSING
#include "ef10_tlv_layout.h"
+#if EFSYS_OPT_SIENA
+#include "efx_regs_mcdi_aoe.h"
+#endif
#if EFSYS_OPT_SIENA | EFSYS_OPT_HUNTINGTON
@@ -162,7 +165,7 @@ static const efx_lic_ops_t __efx_lic_v2_ops = {
#endif /* EFSYS_OPT_HUNTINGTON */
-#if EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static __checkReturn efx_rc_t
efx_mcdi_licensing_v3_update_licenses(
@@ -286,7 +289,7 @@ static const efx_lic_ops_t __efx_lic_v3_ops = {
efx_lic_v3_finish_partition, /* elo_finish_partition */
};
-#endif /* EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
/* V1 Licensing - used in Siena Modena only */
@@ -819,7 +822,7 @@ fail1:
/* V3 Licensing - used starting from Medford family. See SF-114884-SW */
-#if EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static __checkReturn efx_rc_t
efx_mcdi_licensing_v3_update_licenses(
@@ -829,7 +832,8 @@ efx_mcdi_licensing_v3_update_licenses(
uint8_t payload[MC_CMD_LICENSING_V3_IN_LEN];
efx_rc_t rc;
- EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD);
+ EFSYS_ASSERT((enp->en_family == EFX_FAMILY_MEDFORD) ||
+ (enp->en_family == EFX_FAMILY_MEDFORD2));
(void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_LICENSING_V3;
@@ -866,7 +870,8 @@ efx_mcdi_licensing_v3_report_license(
MC_CMD_LICENSING_V3_OUT_LEN)];
efx_rc_t rc;
- EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD);
+ EFSYS_ASSERT((enp->en_family == EFX_FAMILY_MEDFORD) ||
+ (enp->en_family == EFX_FAMILY_MEDFORD2));
(void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_LICENSING_V3;
@@ -930,7 +935,8 @@ efx_mcdi_licensing_v3_app_state(
uint32_t app_state;
efx_rc_t rc;
- EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD);
+ EFSYS_ASSERT((enp->en_family == EFX_FAMILY_MEDFORD) ||
+ (enp->en_family == EFX_FAMILY_MEDFORD2));
(void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_LICENSED_V3_APP_STATE;
@@ -1262,7 +1268,7 @@ fail1:
}
-#endif /* EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
__checkReturn efx_rc_t
efx_lic_init(
@@ -1296,6 +1302,12 @@ efx_lic_init(
break;
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ elop = &__efx_lic_v3_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
default:
EFSYS_ASSERT(0);
rc = ENOTSUP;
diff --git a/drivers/net/sfc/base/efx_mac.c b/drivers/net/sfc/base/efx_mac.c
index 511f3eb5..57436b95 100644
--- a/drivers/net/sfc/base/efx_mac.c
+++ b/drivers/net/sfc/base/efx_mac.c
@@ -39,7 +39,7 @@ static const efx_mac_ops_t __efx_mac_siena_ops = {
};
#endif /* EFSYS_OPT_SIENA */
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static const efx_mac_ops_t __efx_mac_ef10_ops = {
ef10_mac_poll, /* emo_poll */
ef10_mac_up, /* emo_up */
@@ -62,7 +62,7 @@ static const efx_mac_ops_t __efx_mac_ef10_ops = {
ef10_mac_stats_update /* emo_stats_update */
#endif /* EFSYS_OPT_MAC_STATS */
};
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
__checkReturn efx_rc_t
efx_mac_pdu_set(
@@ -492,7 +492,7 @@ efx_mac_filter_default_rxq_clear(
#if EFSYS_OPT_NAMES
-/* START MKCONFIG GENERATED EfxMacStatNamesBlock c11b91b42f922516 */
+/* START MKCONFIG GENERATED EfxMacStatNamesBlock 1a45a82fcfb30c1b */
static const char * const __efx_mac_stat_name[] = {
"rx_octets",
"rx_pkts",
@@ -575,6 +575,31 @@ static const char * const __efx_mac_stat_name[] = {
"vadapter_tx_bad_packets",
"vadapter_tx_bad_bytes",
"vadapter_tx_overflow",
+ "fec_uncorrected_errors",
+ "fec_corrected_errors",
+ "fec_corrected_symbols_lane0",
+ "fec_corrected_symbols_lane1",
+ "fec_corrected_symbols_lane2",
+ "fec_corrected_symbols_lane3",
+ "ctpio_vi_busy_fallback",
+ "ctpio_long_write_success",
+ "ctpio_missing_dbell_fail",
+ "ctpio_overflow_fail",
+ "ctpio_underflow_fail",
+ "ctpio_timeout_fail",
+ "ctpio_noncontig_wr_fail",
+ "ctpio_frm_clobber_fail",
+ "ctpio_invalid_wr_fail",
+ "ctpio_vi_clobber_fallback",
+ "ctpio_unqualified_fallback",
+ "ctpio_runt_fallback",
+ "ctpio_success",
+ "ctpio_fallback",
+ "ctpio_poison",
+ "ctpio_erase",
+ "rxdp_scatter_disabled_trunc",
+ "rxdp_hlb_idle",
+ "rxdp_hlb_timeout",
};
/* END MKCONFIG GENERATED EfxMacStatNamesBlock */
@@ -826,6 +851,13 @@ efx_mac_select(
break;
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ emop = &__efx_mac_ef10_ops;
+ type = EFX_MAC_MEDFORD2;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
default:
rc = EINVAL;
goto fail1;
diff --git a/drivers/net/sfc/base/efx_mcdi.c b/drivers/net/sfc/base/efx_mcdi.c
index 347a5b35..d4ebcf26 100644
--- a/drivers/net/sfc/base/efx_mcdi.c
+++ b/drivers/net/sfc/base/efx_mcdi.c
@@ -45,7 +45,7 @@ static const efx_mcdi_ops_t __efx_mcdi_siena_ops = {
#endif /* EFSYS_OPT_SIENA */
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static const efx_mcdi_ops_t __efx_mcdi_ef10_ops = {
ef10_mcdi_init, /* emco_init */
@@ -58,7 +58,7 @@ static const efx_mcdi_ops_t __efx_mcdi_ef10_ops = {
ef10_mcdi_get_timeout, /* emco_get_timeout */
};
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
@@ -92,6 +92,12 @@ efx_mcdi_init(
break;
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ emcop = &__efx_mcdi_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
default:
EFSYS_ASSERT(0);
rc = ENOTSUP;
@@ -1258,13 +1264,21 @@ efx_mcdi_drv_attach(
req.emr_out_length = MC_CMD_DRV_ATTACH_EXT_OUT_LEN;
/*
- * Use DONT_CARE for the datapath firmware type to ensure that the
- * driver can attach to an unprivileged function. The datapath firmware
- * type to use is controlled by the 'sfboot' utility.
+ * Typically, client drivers use DONT_CARE for the datapath firmware
+ * type to ensure that the driver can attach to an unprivileged
+ * function. The datapath firmware type to use is controlled by the
+ * 'sfboot' utility.
+ * If a client driver wishes to attach with a specific datapath firmware
+ * type, it can pass that type as the second argument of the
+ * efx_nic_probe API. For example, the ESXi native driver attempts to
+ * attach with the FULL_FEATURED datapath firmware type first and falls
+ * back to the DONT_CARE datapath firmware type if MC_CMD_DRV_ATTACH
+ * fails.
*/
- MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_NEW_STATE, attach ? 1 : 0);
+ MCDI_IN_POPULATE_DWORD_2(req, DRV_ATTACH_IN_NEW_STATE,
+ DRV_ATTACH_IN_ATTACH, attach ? 1 : 0,
+ DRV_ATTACH_IN_SUBVARIANT_AWARE, EFSYS_OPT_FW_SUBVARIANT_AWARE);
MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_UPDATE, 1);
- MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_DONT_CARE);
+ MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_FIRMWARE_ID, enp->efv);
efx_mcdi_execute(enp, &req);
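A hedged sketch of the attach pattern described in the comment above: probe with a preferred datapath firmware variant first, then retry with DONT_CARE if the firmware rejects it. The retry-on-any-error policy and the fail label are assumptions, not taken from this patch.

	/* Hypothetical attach-with-fallback sketch. */
	rc = efx_nic_probe(enp, EFX_FW_VARIANT_FULL_FEATURED);
	if (rc != 0) {
		/* Fall back to whichever datapath variant is running. */
		rc = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE);
	}
	if (rc != 0)
		goto fail;	/* placeholder error path */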
@@ -1426,6 +1440,11 @@ efx_mcdi_get_phy_cfg(
efx_mcdi_req_t req;
uint8_t payload[MAX(MC_CMD_GET_PHY_CFG_IN_LEN,
MC_CMD_GET_PHY_CFG_OUT_LEN)];
+#if EFSYS_OPT_NAMES
+ const char *namep;
+ size_t namelen;
+#endif
+ uint32_t phy_media_type;
efx_rc_t rc;
(void) memset(payload, 0, sizeof (payload));
@@ -1449,10 +1468,12 @@ efx_mcdi_get_phy_cfg(
encp->enc_phy_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_TYPE);
#if EFSYS_OPT_NAMES
- (void) strncpy(encp->enc_phy_name,
- MCDI_OUT2(req, char, GET_PHY_CFG_OUT_NAME),
- MIN(sizeof (encp->enc_phy_name) - 1,
- MC_CMD_GET_PHY_CFG_OUT_NAME_LEN));
+ namep = MCDI_OUT2(req, char, GET_PHY_CFG_OUT_NAME);
+ namelen = MIN(sizeof (encp->enc_phy_name) - 1,
+ strnlen(namep, MC_CMD_GET_PHY_CFG_OUT_NAME_LEN));
+ (void) memset(encp->enc_phy_name, 0,
+ sizeof (encp->enc_phy_name));
+ memcpy(encp->enc_phy_name, namep, namelen);
#endif /* EFSYS_OPT_NAMES */
(void) memset(encp->enc_phy_revision, 0,
sizeof (encp->enc_phy_revision));
@@ -1474,8 +1495,8 @@ efx_mcdi_get_phy_cfg(
EFX_STATIC_ASSERT(MC_CMD_MEDIA_SFP_PLUS == EFX_PHY_MEDIA_SFP_PLUS);
EFX_STATIC_ASSERT(MC_CMD_MEDIA_BASE_T == EFX_PHY_MEDIA_BASE_T);
EFX_STATIC_ASSERT(MC_CMD_MEDIA_QSFP_PLUS == EFX_PHY_MEDIA_QSFP_PLUS);
- epp->ep_fixed_port_type =
- (efx_phy_media_type_t) MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_MEDIA_TYPE);
+ phy_media_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_MEDIA_TYPE);
+ epp->ep_fixed_port_type = (efx_phy_media_type_t)phy_media_type;
if (epp->ep_fixed_port_type >= EFX_PHY_MEDIA_NTYPES)
epp->ep_fixed_port_type = EFX_PHY_MEDIA_INVALID;
@@ -1621,7 +1642,7 @@ fail1:
#if EFSYS_OPT_BIST
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
/*
* Enter bist offline mode. This is a fw mode which puts the NIC into a state
* where memory BIST tests can be run and not much else can interfere or happen.
@@ -1657,7 +1678,7 @@ fail1:
return (rc);
}
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
__checkReturn efx_rc_t
efx_mcdi_bist_start(
@@ -1778,7 +1799,7 @@ efx_mcdi_mac_stats(
{
efx_mcdi_req_t req;
uint8_t payload[MAX(MC_CMD_MAC_STATS_IN_LEN,
- MC_CMD_MAC_STATS_OUT_DMA_LEN)];
+ MC_CMD_MAC_STATS_V2_OUT_DMA_LEN)];
int clear = (action == EFX_STATS_CLEAR);
int upload = (action == EFX_STATS_UPLOAD);
int enable = (action == EFX_STATS_ENABLE_NOEVENTS);
@@ -1791,7 +1812,7 @@ efx_mcdi_mac_stats(
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_MAC_STATS_IN_LEN;
req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_MAC_STATS_OUT_DMA_LEN;
+ req.emr_out_length = MC_CMD_MAC_STATS_V2_OUT_DMA_LEN;
MCDI_IN_POPULATE_DWORD_6(req, MAC_STATS_IN_CMD,
MAC_STATS_IN_DMA, upload,
@@ -1801,19 +1822,35 @@ efx_mcdi_mac_stats(
MAC_STATS_IN_PERIODIC_NOEVENT, !events,
MAC_STATS_IN_PERIOD_MS, (enable | events) ? period_ms : 0);
- if (esmp != NULL) {
- int bytes = MC_CMD_MAC_NSTATS * sizeof (uint64_t);
+ if (enable || events || upload) {
+ const efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ uint32_t bytes;
+
+ /* Periodic stats or stats upload require a DMA buffer */
+ if (esmp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS) {
+ /* MAC stats count too small for legacy MAC stats */
+ rc = ENOSPC;
+ goto fail2;
+ }
+
+ bytes = encp->enc_mac_stats_nstats * sizeof (efx_qword_t);
- EFX_STATIC_ASSERT(MC_CMD_MAC_NSTATS * sizeof (uint64_t) <=
- EFX_MAC_STATS_SIZE);
+ if (EFSYS_MEM_SIZE(esmp) < bytes) {
+ /* DMA buffer too small */
+ rc = ENOSPC;
+ goto fail3;
+ }
MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_LO,
EFSYS_MEM_ADDR(esmp) & 0xffffffff);
MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_HI,
EFSYS_MEM_ADDR(esmp) >> 32);
MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_LEN, bytes);
- } else {
- EFSYS_ASSERT(!upload && !enable && !events);
}
/*
@@ -1831,12 +1868,18 @@ efx_mcdi_mac_stats(
if ((req.emr_rc != ENOENT) ||
(enp->en_rx_qcount + enp->en_tx_qcount != 0)) {
rc = req.emr_rc;
- goto fail1;
+ goto fail4;
}
}
return (0);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
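Given the new size checks above, a hedged sketch of how a caller might size the MAC statistics DMA buffer from the NIC configuration; allocation of the efsys buffer is environment-specific and omitted.

	/* Hypothetical sizing sketch; allocation of 'esmp' is not shown. */
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
	size_t mac_stats_size = encp->enc_mac_stats_nstats * sizeof (efx_qword_t);

	/*
	 * The buffer handed to MAC stats requests must hold at least
	 * mac_stats_size bytes, otherwise the request now fails with ENOSPC.
	 */
	rc = efx_mac_stats_upload(enp, esmp);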
@@ -1921,7 +1964,7 @@ fail1:
#endif /* EFSYS_OPT_MAC_STATS */
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
/*
* This function returns the pf and vf number of a function. If it is a pf the
@@ -2020,7 +2063,7 @@ fail1:
return (rc);
}
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
__checkReturn efx_rc_t
efx_mcdi_set_workaround(
diff --git a/drivers/net/sfc/base/efx_mcdi.h b/drivers/net/sfc/base/efx_mcdi.h
index 4e69f048..253a9e60 100644
--- a/drivers/net/sfc/base/efx_mcdi.h
+++ b/drivers/net/sfc/base/efx_mcdi.h
@@ -166,11 +166,11 @@ efx_mcdi_mac_spoofing_supported(
#if EFSYS_OPT_BIST
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
extern __checkReturn efx_rc_t
efx_mcdi_bist_enable_offline(
__in efx_nic_t *enp);
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
extern __checkReturn efx_rc_t
efx_mcdi_bist_start(
__in efx_nic_t *enp,
diff --git a/drivers/net/sfc/base/efx_mon.c b/drivers/net/sfc/base/efx_mon.c
index 234a4201..9fc268ec 100644
--- a/drivers/net/sfc/base/efx_mon.c
+++ b/drivers/net/sfc/base/efx_mon.c
@@ -99,7 +99,7 @@ fail1:
#if EFSYS_OPT_NAMES
-/* START MKCONFIG GENERATED MonitorStatNamesBlock d92af1538001301f */
+/* START MKCONFIG GENERATED MonitorStatNamesBlock 8150a068198c0f96 */
static const char * const __mon_stat_name[] = {
"value_2_5v",
"value_vccp1",
@@ -180,6 +180,10 @@ static const char * const __mon_stat_name[] = {
"board_back_temp",
"i1v8",
"i2v5",
+ "i3v3",
+ "i12v0",
+ "1v3",
+ "i1v3",
};
/* END MKCONFIG GENERATED MonitorStatNamesBlock */
diff --git a/drivers/net/sfc/base/efx_nic.c b/drivers/net/sfc/base/efx_nic.c
index e318c17d..6c162e03 100644
--- a/drivers/net/sfc/base/efx_nic.c
+++ b/drivers/net/sfc/base/efx_nic.c
@@ -7,11 +7,13 @@
#include "efx.h"
#include "efx_impl.h"
+
__checkReturn efx_rc_t
efx_family(
__in uint16_t venid,
__in uint16_t devid,
- __out efx_family_t *efp)
+ __out efx_family_t *efp,
+ __out unsigned int *membarp)
{
if (venid == EFX_PCI_VENID_SFC) {
switch (devid) {
@@ -21,12 +23,10 @@ efx_family(
* Hardware default for PF0 of uninitialised Siena.
* manftest must be able to cope with this device id.
*/
- *efp = EFX_FAMILY_SIENA;
- return (0);
-
case EFX_PCI_DEVID_BETHPAGE:
case EFX_PCI_DEVID_SIENA:
*efp = EFX_FAMILY_SIENA;
+ *membarp = EFX_MEM_BAR_SIENA;
return (0);
#endif /* EFSYS_OPT_SIENA */
@@ -36,17 +36,16 @@ efx_family(
* Hardware default for PF0 of uninitialised Huntington.
* manftest must be able to cope with this device id.
*/
- *efp = EFX_FAMILY_HUNTINGTON;
- return (0);
-
case EFX_PCI_DEVID_FARMINGDALE:
case EFX_PCI_DEVID_GREENPORT:
*efp = EFX_FAMILY_HUNTINGTON;
+ *membarp = EFX_MEM_BAR_HUNTINGTON_PF;
return (0);
case EFX_PCI_DEVID_FARMINGDALE_VF:
case EFX_PCI_DEVID_GREENPORT_VF:
*efp = EFX_FAMILY_HUNTINGTON;
+ *membarp = EFX_MEM_BAR_HUNTINGTON_VF;
return (0);
#endif /* EFSYS_OPT_HUNTINGTON */
@@ -56,18 +55,30 @@ efx_family(
* Hardware default for PF0 of uninitialised Medford.
* manftest must be able to cope with this device id.
*/
- *efp = EFX_FAMILY_MEDFORD;
- return (0);
-
case EFX_PCI_DEVID_MEDFORD:
*efp = EFX_FAMILY_MEDFORD;
+ *membarp = EFX_MEM_BAR_MEDFORD_PF;
return (0);
case EFX_PCI_DEVID_MEDFORD_VF:
*efp = EFX_FAMILY_MEDFORD;
+ *membarp = EFX_MEM_BAR_MEDFORD_VF;
return (0);
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+ case EFX_PCI_DEVID_MEDFORD2_PF_UNINIT:
+ /*
+ * Hardware default for PF0 of uninitialised Medford2.
+ * manftest must be able to cope with this device id.
+ */
+ case EFX_PCI_DEVID_MEDFORD2:
+ case EFX_PCI_DEVID_MEDFORD2_VF:
+ *efp = EFX_FAMILY_MEDFORD2;
+ *membarp = EFX_MEM_BAR_MEDFORD2;
+ return (0);
+#endif /* EFSYS_OPT_MEDFORD2 */
+
case EFX_PCI_DEVID_FALCON: /* Obsolete, not supported */
default:
break;
@@ -78,6 +89,7 @@ efx_family(
return (ENOTSUP);
}
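A hedged caller-side sketch of the updated efx_family() contract: the new output reports which PCI memory BAR to map before the NIC object is created; venid and devid are placeholders read from PCI config space.

	/* Hypothetical caller sketch. */
	efx_family_t family;
	unsigned int membar;
	efx_rc_t rc;

	rc = efx_family(venid, devid, &family, &membar);
	if (rc != 0)
		return (rc);

	/*
	 * 'membar' (e.g. EFX_MEM_BAR_MEDFORD2) identifies the PCI BAR the
	 * caller must map and pass to efx_nic_create() as the efsys_bar_t.
	 */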
+
#if EFSYS_OPT_SIENA
static const efx_nic_ops_t __efx_nic_siena_ops = {
@@ -135,6 +147,25 @@ static const efx_nic_ops_t __efx_nic_medford_ops = {
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+
+static const efx_nic_ops_t __efx_nic_medford2_ops = {
+ ef10_nic_probe, /* eno_probe */
+ medford2_board_cfg, /* eno_board_cfg */
+ ef10_nic_set_drv_limits, /* eno_set_drv_limits */
+ ef10_nic_reset, /* eno_reset */
+ ef10_nic_init, /* eno_init */
+ ef10_nic_get_vi_pool, /* eno_get_vi_pool */
+ ef10_nic_get_bar_region, /* eno_get_bar_region */
+#if EFSYS_OPT_DIAG
+ ef10_nic_register_test, /* eno_register_test */
+#endif /* EFSYS_OPT_DIAG */
+ ef10_nic_fini, /* eno_fini */
+ ef10_nic_unprobe, /* eno_unprobe */
+};
+
+#endif /* EFSYS_OPT_MEDFORD2 */
+
__checkReturn efx_rc_t
efx_nic_create(
@@ -213,6 +244,22 @@ efx_nic_create(
break;
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ enp->en_enop = &__efx_nic_medford2_ops;
+ enp->en_features =
+ EFX_FEATURE_IPV6 |
+ EFX_FEATURE_LINK_EVENTS |
+ EFX_FEATURE_PERIODIC_MAC_STATS |
+ EFX_FEATURE_MCDI |
+ EFX_FEATURE_MAC_HEADER_FILTERS |
+ EFX_FEATURE_MCDI_DMA |
+ EFX_FEATURE_PIO_BUFFERS |
+ EFX_FEATURE_FW_ASSISTED_TSO_V2 |
+ EFX_FEATURE_PACKED_STREAM;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
default:
rc = ENOTSUP;
goto fail2;
@@ -243,7 +290,8 @@ fail1:
__checkReturn efx_rc_t
efx_nic_probe(
- __in efx_nic_t *enp)
+ __in efx_nic_t *enp,
+ __in efx_fw_variant_t efv)
{
const efx_nic_ops_t *enop;
efx_rc_t rc;
@@ -254,7 +302,27 @@ efx_nic_probe(
#endif /* EFSYS_OPT_MCDI */
EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_PROBE));
+ /* Ensure FW variant codes match with MC_CMD_FW codes */
+ EFX_STATIC_ASSERT(EFX_FW_VARIANT_FULL_FEATURED ==
+ MC_CMD_FW_FULL_FEATURED);
+ EFX_STATIC_ASSERT(EFX_FW_VARIANT_LOW_LATENCY ==
+ MC_CMD_FW_LOW_LATENCY);
+ EFX_STATIC_ASSERT(EFX_FW_VARIANT_PACKED_STREAM ==
+ MC_CMD_FW_PACKED_STREAM);
+ EFX_STATIC_ASSERT(EFX_FW_VARIANT_HIGH_TX_RATE ==
+ MC_CMD_FW_HIGH_TX_RATE);
+ EFX_STATIC_ASSERT(EFX_FW_VARIANT_PACKED_STREAM_HASH_MODE_1 ==
+ MC_CMD_FW_PACKED_STREAM_HASH_MODE_1);
+ EFX_STATIC_ASSERT(EFX_FW_VARIANT_RULES_ENGINE ==
+ MC_CMD_FW_RULES_ENGINE);
+ EFX_STATIC_ASSERT(EFX_FW_VARIANT_DPDK ==
+ MC_CMD_FW_DPDK);
+ EFX_STATIC_ASSERT(EFX_FW_VARIANT_DONT_CARE ==
+ (int)MC_CMD_FW_DONT_CARE);
+
enop = enp->en_enop;
+ enp->efv = efv;
+
if ((rc = enop->eno_probe(enp)) != 0)
goto fail1;
@@ -536,6 +604,18 @@ efx_nic_get_fw_version(
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+ /* Ensure RXDP_FW_ID codes match with MC_CMD_GET_CAPABILITIES codes */
+ EFX_STATIC_ASSERT(EFX_RXDP_FULL_FEATURED_FW_ID ==
+ MC_CMD_GET_CAPABILITIES_OUT_RXDP);
+ EFX_STATIC_ASSERT(EFX_RXDP_LOW_LATENCY_FW_ID ==
+ MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY);
+ EFX_STATIC_ASSERT(EFX_RXDP_PACKED_STREAM_FW_ID ==
+ MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM);
+ EFX_STATIC_ASSERT(EFX_RXDP_RULES_ENGINE_FW_ID ==
+ MC_CMD_GET_CAPABILITIES_OUT_RXDP_RULES_ENGINE);
+ EFX_STATIC_ASSERT(EFX_RXDP_DPDK_FW_ID ==
+ MC_CMD_GET_CAPABILITIES_OUT_RXDP_DPDK);
+
rc = efx_mcdi_version(enp, mc_fw_version, NULL, NULL);
if (rc != 0)
goto fail2;
@@ -607,48 +687,49 @@ efx_loopback_mask(
EFSYS_ASSERT3U(loopback_kind, <, EFX_LOOPBACK_NKINDS);
EFSYS_ASSERT(maskp != NULL);
- /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespace agree */
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XPORT == EFX_LOOPBACK_XPORT);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII_WS == EFX_LOOPBACK_XGMII_WS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS == EFX_LOOPBACK_XAUI_WS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS_FAR ==
- EFX_LOOPBACK_XAUI_WS_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS_NEAR ==
- EFX_LOOPBACK_XAUI_WS_NEAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_WS == EFX_LOOPBACK_GMII_WS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_WS == EFX_LOOPBACK_XFI_WS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_WS_FAR ==
- EFX_LOOPBACK_XFI_WS_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS_WS == EFX_LOOPBACK_PHYXS_WS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMA_INT == EFX_LOOPBACK_PMA_INT);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_NEAR == EFX_LOOPBACK_SD_NEAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FAR == EFX_LOOPBACK_SD_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMA_INT_WS ==
- EFX_LOOPBACK_PMA_INT_WS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP2_WS ==
- EFX_LOOPBACK_SD_FEP2_WS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP1_5_WS ==
- EFX_LOOPBACK_SD_FEP1_5_WS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP_WS == EFX_LOOPBACK_SD_FEP_WS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FES_WS == EFX_LOOPBACK_SD_FES_WS);
+ /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespaces agree */
+#define LOOPBACK_CHECK(_mcdi, _efx) \
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_##_mcdi == EFX_LOOPBACK_##_efx)
+
+ LOOPBACK_CHECK(NONE, OFF);
+ LOOPBACK_CHECK(DATA, DATA);
+ LOOPBACK_CHECK(GMAC, GMAC);
+ LOOPBACK_CHECK(XGMII, XGMII);
+ LOOPBACK_CHECK(XGXS, XGXS);
+ LOOPBACK_CHECK(XAUI, XAUI);
+ LOOPBACK_CHECK(GMII, GMII);
+ LOOPBACK_CHECK(SGMII, SGMII);
+ LOOPBACK_CHECK(XGBR, XGBR);
+ LOOPBACK_CHECK(XFI, XFI);
+ LOOPBACK_CHECK(XAUI_FAR, XAUI_FAR);
+ LOOPBACK_CHECK(GMII_FAR, GMII_FAR);
+ LOOPBACK_CHECK(SGMII_FAR, SGMII_FAR);
+ LOOPBACK_CHECK(XFI_FAR, XFI_FAR);
+ LOOPBACK_CHECK(GPHY, GPHY);
+ LOOPBACK_CHECK(PHYXS, PHY_XS);
+ LOOPBACK_CHECK(PCS, PCS);
+ LOOPBACK_CHECK(PMAPMD, PMA_PMD);
+ LOOPBACK_CHECK(XPORT, XPORT);
+ LOOPBACK_CHECK(XGMII_WS, XGMII_WS);
+ LOOPBACK_CHECK(XAUI_WS, XAUI_WS);
+ LOOPBACK_CHECK(XAUI_WS_FAR, XAUI_WS_FAR);
+ LOOPBACK_CHECK(XAUI_WS_NEAR, XAUI_WS_NEAR);
+ LOOPBACK_CHECK(GMII_WS, GMII_WS);
+ LOOPBACK_CHECK(XFI_WS, XFI_WS);
+ LOOPBACK_CHECK(XFI_WS_FAR, XFI_WS_FAR);
+ LOOPBACK_CHECK(PHYXS_WS, PHYXS_WS);
+ LOOPBACK_CHECK(PMA_INT, PMA_INT);
+ LOOPBACK_CHECK(SD_NEAR, SD_NEAR);
+ LOOPBACK_CHECK(SD_FAR, SD_FAR);
+ LOOPBACK_CHECK(PMA_INT_WS, PMA_INT_WS);
+ LOOPBACK_CHECK(SD_FEP2_WS, SD_FEP2_WS);
+ LOOPBACK_CHECK(SD_FEP1_5_WS, SD_FEP1_5_WS);
+ LOOPBACK_CHECK(SD_FEP_WS, SD_FEP_WS);
+ LOOPBACK_CHECK(SD_FES_WS, SD_FES_WS);
+ LOOPBACK_CHECK(AOE_INT_NEAR, AOE_INT_NEAR);
+ LOOPBACK_CHECK(DATA_WS, DATA_WS);
+ LOOPBACK_CHECK(FORCE_EXT_LINK, FORCE_EXT_LINK);
+#undef LOOPBACK_CHECK
/* Build bitmask of possible loopback types */
EFX_ZERO_QWORD(mask);
@@ -706,7 +787,7 @@ efx_mcdi_get_loopback_modes(
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
efx_mcdi_req_t req;
uint8_t payload[MAX(MC_CMD_GET_LOOPBACK_MODES_IN_LEN,
- MC_CMD_GET_LOOPBACK_MODES_OUT_LEN)];
+ MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN)];
efx_qword_t mask;
efx_qword_t modes;
efx_rc_t rc;
@@ -716,7 +797,7 @@ efx_mcdi_get_loopback_modes(
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_LOOPBACK_MODES_IN_LEN;
req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_GET_LOOPBACK_MODES_OUT_LEN;
+ req.emr_out_length = MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN;
efx_mcdi_execute(enp, &req);
@@ -757,18 +838,51 @@ efx_mcdi_get_loopback_modes(
MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST +
MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN) {
/* Response includes 40G loopback modes */
- modes =
- *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_40G);
+ modes = *MCDI_OUT2(req, efx_qword_t,
+ GET_LOOPBACK_MODES_OUT_40G);
EFX_AND_QWORD(modes, mask);
encp->enc_loopback_types[EFX_LINK_40000FDX] = modes;
}
+ if (req.emr_out_length_used >=
+ MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST +
+ MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN) {
+ /* Response includes 25G loopback modes */
+ modes = *MCDI_OUT2(req, efx_qword_t,
+ GET_LOOPBACK_MODES_OUT_V2_25G);
+ EFX_AND_QWORD(modes, mask);
+ encp->enc_loopback_types[EFX_LINK_25000FDX] = modes;
+ }
+
+ if (req.emr_out_length_used >=
+ MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST +
+ MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN) {
+ /* Response includes 50G loopback modes */
+ modes = *MCDI_OUT2(req, efx_qword_t,
+ GET_LOOPBACK_MODES_OUT_V2_50G);
+ EFX_AND_QWORD(modes, mask);
+ encp->enc_loopback_types[EFX_LINK_50000FDX] = modes;
+ }
+
+ if (req.emr_out_length_used >=
+ MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST +
+ MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN) {
+ /* Response includes 100G loopback modes */
+ modes = *MCDI_OUT2(req, efx_qword_t,
+ GET_LOOPBACK_MODES_OUT_V2_100G);
+ EFX_AND_QWORD(modes, mask);
+ encp->enc_loopback_types[EFX_LINK_100000FDX] = modes;
+ }
+
EFX_ZERO_QWORD(modes);
EFX_SET_QWORD_BIT(modes, EFX_LOOPBACK_OFF);
EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_100FDX]);
EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_1000FDX]);
EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_10000FDX]);
EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_40000FDX]);
+ EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_25000FDX]);
+ EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_50000FDX]);
+ EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_100000FDX]);
encp->enc_loopback_types[EFX_LINK_UNKNOWN] = modes;
return (0);
@@ -830,6 +944,82 @@ fail1:
return (rc);
}
+#if EFSYS_OPT_FW_SUBVARIANT_AWARE
+
+ __checkReturn efx_rc_t
+efx_nic_get_fw_subvariant(
+ __in efx_nic_t *enp,
+ __out efx_nic_fw_subvariant_t *subvariantp)
+{
+ efx_rc_t rc;
+ uint32_t value;
+
+ rc = efx_mcdi_get_nic_global(enp,
+ MC_CMD_SET_NIC_GLOBAL_IN_FIRMWARE_SUBVARIANT, &value);
+ if (rc != 0)
+ goto fail1;
+
+ /* Mapping is not required since values match MCDI */
+ EFX_STATIC_ASSERT(EFX_NIC_FW_SUBVARIANT_DEFAULT ==
+ MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_DEFAULT);
+ EFX_STATIC_ASSERT(EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM ==
+ MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_NO_TX_CSUM);
+
+ switch (value) {
+ case MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_DEFAULT:
+ case MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_NO_TX_CSUM:
+ *subvariantp = value;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_set_fw_subvariant(
+ __in efx_nic_t *enp,
+ __in efx_nic_fw_subvariant_t subvariant)
+{
+ efx_rc_t rc;
+
+ switch (subvariant) {
+ case EFX_NIC_FW_SUBVARIANT_DEFAULT:
+ case EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM:
+ /* Mapping is not required since values match MCDI */
+ break;
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ rc = efx_mcdi_set_nic_global(enp,
+ MC_CMD_SET_NIC_GLOBAL_IN_FIRMWARE_SUBVARIANT, subvariant);
+ if (rc != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */
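A hedged usage sketch for the two new subvariant accessors; note that, per the MCDI error codes later in this patch, the firmware may refuse the change while filters or VIs are still allocated.

	/* Hypothetical sketch: switch to the no-Tx-checksum subvariant. */
	efx_nic_fw_subvariant_t subvariant;
	efx_rc_t rc;

	rc = efx_nic_get_fw_subvariant(enp, &subvariant);
	if (rc != 0)
		return (rc);

	if (subvariant != EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM)
		rc = efx_nic_set_fw_subvariant(enp,
		    EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM);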
__checkReturn efx_rc_t
efx_nic_check_pcie_link_speed(
diff --git a/drivers/net/sfc/base/efx_nvram.c b/drivers/net/sfc/base/efx_nvram.c
index c2cc9ad3..be409c3a 100644
--- a/drivers/net/sfc/base/efx_nvram.c
+++ b/drivers/net/sfc/base/efx_nvram.c
@@ -30,7 +30,7 @@ static const efx_nvram_ops_t __efx_nvram_siena_ops = {
#endif /* EFSYS_OPT_SIENA */
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static const efx_nvram_ops_t __efx_nvram_ef10_ops = {
#if EFSYS_OPT_DIAG
@@ -49,7 +49,7 @@ static const efx_nvram_ops_t __efx_nvram_ef10_ops = {
ef10_nvram_buffer_validate, /* envo_buffer_validate */
};
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
__checkReturn efx_rc_t
efx_nvram_init(
@@ -81,6 +81,12 @@ efx_nvram_init(
break;
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ envop = &__efx_nvram_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
default:
EFSYS_ASSERT(0);
rc = ENOTSUP;
diff --git a/drivers/net/sfc/base/efx_phy.c b/drivers/net/sfc/base/efx_phy.c
index 069c2836..ba2f51c1 100644
--- a/drivers/net/sfc/base/efx_phy.c
+++ b/drivers/net/sfc/base/efx_phy.c
@@ -27,7 +27,7 @@ static const efx_phy_ops_t __efx_phy_siena_ops = {
};
#endif /* EFSYS_OPT_SIENA */
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static const efx_phy_ops_t __efx_phy_ef10_ops = {
ef10_phy_power, /* epo_power */
NULL, /* epo_reset */
@@ -44,7 +44,7 @@ static const efx_phy_ops_t __efx_phy_ef10_ops = {
ef10_bist_stop, /* epo_bist_stop */
#endif /* EFSYS_OPT_BIST */
};
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
__checkReturn efx_rc_t
efx_phy_probe(
@@ -67,16 +67,25 @@ efx_phy_probe(
epop = &__efx_phy_siena_ops;
break;
#endif /* EFSYS_OPT_SIENA */
+
#if EFSYS_OPT_HUNTINGTON
case EFX_FAMILY_HUNTINGTON:
epop = &__efx_phy_ef10_ops;
break;
#endif /* EFSYS_OPT_HUNTINGTON */
+
#if EFSYS_OPT_MEDFORD
case EFX_FAMILY_MEDFORD:
epop = &__efx_phy_ef10_ops;
break;
#endif /* EFSYS_OPT_MEDFORD */
+
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ epop = &__efx_phy_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
default:
rc = ENOTSUP;
goto fail1;
@@ -176,6 +185,7 @@ efx_phy_adv_cap_get(
break;
default:
EFSYS_ASSERT(B_FALSE);
+ *maskp = 0;
break;
}
}
diff --git a/drivers/net/sfc/base/efx_port.c b/drivers/net/sfc/base/efx_port.c
index a792a9ef..33a1a084 100644
--- a/drivers/net/sfc/base/efx_port.c
+++ b/drivers/net/sfc/base/efx_port.c
@@ -120,7 +120,7 @@ efx_port_loopback_set(
EFSYS_ASSERT(link_mode < EFX_LINK_NMODES);
if (EFX_TEST_QWORD_BIT(encp->enc_loopback_types[link_mode],
- loopback_type) == 0) {
+ (int)loopback_type) == 0) {
rc = ENOTSUP;
goto fail1;
}
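For context, a hedged caller-side sketch mirroring the check above: the per-link-mode masks filled in by efx_mcdi_get_loopback_modes() gate which loopback types efx_port_loopback_set() accepts; the chosen link mode and loopback type are arbitrary examples.

	/* Hypothetical check before requesting a loopback mode. */
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);

	if (EFX_TEST_QWORD_BIT(encp->enc_loopback_types[EFX_LINK_25000FDX],
	    (int)EFX_LOOPBACK_DATA) != 0)
		rc = efx_port_loopback_set(enp, EFX_LINK_25000FDX,
		    EFX_LOOPBACK_DATA);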
@@ -180,6 +180,9 @@ static const char * const __efx_loopback_type_name[] = {
"SD_FEP1_5_WS",
"SD_FEP_WS",
"SD_FES_WS",
+ "AOE_INT_NEAR",
+ "DATA_WS",
+ "FORCE_EXT_LINK",
};
__checkReturn const char *
diff --git a/drivers/net/sfc/base/efx_regs_ef10.h b/drivers/net/sfc/base/efx_regs_ef10.h
index 5f978305..968aaaca 100644
--- a/drivers/net/sfc/base/efx_regs_ef10.h
+++ b/drivers/net/sfc/base/efx_regs_ef10.h
@@ -24,7 +24,7 @@ extern "C" {
*/
#define ER_DZ_BIU_HW_REV_ID_REG_OFST 0x00000000
-/* hunta0,medforda0=pcie_pf_bar2 */
+/* hunta0,medforda0,medford2a0=pf_dbell_bar */
#define ER_DZ_BIU_HW_REV_ID_REG_RESET 0xeb14face
@@ -38,7 +38,7 @@ extern "C" {
*/
#define ER_DZ_BIU_MC_SFT_STATUS_REG_OFST 0x00000010
-/* hunta0,medforda0=pcie_pf_bar2 */
+/* hunta0,medforda0,medford2a0=pf_dbell_bar */
#define ER_DZ_BIU_MC_SFT_STATUS_REG_STEP 4
#define ER_DZ_BIU_MC_SFT_STATUS_REG_ROWS 8
#define ER_DZ_BIU_MC_SFT_STATUS_REG_RESET 0x1111face
@@ -54,7 +54,7 @@ extern "C" {
*/
#define ER_DZ_BIU_INT_ISR_REG_OFST 0x00000090
-/* hunta0,medforda0=pcie_pf_bar2 */
+/* hunta0,medforda0,medford2a0=pf_dbell_bar */
#define ER_DZ_BIU_INT_ISR_REG_RESET 0x0
@@ -68,7 +68,7 @@ extern "C" {
*/
#define ER_DZ_MC_DB_LWRD_REG_OFST 0x00000200
-/* hunta0,medforda0=pcie_pf_bar2 */
+/* hunta0,medforda0,medford2a0=pf_dbell_bar */
#define ER_DZ_MC_DB_LWRD_REG_RESET 0x0
@@ -82,7 +82,7 @@ extern "C" {
*/
#define ER_DZ_MC_DB_HWRD_REG_OFST 0x00000204
-/* hunta0,medforda0=pcie_pf_bar2 */
+/* hunta0,medforda0,medford2a0=pf_dbell_bar */
#define ER_DZ_MC_DB_HWRD_REG_RESET 0x0
@@ -96,7 +96,7 @@ extern "C" {
*/
#define ER_DZ_EVQ_RPTR_REG_OFST 0x00000400
-/* hunta0,medforda0=pcie_pf_bar2 */
+/* hunta0,medforda0,medford2a0=pf_dbell_bar */
#define ER_DZ_EVQ_RPTR_REG_STEP 8192
#define ER_DZ_EVQ_RPTR_REG_ROWS 2048
#define ER_DZ_EVQ_RPTR_REG_RESET 0x0
@@ -109,17 +109,95 @@ extern "C" {
/*
+ * EVQ_RPTR_REG_64K(32bit):
+ *
+ */
+
+#define ER_FZ_EVQ_RPTR_REG_64K_OFST 0x00000400
+/* medford2a0=pf_dbell_bar */
+#define ER_FZ_EVQ_RPTR_REG_64K_STEP 65536
+#define ER_FZ_EVQ_RPTR_REG_64K_ROWS 2048
+#define ER_FZ_EVQ_RPTR_REG_64K_RESET 0x0
+
+
+#define ERF_FZ_EVQ_RPTR_VLD_LBN 15
+#define ERF_FZ_EVQ_RPTR_VLD_WIDTH 1
+#define ERF_FZ_EVQ_RPTR_LBN 0
+#define ERF_FZ_EVQ_RPTR_WIDTH 15
+
+
+/*
+ * EVQ_RPTR_REG_16K(32bit):
+ *
+ */
+
+#define ER_FZ_EVQ_RPTR_REG_16K_OFST 0x00000400
+/* medford2a0=pf_dbell_bar */
+#define ER_FZ_EVQ_RPTR_REG_16K_STEP 16384
+#define ER_FZ_EVQ_RPTR_REG_16K_ROWS 2048
+#define ER_FZ_EVQ_RPTR_REG_16K_RESET 0x0
+
+
+/* defined as ERF_FZ_EVQ_RPTR_VLD_LBN 15; */
+/* defined as ERF_FZ_EVQ_RPTR_VLD_WIDTH 1 */
+/* defined as ERF_FZ_EVQ_RPTR_LBN 0; */
+/* defined as ERF_FZ_EVQ_RPTR_WIDTH 15 */
+
+
+/*
+ * EVQ_TMR_REG_64K(32bit):
+ *
+ */
+
+#define ER_FZ_EVQ_TMR_REG_64K_OFST 0x00000420
+/* medford2a0=pf_dbell_bar */
+#define ER_FZ_EVQ_TMR_REG_64K_STEP 65536
+#define ER_FZ_EVQ_TMR_REG_64K_ROWS 2048
+#define ER_FZ_EVQ_TMR_REG_64K_RESET 0x0
+
+
+#define ERF_FZ_TC_TMR_REL_VAL_LBN 16
+#define ERF_FZ_TC_TMR_REL_VAL_WIDTH 14
+#define ERF_FZ_TC_TIMER_MODE_LBN 14
+#define ERF_FZ_TC_TIMER_MODE_WIDTH 2
+#define ERF_FZ_TC_TIMER_VAL_LBN 0
+#define ERF_FZ_TC_TIMER_VAL_WIDTH 14
+
+
+/*
+ * EVQ_TMR_REG_16K(32bit):
+ *
+ */
+
+#define ER_FZ_EVQ_TMR_REG_16K_OFST 0x00000420
+/* medford2a0=pf_dbell_bar */
+#define ER_FZ_EVQ_TMR_REG_16K_STEP 16384
+#define ER_FZ_EVQ_TMR_REG_16K_ROWS 2048
+#define ER_FZ_EVQ_TMR_REG_16K_RESET 0x0
+
+
+/* defined as ERF_FZ_TC_TMR_REL_VAL_LBN 16; */
+/* defined as ERF_FZ_TC_TMR_REL_VAL_WIDTH 14 */
+/* defined as ERF_FZ_TC_TIMER_MODE_LBN 14; */
+/* defined as ERF_FZ_TC_TIMER_MODE_WIDTH 2 */
+/* defined as ERF_FZ_TC_TIMER_VAL_LBN 0; */
+/* defined as ERF_FZ_TC_TIMER_VAL_WIDTH 14 */
+
+
+/*
* EVQ_TMR_REG(32bit):
*
*/
#define ER_DZ_EVQ_TMR_REG_OFST 0x00000420
-/* hunta0,medforda0=pcie_pf_bar2 */
+/* hunta0,medforda0,medford2a0=pf_dbell_bar */
#define ER_DZ_EVQ_TMR_REG_STEP 8192
#define ER_DZ_EVQ_TMR_REG_ROWS 2048
#define ER_DZ_EVQ_TMR_REG_RESET 0x0
+/* defined as ERF_FZ_TC_TMR_REL_VAL_LBN 16; */
+/* defined as ERF_FZ_TC_TMR_REL_VAL_WIDTH 14 */
#define ERF_DZ_TC_TIMER_MODE_LBN 14
#define ERF_DZ_TC_TIMER_MODE_WIDTH 2
#define ERF_DZ_TC_TIMER_VAL_LBN 0
@@ -127,12 +205,28 @@ extern "C" {
/*
+ * RX_DESC_UPD_REG_16K(32bit):
+ *
+ */
+
+#define ER_FZ_RX_DESC_UPD_REG_16K_OFST 0x00000830
+/* medford2a0=pf_dbell_bar */
+#define ER_FZ_RX_DESC_UPD_REG_16K_STEP 16384
+#define ER_FZ_RX_DESC_UPD_REG_16K_ROWS 2048
+#define ER_FZ_RX_DESC_UPD_REG_16K_RESET 0x0
+
+
+#define ERF_FZ_RX_DESC_WPTR_LBN 0
+#define ERF_FZ_RX_DESC_WPTR_WIDTH 12
+
+
+/*
* RX_DESC_UPD_REG(32bit):
*
*/
#define ER_DZ_RX_DESC_UPD_REG_OFST 0x00000830
-/* hunta0,medforda0=pcie_pf_bar2 */
+/* hunta0,medforda0,medford2a0=pf_dbell_bar */
#define ER_DZ_RX_DESC_UPD_REG_STEP 8192
#define ER_DZ_RX_DESC_UPD_REG_ROWS 2048
#define ER_DZ_RX_DESC_UPD_REG_RESET 0x0
@@ -141,13 +235,74 @@ extern "C" {
#define ERF_DZ_RX_DESC_WPTR_LBN 0
#define ERF_DZ_RX_DESC_WPTR_WIDTH 12
+
+/*
+ * RX_DESC_UPD_REG_64K(32bit):
+ *
+ */
+
+#define ER_FZ_RX_DESC_UPD_REG_64K_OFST 0x00000830
+/* medford2a0=pf_dbell_bar */
+#define ER_FZ_RX_DESC_UPD_REG_64K_STEP 65536
+#define ER_FZ_RX_DESC_UPD_REG_64K_ROWS 2048
+#define ER_FZ_RX_DESC_UPD_REG_64K_RESET 0x0
+
+
+/* defined as ERF_FZ_RX_DESC_WPTR_LBN 0; */
+/* defined as ERF_FZ_RX_DESC_WPTR_WIDTH 12 */
+
+
+/*
+ * TX_DESC_UPD_REG_64K(96bit):
+ *
+ */
+
+#define ER_FZ_TX_DESC_UPD_REG_64K_OFST 0x00000a10
+/* medford2a0=pf_dbell_bar */
+#define ER_FZ_TX_DESC_UPD_REG_64K_STEP 65536
+#define ER_FZ_TX_DESC_UPD_REG_64K_ROWS 2048
+#define ER_FZ_TX_DESC_UPD_REG_64K_RESET 0x0
+
+
+#define ERF_FZ_RSVD_LBN 76
+#define ERF_FZ_RSVD_WIDTH 20
+#define ERF_FZ_TX_DESC_WPTR_LBN 64
+#define ERF_FZ_TX_DESC_WPTR_WIDTH 12
+#define ERF_FZ_TX_DESC_HWORD_LBN 32
+#define ERF_FZ_TX_DESC_HWORD_WIDTH 32
+#define ERF_FZ_TX_DESC_LWORD_LBN 0
+#define ERF_FZ_TX_DESC_LWORD_WIDTH 32
+
+
+/*
+ * TX_DESC_UPD_REG_16K(96bit):
+ *
+ */
+
+#define ER_FZ_TX_DESC_UPD_REG_16K_OFST 0x00000a10
+/* medford2a0=pf_dbell_bar */
+#define ER_FZ_TX_DESC_UPD_REG_16K_STEP 16384
+#define ER_FZ_TX_DESC_UPD_REG_16K_ROWS 2048
+#define ER_FZ_TX_DESC_UPD_REG_16K_RESET 0x0
+
+
+/* defined as ERF_FZ_RSVD_LBN 76; */
+/* defined as ERF_FZ_RSVD_WIDTH 20 */
+/* defined as ERF_FZ_TX_DESC_WPTR_LBN 64; */
+/* defined as ERF_FZ_TX_DESC_WPTR_WIDTH 12 */
+/* defined as ERF_FZ_TX_DESC_HWORD_LBN 32; */
+/* defined as ERF_FZ_TX_DESC_HWORD_WIDTH 32 */
+/* defined as ERF_FZ_TX_DESC_LWORD_LBN 0; */
+/* defined as ERF_FZ_TX_DESC_LWORD_WIDTH 32 */
+
+
/*
* TX_DESC_UPD_REG(96bit):
*
*/
#define ER_DZ_TX_DESC_UPD_REG_OFST 0x00000a10
-/* hunta0,medforda0=pcie_pf_bar2 */
+/* hunta0,medforda0,medford2a0=pf_dbell_bar */
#define ER_DZ_TX_DESC_UPD_REG_STEP 8192
#define ER_DZ_TX_DESC_UPD_REG_ROWS 2048
#define ER_DZ_TX_DESC_UPD_REG_RESET 0x0
@@ -233,16 +388,24 @@ extern "C" {
#define ESF_DZ_RX_EV_SOFT2_WIDTH 2
#define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48
#define ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4
-#define ESF_DZ_RX_L4_CLASS_LBN 45
-#define ESF_DZ_RX_L4_CLASS_WIDTH 3
-#define ESE_DZ_L4_CLASS_RSVD7 7
-#define ESE_DZ_L4_CLASS_RSVD6 6
-#define ESE_DZ_L4_CLASS_RSVD5 5
-#define ESE_DZ_L4_CLASS_RSVD4 4
-#define ESE_DZ_L4_CLASS_RSVD3 3
-#define ESE_DZ_L4_CLASS_UDP 2
-#define ESE_DZ_L4_CLASS_TCP 1
-#define ESE_DZ_L4_CLASS_UNKNOWN 0
+#define ESF_DE_RX_L4_CLASS_LBN 45
+#define ESF_DE_RX_L4_CLASS_WIDTH 3
+#define ESE_DE_L4_CLASS_RSVD7 7
+#define ESE_DE_L4_CLASS_RSVD6 6
+#define ESE_DE_L4_CLASS_RSVD5 5
+#define ESE_DE_L4_CLASS_RSVD4 4
+#define ESE_DE_L4_CLASS_RSVD3 3
+#define ESE_DE_L4_CLASS_UDP 2
+#define ESE_DE_L4_CLASS_TCP 1
+#define ESE_DE_L4_CLASS_UNKNOWN 0
+#define ESF_FZ_RX_FASTPD_INDCTR_LBN 47
+#define ESF_FZ_RX_FASTPD_INDCTR_WIDTH 1
+#define ESF_FZ_RX_L4_CLASS_LBN 45
+#define ESF_FZ_RX_L4_CLASS_WIDTH 2
+#define ESE_FZ_L4_CLASS_RSVD3 3
+#define ESE_FZ_L4_CLASS_UDP 2
+#define ESE_FZ_L4_CLASS_TCP 1
+#define ESE_FZ_L4_CLASS_UNKNOWN 0
#define ESF_DZ_RX_L3_CLASS_LBN 42
#define ESF_DZ_RX_L3_CLASS_WIDTH 3
#define ESE_DZ_L3_CLASS_RSVD7 7
@@ -289,6 +452,8 @@ extern "C" {
#define ESF_EZ_RX_ABORT_WIDTH 1
#define ESF_DZ_RX_ECC_ERR_LBN 29
#define ESF_DZ_RX_ECC_ERR_WIDTH 1
+#define ESF_DZ_RX_TRUNC_ERR_LBN 29
+#define ESF_DZ_RX_TRUNC_ERR_WIDTH 1
#define ESF_DZ_RX_CRC1_ERR_LBN 28
#define ESF_DZ_RX_CRC1_ERR_WIDTH 1
#define ESF_DZ_RX_CRC0_ERR_LBN 27
@@ -419,6 +584,8 @@ extern "C" {
#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
#define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48
@@ -429,7 +596,7 @@ extern "C" {
#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
-/* TX_TSO_FATSO2A_DESC */
+/* ES_TX_TSO_V2_DESC_A */
#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
#define ESF_DZ_TX_OPTION_TYPE_LBN 60
@@ -449,7 +616,7 @@ extern "C" {
#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
-/* TX_TSO_FATSO2B_DESC */
+/* ES_TX_TSO_V2_DESC_B */
#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
#define ESF_DZ_TX_OPTION_TYPE_LBN 60
@@ -463,12 +630,10 @@ extern "C" {
#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
-#define ESF_DZ_TX_TSO_OUTER_IP_ID_LBN 16
-#define ESF_DZ_TX_TSO_OUTER_IP_ID_WIDTH 16
#define ESF_DZ_TX_TSO_TCP_MSS_LBN 32
#define ESF_DZ_TX_TSO_TCP_MSS_WIDTH 16
-#define ESF_DZ_TX_TSO_INNER_PE_CSUM_LBN 0
-#define ESF_DZ_TX_TSO_INNER_PE_CSUM_WIDTH 16
+#define ESF_DZ_TX_TSO_OUTER_IPID_LBN 0
+#define ESF_DZ_TX_TSO_OUTER_IPID_WIDTH 16
/* ES_TX_VLAN_DESC */
@@ -533,6 +698,21 @@ extern "C" {
#define ES_DZ_PS_RX_PREFIX_ORIG_LEN_LBN 48
#define ES_DZ_PS_RX_PREFIX_ORIG_LEN_WIDTH 16
+/* Equal stride super-buffer RX packet prefix (see SF-119419-TC) */
+#define ES_EZ_ESSB_RX_PREFIX_LEN 8
+#define ES_EZ_ESSB_RX_PREFIX_DATA_LEN_LBN 0
+#define ES_EZ_ESSB_RX_PREFIX_DATA_LEN_WIDTH 16
+#define ES_EZ_ESSB_RX_PREFIX_MARK_LBN 16
+#define ES_EZ_ESSB_RX_PREFIX_MARK_WIDTH 8
+#define ES_EZ_ESSB_RX_PREFIX_HASH_VALID_LBN 28
+#define ES_EZ_ESSB_RX_PREFIX_HASH_VALID_WIDTH 1
+#define ES_EZ_ESSB_RX_PREFIX_MARK_VALID_LBN 29
+#define ES_EZ_ESSB_RX_PREFIX_MARK_VALID_WIDTH 1
+#define ES_EZ_ESSB_RX_PREFIX_MATCH_FLAG_LBN 30
+#define ES_EZ_ESSB_RX_PREFIX_MATCH_FLAG_WIDTH 1
+#define ES_EZ_ESSB_RX_PREFIX_HASH_LBN 32
+#define ES_EZ_ESSB_RX_PREFIX_HASH_WIDTH 32
+
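A hedged helper sketch for extracting fields from the 8-byte equal-stride super-buffer prefix defined above, assuming the prefix has been read from the buffer as a little-endian 64-bit value.

/* Hypothetical parser for the equal-stride super-buffer RX prefix. */
static inline uint16_t
essb_rx_prefix_data_len(uint64_t prefix)
{
	return ((uint16_t)((prefix >> ES_EZ_ESSB_RX_PREFIX_DATA_LEN_LBN) &
	    ((1u << ES_EZ_ESSB_RX_PREFIX_DATA_LEN_WIDTH) - 1)));
}

static inline boolean_t
essb_rx_prefix_mark_valid(uint64_t prefix)
{
	return ((boolean_t)((prefix >> ES_EZ_ESSB_RX_PREFIX_MARK_VALID_LBN) & 1));
}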
/*
* An extra flag for the packed stream mode,
* signalling the start of a new buffer
diff --git a/drivers/net/sfc/base/efx_regs_mcdi.h b/drivers/net/sfc/base/efx_regs_mcdi.h
index 7389877a..cf8a7936 100644
--- a/drivers/net/sfc/base/efx_regs_mcdi.h
+++ b/drivers/net/sfc/base/efx_regs_mcdi.h
@@ -280,7 +280,8 @@
#define MC_CMD_ERR_NO_PRIVILEGE 0x1013
/* Workaround 26807 could not be turned on/off because some functions
* have already installed filters. See the comment at
- * MC_CMD_WORKAROUND_BUG26807. */
+ * MC_CMD_WORKAROUND_BUG26807.
+ * May also be returned for other operations such as sub-variant switching. */
#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
/* The clock whose frequency you've attempted to set
 * doesn't exist on this NIC */
@@ -291,6 +292,18 @@
/* This command needs to be processed in the background but there were no
* resources to do so. Send it again after a command has completed. */
#define MC_CMD_ERR_QUEUE_FULL 0x1017
+/* The operation could not be completed because the PCIe link has gone
+ * away. This error code is never expected to be returned over the TLP
+ * transport. */
+#define MC_CMD_ERR_NO_PCIE 0x1018
+/* The operation could not be completed because the datapath has gone
+ * away. This is distinct from MC_CMD_ERR_DATAPATH_DISABLED in that the
+ * datapath absence may be temporary. */
+#define MC_CMD_ERR_NO_DATAPATH 0x1019
+/* The operation could not complete because some VIs are allocated */
+#define MC_CMD_ERR_VIS_PRESENT 0x101a
+/* The operation could not complete because some PIO buffers are allocated */
+#define MC_CMD_ERR_PIOBUFS_PRESENT 0x101b
#define MC_CMD_ERR_CODE_OFST 0
@@ -311,10 +324,17 @@
#define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4)
#define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4)
#define MEDFORD_MC_BOOTROM_COPYCODE_VEC (0x10000 - 3 * 0x4)
-/* Points to the recovery mode entry point. */
+/* Points to the recovery mode entry point. Misnamed but kept for compatibility. */
#define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4)
#define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4)
#define MEDFORD_MC_BOOTROM_NOFLASH_VEC (0x10000 - 2 * 0x4)
+/* Points to the recovery mode entry point. Same as above, but the right name. */
+#define SIENA_MC_BOOTROM_RECOVERY_VEC (0x800 - 2 * 0x4)
+#define HUNT_MC_BOOTROM_RECOVERY_VEC (0x8000 - 2 * 0x4)
+#define MEDFORD_MC_BOOTROM_RECOVERY_VEC (0x10000 - 2 * 0x4)
+
+/* Points to noflash mode entry point. */
+#define MEDFORD_MC_BOOTROM_REAL_NOFLASH_VEC (0x10000 - 4 * 0x4)
/* The command set exported by the boot ROM (MCDI v0) */
#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \
@@ -368,7 +388,7 @@
#define MCDI_EVENT_LEVEL_LBN 33
#define MCDI_EVENT_LEVEL_WIDTH 3
/* enum: Info. */
-#define MCDI_EVENT_LEVEL_INFO 0x0
+#define MCDI_EVENT_LEVEL_INFO 0x0
/* enum: Warning. */
#define MCDI_EVENT_LEVEL_WARN 0x1
/* enum: Error. */
@@ -376,6 +396,7 @@
/* enum: Fatal. */
#define MCDI_EVENT_LEVEL_FATAL 0x3
#define MCDI_EVENT_DATA_OFST 0
+#define MCDI_EVENT_DATA_LEN 4
#define MCDI_EVENT_CMDDONE_SEQ_LBN 0
#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
#define MCDI_EVENT_CMDDONE_DATALEN_LBN 8
@@ -386,14 +407,22 @@
#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
+/* enum: Link is down or link speed could not be determined */
+#define MCDI_EVENT_LINKCHANGE_SPEED_UNKNOWN 0x0
/* enum: 100Mbs */
-#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1
+#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1
/* enum: 1Gbs */
-#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2
+#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2
/* enum: 10Gbs */
-#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3
+#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3
/* enum: 40Gbs */
-#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4
+#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4
+/* enum: 25Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_25G 0x5
+/* enum: 50Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_50G 0x6
+/* enum: 100Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_100G 0x7
#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
@@ -482,8 +511,23 @@
#define MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf
/* enum: Notify that the attempt to run FPGA Controller firmware timedout */
#define MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10
+/* enum: Failure to probe one or more FPGA boot flash chips */
+#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_INVALID 0x11
+/* enum: FPGA boot-flash contains an invalid image header */
+#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_HDR_INVALID 0x12
+/* enum: Failed to program clocks required by the FPGA */
+#define MCDI_EVENT_AOE_FPGA_CLOCKS_PROGRAM_FAILED 0x13
+/* enum: Notify that FPGA Controller is alive to serve MCDI requests */
+#define MCDI_EVENT_AOE_FC_RUNNING 0x14
#define MCDI_EVENT_AOE_ERR_DATA_LBN 8
#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_LBN 8
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8
+/* enum: FC Assert happened, but the register information is not available */
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_SEEN 0x0
+/* enum: The register information for FC Assert is ready for reading by the driver
+ */
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1
#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_LBN 8
#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_WIDTH 8
/* enum: Reading from NV failed */
@@ -536,6 +580,22 @@
#define MCDI_EVENT_MUM_WATCHDOG 0x3
#define MCDI_EVENT_MUM_ERR_DATA_LBN 8
#define MCDI_EVENT_MUM_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_DBRET_SEQ_LBN 0
+#define MCDI_EVENT_DBRET_SEQ_WIDTH 8
+#define MCDI_EVENT_SUC_ERR_TYPE_LBN 0
+#define MCDI_EVENT_SUC_ERR_TYPE_WIDTH 8
+/* enum: Corrupted or bad SUC application. */
+#define MCDI_EVENT_SUC_BAD_APP 0x1
+/* enum: SUC application reported an assert. */
+#define MCDI_EVENT_SUC_ASSERT 0x2
+/* enum: SUC application reported an exception. */
+#define MCDI_EVENT_SUC_EXCEPTION 0x3
+/* enum: SUC watchdog timer expired. */
+#define MCDI_EVENT_SUC_WATCHDOG 0x4
+#define MCDI_EVENT_SUC_ERR_ADDRESS_LBN 8
+#define MCDI_EVENT_SUC_ERR_ADDRESS_WIDTH 24
+#define MCDI_EVENT_SUC_ERR_DATA_LBN 8
+#define MCDI_EVENT_SUC_ERR_DATA_WIDTH 24
#define MCDI_EVENT_DATA_LBN 0
#define MCDI_EVENT_DATA_WIDTH 32
#define MCDI_EVENT_SRC_LBN 36
@@ -569,23 +629,23 @@
/* enum: Transmit error */
#define MCDI_EVENT_CODE_TX_ERR 0xb
/* enum: Tx flush has completed */
-#define MCDI_EVENT_CODE_TX_FLUSH 0xc
+#define MCDI_EVENT_CODE_TX_FLUSH 0xc
/* enum: PTP packet received timestamp */
-#define MCDI_EVENT_CODE_PTP_RX 0xd
+#define MCDI_EVENT_CODE_PTP_RX 0xd
/* enum: PTP NIC failure */
-#define MCDI_EVENT_CODE_PTP_FAULT 0xe
+#define MCDI_EVENT_CODE_PTP_FAULT 0xe
/* enum: PTP PPS event */
-#define MCDI_EVENT_CODE_PTP_PPS 0xf
+#define MCDI_EVENT_CODE_PTP_PPS 0xf
/* enum: Rx flush has completed */
-#define MCDI_EVENT_CODE_RX_FLUSH 0x10
+#define MCDI_EVENT_CODE_RX_FLUSH 0x10
/* enum: Receive error */
#define MCDI_EVENT_CODE_RX_ERR 0x11
/* enum: AOE fault */
-#define MCDI_EVENT_CODE_AOE 0x12
+#define MCDI_EVENT_CODE_AOE 0x12
/* enum: Network port calibration failed (VCAL). */
-#define MCDI_EVENT_CODE_VCAL_FAIL 0x13
+#define MCDI_EVENT_CODE_VCAL_FAIL 0x13
/* enum: HW PPS event */
-#define MCDI_EVENT_CODE_HW_PPS 0x14
+#define MCDI_EVENT_CODE_HW_PPS 0x14
/* enum: The MC has rebooted (huntington and later, siena uses CODE_REBOOT and
* a different format)
*/
@@ -608,73 +668,99 @@
* been processed and it may now resend the command
*/
#define MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d
+/* enum: MCDI command accepted. New commands can be issued but this command is
+ * not done yet.
+ */
+#define MCDI_EVENT_CODE_DBRET 0x1e
+/* enum: The MC has detected a fault on the SUC */
+#define MCDI_EVENT_CODE_SUC 0x1f
/* enum: Artificial event generated by host and posted via MC for test
* purposes.
*/
-#define MCDI_EVENT_CODE_TESTGEN 0xfa
+#define MCDI_EVENT_CODE_TESTGEN 0xfa
#define MCDI_EVENT_CMDDONE_DATA_OFST 0
+#define MCDI_EVENT_CMDDONE_DATA_LEN 4
#define MCDI_EVENT_CMDDONE_DATA_LBN 0
#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32
#define MCDI_EVENT_LINKCHANGE_DATA_OFST 0
+#define MCDI_EVENT_LINKCHANGE_DATA_LEN 4
#define MCDI_EVENT_LINKCHANGE_DATA_LBN 0
#define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32
#define MCDI_EVENT_SENSOREVT_DATA_OFST 0
+#define MCDI_EVENT_SENSOREVT_DATA_LEN 4
#define MCDI_EVENT_SENSOREVT_DATA_LBN 0
#define MCDI_EVENT_SENSOREVT_DATA_WIDTH 32
#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LEN 4
#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0
#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32
#define MCDI_EVENT_TX_ERR_DATA_OFST 0
+#define MCDI_EVENT_TX_ERR_DATA_LEN 4
#define MCDI_EVENT_TX_ERR_DATA_LBN 0
#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32
/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of
* timestamp
*/
#define MCDI_EVENT_PTP_SECONDS_OFST 0
+#define MCDI_EVENT_PTP_SECONDS_LEN 4
#define MCDI_EVENT_PTP_SECONDS_LBN 0
#define MCDI_EVENT_PTP_SECONDS_WIDTH 32
/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of
* timestamp
*/
#define MCDI_EVENT_PTP_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_MAJOR_LEN 4
#define MCDI_EVENT_PTP_MAJOR_LBN 0
#define MCDI_EVENT_PTP_MAJOR_WIDTH 32
/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field
* of timestamp
*/
#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0
+#define MCDI_EVENT_PTP_NANOSECONDS_LEN 4
#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0
#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of
* timestamp
*/
#define MCDI_EVENT_PTP_MINOR_OFST 0
+#define MCDI_EVENT_PTP_MINOR_LEN 4
#define MCDI_EVENT_PTP_MINOR_LBN 0
#define MCDI_EVENT_PTP_MINOR_WIDTH 32
/* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet
*/
#define MCDI_EVENT_PTP_UUID_OFST 0
+#define MCDI_EVENT_PTP_UUID_LEN 4
#define MCDI_EVENT_PTP_UUID_LBN 0
#define MCDI_EVENT_PTP_UUID_WIDTH 32
#define MCDI_EVENT_RX_ERR_DATA_OFST 0
+#define MCDI_EVENT_RX_ERR_DATA_LEN 4
#define MCDI_EVENT_RX_ERR_DATA_LBN 0
#define MCDI_EVENT_RX_ERR_DATA_WIDTH 32
#define MCDI_EVENT_PAR_ERR_DATA_OFST 0
+#define MCDI_EVENT_PAR_ERR_DATA_LEN 4
#define MCDI_EVENT_PAR_ERR_DATA_LBN 0
#define MCDI_EVENT_PAR_ERR_DATA_WIDTH 32
#define MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_LEN 4
#define MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0
#define MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LEN 4
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
/* For CODE_PTP_TIME events, the major value of the PTP clock */
#define MCDI_EVENT_PTP_TIME_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_LEN 4
#define MCDI_EVENT_PTP_TIME_MAJOR_LBN 0
#define MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32
/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
#define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
#define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
+/* For CODE_PTP_TIME events, most significant bits of the minor value of the
+ * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_19.
+ */
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_LBN 36
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_WIDTH 8
/* For CODE_PTP_TIME events where report sync status is enabled, indicates
* whether the NIC clock has ever been set
*/
@@ -690,10 +776,17 @@
*/
#define MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38
#define MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6
+/* For CODE_PTP_TIME events, most significant bits of the minor value of the
+ * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_21.
+ */
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_LBN 38
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_WIDTH 6
#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LEN 4
#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0
#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32
#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LEN 4
#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0
#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32
/* Zero means that the request has been completed or authorized, and the driver
@@ -702,6 +795,10 @@
*/
#define MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36
#define MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8
+#define MCDI_EVENT_DBRET_DATA_OFST 0
+#define MCDI_EVENT_DBRET_DATA_LEN 4
+#define MCDI_EVENT_DBRET_DATA_LBN 0
+#define MCDI_EVENT_DBRET_DATA_WIDTH 32
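/* For context: the _LBN/_WIDTH pairs above give the bit position and bit
 * width of a field inside the 64-bit MCDI event, while _OFST/_LEN give the
 * byte offset and byte length of the containing dword. A minimal sketch of
 * consuming such a pair follows; the helper name mcdi_event_field and the
 * caller shown are assumptions for illustration only and are not defined in
 * this header.
 */
#include <stdint.h>

/* Extract WIDTH bits starting at bit LBN from a raw 64-bit event value.
 * Field widths in this header are at most 32 bits, so a uint32_t return
 * value is sufficient.
 */
static inline uint32_t
mcdi_event_field(uint64_t ev, unsigned int lbn, unsigned int width)
{
	uint64_t mask = (width >= 64) ? ~0ULL : ((1ULL << width) - 1);

	return (uint32_t)((ev >> lbn) & mask);
}

/* Hypothetical usage with the DBRET field defined above:
 *   uint32_t dbret_data = mcdi_event_field(ev,
 *       MCDI_EVENT_DBRET_DATA_LBN, MCDI_EVENT_DBRET_DATA_WIDTH);
 */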
/* FCDI_EVENT structuredef */
#define FCDI_EVENT_LEN 8
@@ -710,7 +807,7 @@
#define FCDI_EVENT_LEVEL_LBN 33
#define FCDI_EVENT_LEVEL_WIDTH 3
/* enum: Info. */
-#define FCDI_EVENT_LEVEL_INFO 0x0
+#define FCDI_EVENT_LEVEL_INFO 0x0
/* enum: Warning. */
#define FCDI_EVENT_LEVEL_WARN 0x1
/* enum: Error. */
@@ -718,6 +815,7 @@
/* enum: Fatal. */
#define FCDI_EVENT_LEVEL_FATAL 0x3
#define FCDI_EVENT_DATA_OFST 0
+#define FCDI_EVENT_DATA_LEN 4
#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0
#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */
@@ -757,6 +855,7 @@
#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LEN 4
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
#define FCDI_EVENT_ASSERT_TYPE_LBN 36
@@ -764,12 +863,15 @@
#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LEN 4
#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
+#define FCDI_EVENT_LINK_STATE_DATA_LEN 4
#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
#define FCDI_EVENT_PTP_STATE_OFST 0
+#define FCDI_EVENT_PTP_STATE_LEN 4
#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
@@ -778,6 +880,7 @@
#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LEN 4
#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
/* Index of MC port being referred to */
@@ -785,9 +888,11 @@
#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
/* FC Port index that matches the MC port index in SRC */
#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_LEN 4
#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
#define FCDI_EVENT_BOOT_RESULT_OFST 0
+#define FCDI_EVENT_BOOT_RESULT_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
#define FCDI_EVENT_BOOT_RESULT_LBN 0
@@ -804,14 +909,17 @@
#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
/* Number of timestamps following */
#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_LEN 4
#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
/* Seconds field of a timestamp record */
#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LEN 4
#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
/* Nanoseconds field of a timestamp record */
#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LEN 4
#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
/* Timestamp records comprising the event */
@@ -831,7 +939,7 @@
#define MUM_EVENT_LEVEL_LBN 33
#define MUM_EVENT_LEVEL_WIDTH 3
/* enum: Info. */
-#define MUM_EVENT_LEVEL_INFO 0x0
+#define MUM_EVENT_LEVEL_INFO 0x0
/* enum: Warning. */
#define MUM_EVENT_LEVEL_WARN 0x1
/* enum: Error. */
@@ -839,6 +947,7 @@
/* enum: Fatal. */
#define MUM_EVENT_LEVEL_FATAL 0x3
#define MUM_EVENT_DATA_OFST 0
+#define MUM_EVENT_DATA_LEN 4
#define MUM_EVENT_SENSOR_ID_LBN 0
#define MUM_EVENT_SENSOR_ID_WIDTH 8
/* Enum values, see field(s): */
@@ -876,18 +985,23 @@
/* enum: Link fault has been asserted, or has cleared. */
#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
#define MUM_EVENT_SENSOR_DATA_OFST 0
+#define MUM_EVENT_SENSOR_DATA_LEN 4
#define MUM_EVENT_SENSOR_DATA_LBN 0
#define MUM_EVENT_SENSOR_DATA_WIDTH 32
#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0
+#define MUM_EVENT_PORT_PHY_FLAGS_LEN 4
#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0
#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_LEN 4
#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
#define MUM_EVENT_PORT_PHY_CAPS_OFST 0
+#define MUM_EVENT_PORT_PHY_CAPS_LEN 4
#define MUM_EVENT_PORT_PHY_CAPS_LBN 0
#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
#define MUM_EVENT_PORT_PHY_TECH_OFST 0
+#define MUM_EVENT_PORT_PHY_TECH_LEN 4
#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
@@ -911,7 +1025,9 @@
/***********************************/
/* MC_CMD_READ32
- * Read multiple 32byte words from MC memory.
+ * Read multiple 32byte words from MC memory. Note - this command really
+ * belongs to INSECURE category but is required by shmboot. The command handler
+ * has additional checks to reject insecure calls.
*/
#define MC_CMD_READ32 0x1
#undef MC_CMD_0x1_PRIVILEGE_CTG
@@ -921,7 +1037,9 @@
/* MC_CMD_READ32_IN msgrequest */
#define MC_CMD_READ32_IN_LEN 8
#define MC_CMD_READ32_IN_ADDR_OFST 0
+#define MC_CMD_READ32_IN_ADDR_LEN 4
#define MC_CMD_READ32_IN_NUMWORDS_OFST 4
+#define MC_CMD_READ32_IN_NUMWORDS_LEN 4
/* MC_CMD_READ32_OUT msgresponse */
#define MC_CMD_READ32_OUT_LENMIN 4
@@ -940,13 +1058,14 @@
#define MC_CMD_WRITE32 0x2
#undef MC_CMD_0x2_PRIVILEGE_CTG
-#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_WRITE32_IN msgrequest */
#define MC_CMD_WRITE32_IN_LENMIN 8
#define MC_CMD_WRITE32_IN_LENMAX 252
#define MC_CMD_WRITE32_IN_LEN(num) (4+4*(num))
#define MC_CMD_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_WRITE32_IN_ADDR_LEN 4
#define MC_CMD_WRITE32_IN_BUFFER_OFST 4
#define MC_CMD_WRITE32_IN_BUFFER_LEN 4
#define MC_CMD_WRITE32_IN_BUFFER_MINNUM 1
@@ -958,12 +1077,14 @@
/***********************************/
/* MC_CMD_COPYCODE
- * Copy MC code between two locations and jump.
+ * Copy MC code between two locations and jump. Note - this command really
+ * belongs to INSECURE category but is required by shmboot. The command handler
+ * has additional checks to reject insecure calls.
*/
#define MC_CMD_COPYCODE 0x3
#undef MC_CMD_0x3_PRIVILEGE_CTG
-#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_COPYCODE_IN msgrequest */
#define MC_CMD_COPYCODE_IN_LEN 16
@@ -974,6 +1095,7 @@
* is a bitfield, with each bit as documented below.
*/
#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+#define MC_CMD_COPYCODE_IN_SRC_ADDR_LEN 4
/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
@@ -999,9 +1121,12 @@
#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1
/* Destination address */
#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
+#define MC_CMD_COPYCODE_IN_DEST_ADDR_LEN 4
#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
+#define MC_CMD_COPYCODE_IN_NUMWORDS_LEN 4
/* Address of where to jump after copy. */
#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
+#define MC_CMD_COPYCODE_IN_JUMP_LEN 4
/* enum: Control should return to the caller rather than jumping */
#define MC_CMD_COPYCODE_JUMP_NONE 0x1
@@ -1016,12 +1141,13 @@
#define MC_CMD_SET_FUNC 0x4
#undef MC_CMD_0x4_PRIVILEGE_CTG
-#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_SET_FUNC_IN msgrequest */
#define MC_CMD_SET_FUNC_IN_LEN 4
/* Set function */
#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
+#define MC_CMD_SET_FUNC_IN_FUNC_LEN 4
/* MC_CMD_SET_FUNC_OUT msgresponse */
#define MC_CMD_SET_FUNC_OUT_LEN 0
@@ -1034,7 +1160,7 @@
#define MC_CMD_GET_BOOT_STATUS 0x5
#undef MC_CMD_0x5_PRIVILEGE_CTG
-#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_GET_BOOT_STATUS_IN msgrequest */
#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0
@@ -1043,9 +1169,11 @@
#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
/* ?? */
#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_LEN 4
/* enum: indicates that the MC wasn't flash booted */
-#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_LEN 4
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1
@@ -1069,11 +1197,13 @@
#define MC_CMD_GET_ASSERTS_IN_LEN 4
/* Set to clear assertion */
#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
+#define MC_CMD_GET_ASSERTS_IN_CLEAR_LEN 4
/* MC_CMD_GET_ASSERTS_OUT msgresponse */
#define MC_CMD_GET_ASSERTS_OUT_LEN 140
/* Assertion status flag. */
#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
+#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_LEN 4
/* enum: No assertions have failed. */
#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1
/* enum: A system-level assertion has failed. */
@@ -1086,6 +1216,7 @@
#define MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5
/* Failing PC value */
#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
+#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_LEN 4
/* Saved GP regs */
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
@@ -1096,7 +1227,9 @@
#define MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057
/* Failing thread address */
#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
+#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_LEN 4
#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
+#define MC_CMD_GET_ASSERTS_OUT_RESERVED_LEN 4
/***********************************/
@@ -1113,12 +1246,14 @@
#define MC_CMD_LOG_CTRL_IN_LEN 8
/* Log destination */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_LEN 4
/* enum: UART. */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
/* enum: Event queue. */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
/* Legacy argument. Must be zero. */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_LEN 4
/* MC_CMD_LOG_CTRL_OUT msgresponse */
#define MC_CMD_LOG_CTRL_OUT_LEN 0
@@ -1140,23 +1275,29 @@
#define MC_CMD_GET_VERSION_EXT_IN_LEN 4
/* placeholder, set to 0 */
#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0
+#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_LEN 4
/* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */
#define MC_CMD_GET_VERSION_V0_OUT_LEN 4
#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4
/* enum: Reserved version number to indicate "any" version. */
#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
/* enum: Bootrom version value for Siena. */
#define MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000
/* enum: Bootrom version value for Huntington. */
#define MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001
+/* enum: Bootrom version value for Medford2. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_MEDFORD2_BOOTROM 0xb0070002
/* MC_CMD_GET_VERSION_OUT msgresponse */
#define MC_CMD_GET_VERSION_OUT_LEN 32
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4
+#define MC_CMD_GET_VERSION_OUT_PCOL_LEN 4
/* 128bit mask of functions supported by the current firmware */
#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8
#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16
@@ -1168,9 +1309,11 @@
/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4
+#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_LEN 4
/* 128bit mask of functions supported by the current firmware */
#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8
#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16
@@ -1184,2421 +1327,6 @@
/***********************************/
-/* MC_CMD_FC
- * Perform an FC operation
- */
-#define MC_CMD_FC 0x9
-
-/* MC_CMD_FC_IN msgrequest */
-#define MC_CMD_FC_IN_LEN 4
-#define MC_CMD_FC_IN_OP_HDR_OFST 0
-#define MC_CMD_FC_IN_OP_LBN 0
-#define MC_CMD_FC_IN_OP_WIDTH 8
-/* enum: NULL MCDI command to FC. */
-#define MC_CMD_FC_OP_NULL 0x1
-/* enum: Unused opcode */
-#define MC_CMD_FC_OP_UNUSED 0x2
-/* enum: MAC driver commands */
-#define MC_CMD_FC_OP_MAC 0x3
-/* enum: Read FC memory */
-#define MC_CMD_FC_OP_READ32 0x4
-/* enum: Write to FC memory */
-#define MC_CMD_FC_OP_WRITE32 0x5
-/* enum: Read FC memory */
-#define MC_CMD_FC_OP_TRC_READ 0x6
-/* enum: Write to FC memory */
-#define MC_CMD_FC_OP_TRC_WRITE 0x7
-/* enum: FC firmware Version */
-#define MC_CMD_FC_OP_GET_VERSION 0x8
-/* enum: Read FC memory */
-#define MC_CMD_FC_OP_TRC_RX_READ 0x9
-/* enum: Write to FC memory */
-#define MC_CMD_FC_OP_TRC_RX_WRITE 0xa
-/* enum: SFP parameters */
-#define MC_CMD_FC_OP_SFP 0xb
-/* enum: DDR3 test */
-#define MC_CMD_FC_OP_DDR_TEST 0xc
-/* enum: Get Crash context from FC */
-#define MC_CMD_FC_OP_GET_ASSERT 0xd
-/* enum: Get FPGA Build registers */
-#define MC_CMD_FC_OP_FPGA_BUILD 0xe
-/* enum: Read map support commands */
-#define MC_CMD_FC_OP_READ_MAP 0xf
-/* enum: FC Capabilities */
-#define MC_CMD_FC_OP_CAPABILITIES 0x10
-/* enum: FC Global flags */
-#define MC_CMD_FC_OP_GLOBAL_FLAGS 0x11
-/* enum: FC IO using relative addressing modes */
-#define MC_CMD_FC_OP_IO_REL 0x12
-/* enum: FPGA link information */
-#define MC_CMD_FC_OP_UHLINK 0x13
-/* enum: Configure loopbacks and link on FPGA ports */
-#define MC_CMD_FC_OP_SET_LINK 0x14
-/* enum: Licensing operations relating to AOE */
-#define MC_CMD_FC_OP_LICENSE 0x15
-/* enum: Startup information to the FC */
-#define MC_CMD_FC_OP_STARTUP 0x16
-/* enum: Configure a DMA read */
-#define MC_CMD_FC_OP_DMA 0x17
-/* enum: Configure a timed read */
-#define MC_CMD_FC_OP_TIMED_READ 0x18
-/* enum: Control UART logging */
-#define MC_CMD_FC_OP_LOG 0x19
-/* enum: Get the value of a given clock_id */
-#define MC_CMD_FC_OP_CLOCK 0x1a
-/* enum: DDR3/QDR3 parameters */
-#define MC_CMD_FC_OP_DDR 0x1b
-/* enum: PTP and timestamp control */
-#define MC_CMD_FC_OP_TIMESTAMP 0x1c
-/* enum: Commands for SPI Flash interface */
-#define MC_CMD_FC_OP_SPI 0x1d
-/* enum: Commands for diagnostic components */
-#define MC_CMD_FC_OP_DIAG 0x1e
-/* enum: External AOE port. */
-#define MC_CMD_FC_IN_PORT_EXT_OFST 0x0
-/* enum: Internal AOE port. */
-#define MC_CMD_FC_IN_PORT_INT_OFST 0x40
-
-/* MC_CMD_FC_IN_NULL msgrequest */
-#define MC_CMD_FC_IN_NULL_LEN 4
-#define MC_CMD_FC_IN_CMD_OFST 0
-
-/* MC_CMD_FC_IN_PHY msgrequest */
-#define MC_CMD_FC_IN_PHY_LEN 5
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* FC PHY driver operation code */
-#define MC_CMD_FC_IN_PHY_OP_OFST 4
-#define MC_CMD_FC_IN_PHY_OP_LEN 1
-/* enum: PHY init handler */
-#define MC_CMD_FC_OP_PHY_OP_INIT 0x1
-/* enum: PHY reconfigure handler */
-#define MC_CMD_FC_OP_PHY_OP_RECONFIGURE 0x2
-/* enum: PHY reboot handler */
-#define MC_CMD_FC_OP_PHY_OP_REBOOT 0x3
-/* enum: PHY get_supported_cap handler */
-#define MC_CMD_FC_OP_PHY_OP_GET_SUPPORTED_CAP 0x4
-/* enum: PHY get_config handler */
-#define MC_CMD_FC_OP_PHY_OP_GET_CONFIG 0x5
-/* enum: PHY get_media_info handler */
-#define MC_CMD_FC_OP_PHY_OP_GET_MEDIA_INFO 0x6
-/* enum: PHY set_led handler */
-#define MC_CMD_FC_OP_PHY_OP_SET_LED 0x7
-/* enum: PHY lasi_interrupt handler */
-#define MC_CMD_FC_OP_PHY_OP_LASI_INTERRUPT 0x8
-/* enum: PHY check_link handler */
-#define MC_CMD_FC_OP_PHY_OP_CHECK_LINK 0x9
-/* enum: PHY fill_stats handler */
-#define MC_CMD_FC_OP_PHY_OP_FILL_STATS 0xa
-/* enum: PHY bpx_link_state_changed handler */
-#define MC_CMD_FC_OP_PHY_OP_BPX_LINK_STATE_CHANGED 0xb
-/* enum: PHY get_state handler */
-#define MC_CMD_FC_OP_PHY_OP_GET_STATE 0xc
-/* enum: PHY start_bist handler */
-#define MC_CMD_FC_OP_PHY_OP_START_BIST 0xd
-/* enum: PHY poll_bist handler */
-#define MC_CMD_FC_OP_PHY_OP_POLL_BIST 0xe
-/* enum: PHY nvram_test handler */
-#define MC_CMD_FC_OP_PHY_OP_NVRAM_TEST 0xf
-/* enum: PHY relinquish handler */
-#define MC_CMD_FC_OP_PHY_OP_RELINQUISH_SPI 0x10
-/* enum: PHY read connection from FC - may not be required */
-#define MC_CMD_FC_OP_PHY_OP_GET_CONNECTION 0x11
-/* enum: PHY read flags from FC - may not be required */
-#define MC_CMD_FC_OP_PHY_OP_GET_FLAGS 0x12
-
-/* MC_CMD_FC_IN_PHY_INIT msgrequest */
-#define MC_CMD_FC_IN_PHY_INIT_LEN 4
-#define MC_CMD_FC_IN_PHY_CMD_OFST 0
-
-/* MC_CMD_FC_IN_MAC msgrequest */
-#define MC_CMD_FC_IN_MAC_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_MAC_HEADER_OFST 4
-#define MC_CMD_FC_IN_MAC_OP_LBN 0
-#define MC_CMD_FC_IN_MAC_OP_WIDTH 8
-/* enum: MAC reconfigure handler */
-#define MC_CMD_FC_OP_MAC_OP_RECONFIGURE 0x1
-/* enum: MAC Set command - same as MC_CMD_SET_MAC */
-#define MC_CMD_FC_OP_MAC_OP_SET_LINK 0x2
-/* enum: MAC statistics */
-#define MC_CMD_FC_OP_MAC_OP_GET_STATS 0x3
-/* enum: MAC RX statistics */
-#define MC_CMD_FC_OP_MAC_OP_GET_RX_STATS 0x6
-/* enum: MAC TX statistics */
-#define MC_CMD_FC_OP_MAC_OP_GET_TX_STATS 0x7
-/* enum: MAC Read status */
-#define MC_CMD_FC_OP_MAC_OP_READ_STATUS 0x8
-#define MC_CMD_FC_IN_MAC_PORT_TYPE_LBN 8
-#define MC_CMD_FC_IN_MAC_PORT_TYPE_WIDTH 8
-/* enum: External FPGA port. */
-#define MC_CMD_FC_PORT_EXT 0x0
-/* enum: Internal Siena-facing FPGA ports. */
-#define MC_CMD_FC_PORT_INT 0x1
-#define MC_CMD_FC_IN_MAC_PORT_IDX_LBN 16
-#define MC_CMD_FC_IN_MAC_PORT_IDX_WIDTH 8
-#define MC_CMD_FC_IN_MAC_CMD_FORMAT_LBN 24
-#define MC_CMD_FC_IN_MAC_CMD_FORMAT_WIDTH 8
-/* enum: Default FC command format; the fields PORT_TYPE and PORT_IDX are
- * irrelevant. Port number is derived from pci_fn; passed in FC header.
- */
-#define MC_CMD_FC_OP_MAC_CMD_FORMAT_DEFAULT 0x0
-/* enum: Override default port number. Port number determined by fields
- * PORT_TYPE and PORT_IDX.
- */
-#define MC_CMD_FC_OP_MAC_CMD_FORMAT_PORT_OVERRIDE 0x1
-
-/* MC_CMD_FC_IN_MAC_RECONFIGURE msgrequest */
-#define MC_CMD_FC_IN_MAC_RECONFIGURE_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
-
-/* MC_CMD_FC_IN_MAC_SET_LINK msgrequest */
-#define MC_CMD_FC_IN_MAC_SET_LINK_LEN 32
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
-/* MTU size */
-#define MC_CMD_FC_IN_MAC_SET_LINK_MTU_OFST 8
-/* Drain Tx FIFO */
-#define MC_CMD_FC_IN_MAC_SET_LINK_DRAIN_OFST 12
-#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_OFST 16
-#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_LEN 8
-#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_LO_OFST 16
-#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_HI_OFST 20
-#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_OFST 24
-#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_UNICAST_LBN 0
-#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_UNICAST_WIDTH 1
-#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_BRDCAST_LBN 1
-#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_BRDCAST_WIDTH 1
-#define MC_CMD_FC_IN_MAC_SET_LINK_FCNTL_OFST 28
-
-/* MC_CMD_FC_IN_MAC_READ_STATUS msgrequest */
-#define MC_CMD_FC_IN_MAC_READ_STATUS_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
-
-/* MC_CMD_FC_IN_MAC_GET_RX_STATS msgrequest */
-#define MC_CMD_FC_IN_MAC_GET_RX_STATS_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
-
-/* MC_CMD_FC_IN_MAC_GET_TX_STATS msgrequest */
-#define MC_CMD_FC_IN_MAC_GET_TX_STATS_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
-
-/* MC_CMD_FC_IN_MAC_GET_STATS msgrequest */
-#define MC_CMD_FC_IN_MAC_GET_STATS_LEN 20
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
-/* MC Statistics index */
-#define MC_CMD_FC_IN_MAC_GET_STATS_STATS_INDEX_OFST 8
-#define MC_CMD_FC_IN_MAC_GET_STATS_FLAGS_OFST 12
-#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_ALL_LBN 0
-#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_ALL_WIDTH 1
-#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_LBN 1
-#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_WIDTH 1
-#define MC_CMD_FC_IN_MAC_GET_STATS_UPDATE_LBN 2
-#define MC_CMD_FC_IN_MAC_GET_STATS_UPDATE_WIDTH 1
-/* Number of statistics to read */
-#define MC_CMD_FC_IN_MAC_GET_STATS_NUM_OFST 16
-#define MC_CMD_FC_MAC_NSTATS_PER_BLOCK 0x1e /* enum */
-#define MC_CMD_FC_MAC_NBYTES_PER_STAT 0x8 /* enum */
-
-/* MC_CMD_FC_IN_READ32 msgrequest */
-#define MC_CMD_FC_IN_READ32_LEN 16
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_READ32_ADDR_HI_OFST 4
-#define MC_CMD_FC_IN_READ32_ADDR_LO_OFST 8
-#define MC_CMD_FC_IN_READ32_NUMWORDS_OFST 12
-
-/* MC_CMD_FC_IN_WRITE32 msgrequest */
-#define MC_CMD_FC_IN_WRITE32_LENMIN 16
-#define MC_CMD_FC_IN_WRITE32_LENMAX 252
-#define MC_CMD_FC_IN_WRITE32_LEN(num) (12+4*(num))
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_WRITE32_ADDR_HI_OFST 4
-#define MC_CMD_FC_IN_WRITE32_ADDR_LO_OFST 8
-#define MC_CMD_FC_IN_WRITE32_BUFFER_OFST 12
-#define MC_CMD_FC_IN_WRITE32_BUFFER_LEN 4
-#define MC_CMD_FC_IN_WRITE32_BUFFER_MINNUM 1
-#define MC_CMD_FC_IN_WRITE32_BUFFER_MAXNUM 60
-
-/* MC_CMD_FC_IN_TRC_READ msgrequest */
-#define MC_CMD_FC_IN_TRC_READ_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_TRC_READ_TRC_OFST 4
-#define MC_CMD_FC_IN_TRC_READ_CHANNEL_OFST 8
-
-/* MC_CMD_FC_IN_TRC_WRITE msgrequest */
-#define MC_CMD_FC_IN_TRC_WRITE_LEN 28
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_TRC_WRITE_TRC_OFST 4
-#define MC_CMD_FC_IN_TRC_WRITE_CHANNEL_OFST 8
-#define MC_CMD_FC_IN_TRC_WRITE_DATA_OFST 12
-#define MC_CMD_FC_IN_TRC_WRITE_DATA_LEN 4
-#define MC_CMD_FC_IN_TRC_WRITE_DATA_NUM 4
-
-/* MC_CMD_FC_IN_GET_VERSION msgrequest */
-#define MC_CMD_FC_IN_GET_VERSION_LEN 4
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-
-/* MC_CMD_FC_IN_TRC_RX_READ msgrequest */
-#define MC_CMD_FC_IN_TRC_RX_READ_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_TRC_RX_READ_TRC_OFST 4
-#define MC_CMD_FC_IN_TRC_RX_READ_CHANNEL_OFST 8
-
-/* MC_CMD_FC_IN_TRC_RX_WRITE msgrequest */
-#define MC_CMD_FC_IN_TRC_RX_WRITE_LEN 20
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_TRC_RX_WRITE_TRC_OFST 4
-#define MC_CMD_FC_IN_TRC_RX_WRITE_CHANNEL_OFST 8
-#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_OFST 12
-#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_LEN 4
-#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_NUM 2
-
-/* MC_CMD_FC_IN_SFP msgrequest */
-#define MC_CMD_FC_IN_SFP_LEN 28
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* Link speed is 100, 1000, 10000, 40000 */
-#define MC_CMD_FC_IN_SFP_SPEED_OFST 4
-/* Length of copper cable - zero when not relevant (e.g. if cable is fibre) */
-#define MC_CMD_FC_IN_SFP_COPPER_LEN_OFST 8
-/* Not relevant for cards with QSFP modules. For older cards, true if module is
- * a dual speed SFP+ module.
- */
-#define MC_CMD_FC_IN_SFP_DUAL_SPEED_OFST 12
-/* True if an SFP Module is present (other fields valid when true) */
-#define MC_CMD_FC_IN_SFP_PRESENT_OFST 16
-/* The type of the SFP+ Module. For later cards with QSFP modules, this field
- * is unused and the type is communicated by other means.
- */
-#define MC_CMD_FC_IN_SFP_TYPE_OFST 20
-/* Capabilities corresponding to 1 bits. */
-#define MC_CMD_FC_IN_SFP_CAPS_OFST 24
-
-/* MC_CMD_FC_IN_DDR_TEST msgrequest */
-#define MC_CMD_FC_IN_DDR_TEST_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4
-#define MC_CMD_FC_IN_DDR_TEST_OP_LBN 0
-#define MC_CMD_FC_IN_DDR_TEST_OP_WIDTH 8
-/* enum: DRAM Test Start */
-#define MC_CMD_FC_OP_DDR_TEST_START 0x1
-/* enum: DRAM Test Poll */
-#define MC_CMD_FC_OP_DDR_TEST_POLL 0x2
-
-/* MC_CMD_FC_IN_DDR_TEST_START msgrequest */
-#define MC_CMD_FC_IN_DDR_TEST_START_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 */
-#define MC_CMD_FC_IN_DDR_TEST_START_MASK_OFST 8
-#define MC_CMD_FC_IN_DDR_TEST_START_T0_LBN 0
-#define MC_CMD_FC_IN_DDR_TEST_START_T0_WIDTH 1
-#define MC_CMD_FC_IN_DDR_TEST_START_T1_LBN 1
-#define MC_CMD_FC_IN_DDR_TEST_START_T1_WIDTH 1
-#define MC_CMD_FC_IN_DDR_TEST_START_B0_LBN 2
-#define MC_CMD_FC_IN_DDR_TEST_START_B0_WIDTH 1
-#define MC_CMD_FC_IN_DDR_TEST_START_B1_LBN 3
-#define MC_CMD_FC_IN_DDR_TEST_START_B1_WIDTH 1
-
-/* MC_CMD_FC_IN_DDR_TEST_POLL msgrequest */
-#define MC_CMD_FC_IN_DDR_TEST_POLL_LEN 12
-#define MC_CMD_FC_IN_DDR_TEST_CMD_OFST 0
-/* MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 */
-/* Clear previous test result and prepare for restarting DDR test */
-#define MC_CMD_FC_IN_DDR_TEST_POLL_CLEAR_RESULT_FOR_DDR_TEST_OFST 8
-
-/* MC_CMD_FC_IN_GET_ASSERT msgrequest */
-#define MC_CMD_FC_IN_GET_ASSERT_LEN 4
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-
-/* MC_CMD_FC_IN_FPGA_BUILD msgrequest */
-#define MC_CMD_FC_IN_FPGA_BUILD_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* FPGA build info operation code */
-#define MC_CMD_FC_IN_FPGA_BUILD_OP_OFST 4
-/* enum: Get the build registers */
-#define MC_CMD_FC_IN_FPGA_BUILD_BUILD 0x1
-/* enum: Get the services registers */
-#define MC_CMD_FC_IN_FPGA_BUILD_SERVICES 0x2
-/* enum: Get the BSP version */
-#define MC_CMD_FC_IN_FPGA_BUILD_BSP_VERSION 0x3
-/* enum: Get build register for V2 (SFA974X) */
-#define MC_CMD_FC_IN_FPGA_BUILD_BUILD_V2 0x4
-/* enum: Get the services register for V2 (SFA974X) */
-#define MC_CMD_FC_IN_FPGA_BUILD_SERVICES_V2 0x5
-
-/* MC_CMD_FC_IN_READ_MAP msgrequest */
-#define MC_CMD_FC_IN_READ_MAP_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4
-#define MC_CMD_FC_IN_READ_MAP_OP_LBN 0
-#define MC_CMD_FC_IN_READ_MAP_OP_WIDTH 8
-/* enum: Get the number of map regions */
-#define MC_CMD_FC_OP_READ_MAP_COUNT 0x1
-/* enum: Get the specified map */
-#define MC_CMD_FC_OP_READ_MAP_INDEX 0x2
-
-/* MC_CMD_FC_IN_READ_MAP_COUNT msgrequest */
-#define MC_CMD_FC_IN_READ_MAP_COUNT_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 */
-
-/* MC_CMD_FC_IN_READ_MAP_INDEX msgrequest */
-#define MC_CMD_FC_IN_READ_MAP_INDEX_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 */
-#define MC_CMD_FC_IN_MAP_INDEX_OFST 8
-
-/* MC_CMD_FC_IN_CAPABILITIES msgrequest */
-#define MC_CMD_FC_IN_CAPABILITIES_LEN 4
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-
-/* MC_CMD_FC_IN_GLOBAL_FLAGS msgrequest */
-#define MC_CMD_FC_IN_GLOBAL_FLAGS_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_GLOBAL_FLAGS_FLAGS_OFST 4
-#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_CABLE_PLUGGED_IN_LBN 0
-#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_CABLE_PLUGGED_IN_WIDTH 1
-#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_LINK_MONITORING_LBN 1
-#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_LINK_MONITORING_WIDTH 1
-#define MC_CMD_FC_IN_GLOBAL_FLAGS_DFE_ENABLE_LBN 2
-#define MC_CMD_FC_IN_GLOBAL_FLAGS_DFE_ENABLE_WIDTH 1
-#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_EYE_ENABLE_LBN 3
-#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_EYE_ENABLE_WIDTH 1
-#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_TUNING_ENABLE_LBN 4
-#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_TUNING_ENABLE_WIDTH 1
-#define MC_CMD_FC_IN_GLOBAL_FLAGS_OFFCAL_ENABLE_LBN 5
-#define MC_CMD_FC_IN_GLOBAL_FLAGS_OFFCAL_ENABLE_WIDTH 1
-
-/* MC_CMD_FC_IN_IO_REL msgrequest */
-#define MC_CMD_FC_IN_IO_REL_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_IO_REL_HEADER_OFST 4
-#define MC_CMD_FC_IN_IO_REL_OP_LBN 0
-#define MC_CMD_FC_IN_IO_REL_OP_WIDTH 8
-/* enum: Get the base address that the FC applies to relative commands */
-#define MC_CMD_FC_IN_IO_REL_GET_ADDR 0x1
-/* enum: Read data */
-#define MC_CMD_FC_IN_IO_REL_READ32 0x2
-/* enum: Write data */
-#define MC_CMD_FC_IN_IO_REL_WRITE32 0x3
-#define MC_CMD_FC_IN_IO_REL_COMP_TYPE_LBN 8
-#define MC_CMD_FC_IN_IO_REL_COMP_TYPE_WIDTH 8
-/* enum: Application address space */
-#define MC_CMD_FC_COMP_TYPE_APP_ADDR_SPACE 0x1
-/* enum: Flash address space */
-#define MC_CMD_FC_COMP_TYPE_FLASH 0x2
-
-/* MC_CMD_FC_IN_IO_REL_GET_ADDR msgrequest */
-#define MC_CMD_FC_IN_IO_REL_GET_ADDR_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */
-
-/* MC_CMD_FC_IN_IO_REL_READ32 msgrequest */
-#define MC_CMD_FC_IN_IO_REL_READ32_LEN 20
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */
-#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_HI_OFST 8
-#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_LO_OFST 12
-#define MC_CMD_FC_IN_IO_REL_READ32_NUMWORDS_OFST 16
-
-/* MC_CMD_FC_IN_IO_REL_WRITE32 msgrequest */
-#define MC_CMD_FC_IN_IO_REL_WRITE32_LENMIN 20
-#define MC_CMD_FC_IN_IO_REL_WRITE32_LENMAX 252
-#define MC_CMD_FC_IN_IO_REL_WRITE32_LEN(num) (16+4*(num))
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */
-#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_HI_OFST 8
-#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_LO_OFST 12
-#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_OFST 16
-#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_LEN 4
-#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_MINNUM 1
-#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_MAXNUM 59
-
-/* MC_CMD_FC_IN_UHLINK msgrequest */
-#define MC_CMD_FC_IN_UHLINK_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_UHLINK_HEADER_OFST 4
-#define MC_CMD_FC_IN_UHLINK_OP_LBN 0
-#define MC_CMD_FC_IN_UHLINK_OP_WIDTH 8
-/* enum: Get PHY configuration info */
-#define MC_CMD_FC_OP_UHLINK_PHY 0x1
-/* enum: Get MAC configuration info */
-#define MC_CMD_FC_OP_UHLINK_MAC 0x2
-/* enum: Get Rx eye table */
-#define MC_CMD_FC_OP_UHLINK_RX_EYE 0x3
-/* enum: Get Rx eye plot */
-#define MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT 0x4
-/* enum: Get Rx eye plot */
-#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT 0x5
-/* enum: Retune Rx settings */
-#define MC_CMD_FC_OP_UHLINK_RX_TUNE 0x6
-/* enum: Set loopback mode on fpga port */
-#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET 0x7
-/* enum: Get loopback mode config state on fpga port */
-#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET 0x8
-#define MC_CMD_FC_IN_UHLINK_PORT_TYPE_LBN 8
-#define MC_CMD_FC_IN_UHLINK_PORT_TYPE_WIDTH 8
-#define MC_CMD_FC_IN_UHLINK_PORT_IDX_LBN 16
-#define MC_CMD_FC_IN_UHLINK_PORT_IDX_WIDTH 8
-#define MC_CMD_FC_IN_UHLINK_CMD_FORMAT_LBN 24
-#define MC_CMD_FC_IN_UHLINK_CMD_FORMAT_WIDTH 8
-/* enum: Default FC command format; the fields PORT_TYPE and PORT_IDX are
- * irrelevant. Port number is derived from pci_fn; passed in FC header.
- */
-#define MC_CMD_FC_OP_UHLINK_CMD_FORMAT_DEFAULT 0x0
-/* enum: Override default port number. Port number determined by fields
- * PORT_TYPE and PORT_IDX.
- */
-#define MC_CMD_FC_OP_UHLINK_CMD_FORMAT_PORT_OVERRIDE 0x1
-
-/* MC_CMD_FC_OP_UHLINK_PHY msgrequest */
-#define MC_CMD_FC_OP_UHLINK_PHY_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
-
-/* MC_CMD_FC_OP_UHLINK_MAC msgrequest */
-#define MC_CMD_FC_OP_UHLINK_MAC_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
-
-/* MC_CMD_FC_OP_UHLINK_RX_EYE msgrequest */
-#define MC_CMD_FC_OP_UHLINK_RX_EYE_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
-#define MC_CMD_FC_OP_UHLINK_RX_EYE_INDEX_OFST 8
-#define MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK 0x30 /* enum */
-
-/* MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT msgrequest */
-#define MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
-
-/* MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT msgrequest */
-#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_LEN 20
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
-#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_DC_GAIN_OFST 8
-#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_EQ_CONTROL_OFST 12
-#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_INDEX_OFST 16
-#define MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK 0x1e /* enum */
-
-/* MC_CMD_FC_OP_UHLINK_RX_TUNE msgrequest */
-#define MC_CMD_FC_OP_UHLINK_RX_TUNE_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
-
-/* MC_CMD_FC_OP_UHLINK_LOOPBACK_SET msgrequest */
-#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_LEN 16
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
-#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_TYPE_OFST 8
-#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PCS_SERIAL 0x0 /* enum */
-#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PMA_PRE_CDR 0x1 /* enum */
-#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PMA_POST_CDR 0x2 /* enum */
-#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_STATE_OFST 12
-#define MC_CMD_FC_UHLINK_LOOPBACK_STATE_OFF 0x0 /* enum */
-#define MC_CMD_FC_UHLINK_LOOPBACK_STATE_ON 0x1 /* enum */
-
-/* MC_CMD_FC_OP_UHLINK_LOOPBACK_GET msgrequest */
-#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
-#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_TYPE_OFST 8
-
-/* MC_CMD_FC_IN_SET_LINK msgrequest */
-#define MC_CMD_FC_IN_SET_LINK_LEN 16
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* See MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
-#define MC_CMD_FC_IN_SET_LINK_MODE_OFST 4
-#define MC_CMD_FC_IN_SET_LINK_SPEED_OFST 8
-#define MC_CMD_FC_IN_SET_LINK_FLAGS_OFST 12
-#define MC_CMD_FC_IN_SET_LINK_LOWPOWER_LBN 0
-#define MC_CMD_FC_IN_SET_LINK_LOWPOWER_WIDTH 1
-#define MC_CMD_FC_IN_SET_LINK_POWEROFF_LBN 1
-#define MC_CMD_FC_IN_SET_LINK_POWEROFF_WIDTH 1
-#define MC_CMD_FC_IN_SET_LINK_TXDIS_LBN 2
-#define MC_CMD_FC_IN_SET_LINK_TXDIS_WIDTH 1
-
-/* MC_CMD_FC_IN_LICENSE msgrequest */
-#define MC_CMD_FC_IN_LICENSE_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_LICENSE_OP_OFST 4
-#define MC_CMD_FC_IN_LICENSE_UPDATE_LICENSE 0x0 /* enum */
-#define MC_CMD_FC_IN_LICENSE_GET_KEY_STATS 0x1 /* enum */
-
-/* MC_CMD_FC_IN_STARTUP msgrequest */
-#define MC_CMD_FC_IN_STARTUP_LEN 40
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_STARTUP_BASE_OFST 4
-#define MC_CMD_FC_IN_STARTUP_LENGTH_OFST 8
-/* Length of identifier */
-#define MC_CMD_FC_IN_STARTUP_IDLENGTH_OFST 12
-/* Identifier for AOE FPGA */
-#define MC_CMD_FC_IN_STARTUP_ID_OFST 16
-#define MC_CMD_FC_IN_STARTUP_ID_LEN 1
-#define MC_CMD_FC_IN_STARTUP_ID_NUM 24
-
-/* MC_CMD_FC_IN_DMA msgrequest */
-#define MC_CMD_FC_IN_DMA_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_DMA_OP_OFST 4
-#define MC_CMD_FC_IN_DMA_STOP 0x0 /* enum */
-#define MC_CMD_FC_IN_DMA_READ 0x1 /* enum */
-
-/* MC_CMD_FC_IN_DMA_STOP msgrequest */
-#define MC_CMD_FC_IN_DMA_STOP_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_DMA_OP_OFST 4 */
-/* FC supplied handle */
-#define MC_CMD_FC_IN_DMA_STOP_FC_HANDLE_OFST 8
-
-/* MC_CMD_FC_IN_DMA_READ msgrequest */
-#define MC_CMD_FC_IN_DMA_READ_LEN 16
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_DMA_OP_OFST 4 */
-#define MC_CMD_FC_IN_DMA_READ_OFFSET_OFST 8
-#define MC_CMD_FC_IN_DMA_READ_LENGTH_OFST 12
-
-/* MC_CMD_FC_IN_TIMED_READ msgrequest */
-#define MC_CMD_FC_IN_TIMED_READ_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_TIMED_READ_OP_OFST 4
-#define MC_CMD_FC_IN_TIMED_READ_SET 0x0 /* enum */
-#define MC_CMD_FC_IN_TIMED_READ_GET 0x1 /* enum */
-#define MC_CMD_FC_IN_TIMED_READ_CLEAR 0x2 /* enum */
-
-/* MC_CMD_FC_IN_TIMED_READ_SET msgrequest */
-#define MC_CMD_FC_IN_TIMED_READ_SET_LEN 52
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */
-/* Host supplied handle (unique) */
-#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_HANDLE_OFST 8
-/* Address into which to transfer data in host */
-#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_OFST 12
-#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LEN 8
-#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LO_OFST 12
-#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_HI_OFST 16
-/* AOE address from which to transfer data */
-#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_OFST 20
-#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_LEN 8
-#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_LO_OFST 20
-#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_HI_OFST 24
-/* Length of AOE transfer (total) */
-#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_LENGTH_OFST 28
-/* Length of host transfer (total) */
-#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_LENGTH_OFST 32
-/* Offset back from aoe_address to apply operation to */
-#define MC_CMD_FC_IN_TIMED_READ_SET_OFFSET_OFST 36
-/* Data to apply at offset */
-#define MC_CMD_FC_IN_TIMED_READ_SET_DATA_OFST 40
-#define MC_CMD_FC_IN_TIMED_READ_SET_FLAGS_OFST 44
-#define MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_LBN 0
-#define MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_WIDTH 1
-#define MC_CMD_FC_IN_TIMED_READ_SET_DOUBLE_LBN 1
-#define MC_CMD_FC_IN_TIMED_READ_SET_DOUBLE_WIDTH 1
-#define MC_CMD_FC_IN_TIMED_READ_SET_EVENT_LBN 2
-#define MC_CMD_FC_IN_TIMED_READ_SET_EVENT_WIDTH 1
-#define MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_LBN 3
-#define MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_WIDTH 2
-#define MC_CMD_FC_IN_TIMED_READ_SET_NONE 0x0 /* enum */
-#define MC_CMD_FC_IN_TIMED_READ_SET_READ 0x1 /* enum */
-#define MC_CMD_FC_IN_TIMED_READ_SET_WRITE 0x2 /* enum */
-#define MC_CMD_FC_IN_TIMED_READ_SET_READWRITE 0x3 /* enum */
-/* Period at which reads are performed (100ms units) */
-#define MC_CMD_FC_IN_TIMED_READ_SET_PERIOD_OFST 48
-
-/* MC_CMD_FC_IN_TIMED_READ_GET msgrequest */
-#define MC_CMD_FC_IN_TIMED_READ_GET_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */
-/* FC supplied handle */
-#define MC_CMD_FC_IN_TIMED_READ_GET_FC_HANDLE_OFST 8
-
-/* MC_CMD_FC_IN_TIMED_READ_CLEAR msgrequest */
-#define MC_CMD_FC_IN_TIMED_READ_CLEAR_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */
-/* FC supplied handle */
-#define MC_CMD_FC_IN_TIMED_READ_CLEAR_FC_HANDLE_OFST 8
-
-/* MC_CMD_FC_IN_LOG msgrequest */
-#define MC_CMD_FC_IN_LOG_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_LOG_OP_OFST 4
-#define MC_CMD_FC_IN_LOG_ADDR_RANGE 0x0 /* enum */
-#define MC_CMD_FC_IN_LOG_JTAG_UART 0x1 /* enum */
-
-/* MC_CMD_FC_IN_LOG_ADDR_RANGE msgrequest */
-#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LEN 20
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_LOG_OP_OFST 4 */
-/* Partition offset into flash */
-#define MC_CMD_FC_IN_LOG_ADDR_RANGE_OFFSET_OFST 8
-/* Partition length */
-#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LENGTH_OFST 12
-/* Partition erase size */
-#define MC_CMD_FC_IN_LOG_ADDR_RANGE_ERASE_SIZE_OFST 16
-
-/* MC_CMD_FC_IN_LOG_JTAG_UART msgrequest */
-#define MC_CMD_FC_IN_LOG_JTAG_UART_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_LOG_OP_OFST 4 */
-/* Enable/disable printing to JTAG UART */
-#define MC_CMD_FC_IN_LOG_JTAG_UART_ENABLE_OFST 8
-
-/* MC_CMD_FC_IN_CLOCK msgrequest */
-#define MC_CMD_FC_IN_CLOCK_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_CLOCK_OP_OFST 4
-#define MC_CMD_FC_IN_CLOCK_GET_TIME 0x0 /* enum */
-#define MC_CMD_FC_IN_CLOCK_SET_TIME 0x1 /* enum */
-/* Perform a clock operation */
-#define MC_CMD_FC_IN_CLOCK_ID_OFST 8
-#define MC_CMD_FC_IN_CLOCK_STATS 0x0 /* enum */
-#define MC_CMD_FC_IN_CLOCK_MAC 0x1 /* enum */
-
-/* MC_CMD_FC_IN_CLOCK_GET_TIME msgrequest */
-#define MC_CMD_FC_IN_CLOCK_GET_TIME_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_CLOCK_OP_OFST 4 */
-/* Retrieve the clock value of the specified clock */
-/* MC_CMD_FC_IN_CLOCK_ID_OFST 8 */
-
-/* MC_CMD_FC_IN_CLOCK_SET_TIME msgrequest */
-#define MC_CMD_FC_IN_CLOCK_SET_TIME_LEN 24
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_CLOCK_OP_OFST 4 */
-/* MC_CMD_FC_IN_CLOCK_ID_OFST 8 */
-#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_OFST 12
-#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_LEN 8
-#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_LO_OFST 12
-#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_HI_OFST 16
-/* Set the clock value of the specified clock */
-#define MC_CMD_FC_IN_CLOCK_SET_TIME_NANOSECONDS_OFST 20
-
-/* MC_CMD_FC_IN_DDR msgrequest */
-#define MC_CMD_FC_IN_DDR_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_DDR_OP_OFST 4
-#define MC_CMD_FC_IN_DDR_SET_SPD 0x0 /* enum */
-#define MC_CMD_FC_IN_DDR_GET_STATUS 0x1 /* enum */
-#define MC_CMD_FC_IN_DDR_SET_INFO 0x2 /* enum */
-#define MC_CMD_FC_IN_DDR_BANK_OFST 8
-#define MC_CMD_FC_IN_DDR_BANK_B0 0x0 /* enum */
-#define MC_CMD_FC_IN_DDR_BANK_B1 0x1 /* enum */
-#define MC_CMD_FC_IN_DDR_BANK_T0 0x2 /* enum */
-#define MC_CMD_FC_IN_DDR_BANK_T1 0x3 /* enum */
-#define MC_CMD_FC_IN_DDR_NUM_BANKS 0x4 /* enum */
-
-/* MC_CMD_FC_IN_DDR_SET_SPD msgrequest */
-#define MC_CMD_FC_IN_DDR_SET_SPD_LEN 148
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_DDR_OP_OFST 4 */
-/* Affected bank */
-/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */
-/* Flags */
-#define MC_CMD_FC_IN_DDR_FLAGS_OFST 12
-#define MC_CMD_FC_IN_DDR_SET_SPD_ACTIVE 0x1 /* enum */
-/* 128-byte page of serial presence detect data read from module's EEPROM */
-#define MC_CMD_FC_IN_DDR_SPD_OFST 16
-#define MC_CMD_FC_IN_DDR_SPD_LEN 1
-#define MC_CMD_FC_IN_DDR_SPD_NUM 128
-/* Page index of the spd data copied into MC_CMD_FC_IN_DDR_SPD */
-#define MC_CMD_FC_IN_DDR_SPD_PAGE_ID_OFST 144
-
-/* MC_CMD_FC_IN_DDR_SET_INFO msgrequest */
-#define MC_CMD_FC_IN_DDR_SET_INFO_LEN 16
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_DDR_OP_OFST 4 */
-/* Affected bank */
-/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */
-/* Size of DDR */
-#define MC_CMD_FC_IN_DDR_SIZE_OFST 12
-
-/* MC_CMD_FC_IN_DDR_GET_STATUS msgrequest */
-#define MC_CMD_FC_IN_DDR_GET_STATUS_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_DDR_OP_OFST 4 */
-/* Affected bank */
-/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */
-
-/* MC_CMD_FC_IN_TIMESTAMP msgrequest */
-#define MC_CMD_FC_IN_TIMESTAMP_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* FC timestamp operation code */
-#define MC_CMD_FC_IN_TIMESTAMP_OP_OFST 4
-/* enum: Read transmit timestamp(s) */
-#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT 0x0
-/* enum: Read snapshot timestamps */
-#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT 0x1
-/* enum: Clear all transmit timestamps */
-#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT 0x2
-
-/* MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT msgrequest */
-#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_LEN 28
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_OP_OFST 4
-/* Control filtering of the returned timestamp and sequence number specified
- * here
- */
-#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_FILTER_OFST 8
-/* enum: Return most recent timestamp. No filtering */
-#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_LATEST 0x0
-/* enum: Match timestamp against the PTP clock ID, port number and sequence
- * number specified
- */
-#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_MATCH 0x1
-/* Clock identity of PTP packet for which timestamp required */
-#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_OFST 12
-#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_LEN 8
-#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_LO_OFST 12
-#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_HI_OFST 16
-/* Port number of PTP packet for which timestamp required */
-#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_PORT_NUM_OFST 20
-/* Sequence number of PTP packet for which timestamp required */
-#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_SEQ_NUM_OFST 24
-
-/* MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT msgrequest */
-#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_OP_OFST 4
-
-/* MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT msgrequest */
-#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_OP_OFST 4
-
-/* MC_CMD_FC_IN_SPI msgrequest */
-#define MC_CMD_FC_IN_SPI_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* Basic commands for SPI Flash. */
-#define MC_CMD_FC_IN_SPI_OP_OFST 4
-/* enum: SPI Flash read */
-#define MC_CMD_FC_IN_SPI_READ 0x0
-/* enum: SPI Flash write */
-#define MC_CMD_FC_IN_SPI_WRITE 0x1
-/* enum: SPI Flash erase */
-#define MC_CMD_FC_IN_SPI_ERASE 0x2
-
-/* MC_CMD_FC_IN_SPI_READ msgrequest */
-#define MC_CMD_FC_IN_SPI_READ_LEN 16
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_SPI_READ_OP_OFST 4
-#define MC_CMD_FC_IN_SPI_READ_ADDR_OFST 8
-#define MC_CMD_FC_IN_SPI_READ_NUMBYTES_OFST 12
-
-/* MC_CMD_FC_IN_SPI_WRITE msgrequest */
-#define MC_CMD_FC_IN_SPI_WRITE_LENMIN 16
-#define MC_CMD_FC_IN_SPI_WRITE_LENMAX 252
-#define MC_CMD_FC_IN_SPI_WRITE_LEN(num) (12+4*(num))
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_SPI_WRITE_OP_OFST 4
-#define MC_CMD_FC_IN_SPI_WRITE_ADDR_OFST 8
-#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_OFST 12
-#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_LEN 4
-#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_MINNUM 1
-#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_MAXNUM 60
-
-/* MC_CMD_FC_IN_SPI_ERASE msgrequest */
-#define MC_CMD_FC_IN_SPI_ERASE_LEN 16
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_SPI_ERASE_OP_OFST 4
-#define MC_CMD_FC_IN_SPI_ERASE_ADDR_OFST 8
-#define MC_CMD_FC_IN_SPI_ERASE_NUMBYTES_OFST 12
-
-/* MC_CMD_FC_IN_DIAG msgrequest */
-#define MC_CMD_FC_IN_DIAG_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* Operation code indicating component type */
-#define MC_CMD_FC_IN_DIAG_OP_OFST 4
-/* enum: Power noise generator. */
-#define MC_CMD_FC_IN_DIAG_POWER_NOISE 0x0
-/* enum: DDR soak test component. */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK 0x1
-/* enum: Diagnostics datapath control component. */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL 0x2
-
-/* MC_CMD_FC_IN_DIAG_POWER_NOISE msgrequest */
-#define MC_CMD_FC_IN_DIAG_POWER_NOISE_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_DIAG_POWER_NOISE_OP_OFST 4
-/* Sub-opcode describing the operation to be carried out */
-#define MC_CMD_FC_IN_DIAG_POWER_NOISE_SUB_OP_OFST 8
-/* enum: Read the configuration (the 32-bit values in each of the clock enable
- * count and toggle count registers)
- */
-#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG 0x0
-/* enum: Write a new configuration to the clock enable count and toggle count
- * registers
- */
-#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG 0x1
-
-/* MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG msgrequest */
-#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_OP_OFST 4
-#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_SUB_OP_OFST 8
-
-/* MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG msgrequest */
-#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_LEN 20
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_OP_OFST 4
-#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_SUB_OP_OFST 8
-/* The 32-bit value to be written to the toggle count register */
-#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_TOGGLE_COUNT_OFST 12
-/* The 32-bit value to be written to the clock enable count register */
-#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_CLKEN_COUNT_OFST 16
-
-/* MC_CMD_FC_IN_DIAG_DDR_SOAK msgrequest */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_OP_OFST 4
-/* Sub-opcode describing the operation to be carried out */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_SUB_OP_OFST 8
-/* enum: Starts DDR soak test on selected banks */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START 0x0
-/* enum: Read status of DDR soak test */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT 0x1
-/* enum: Stop test */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP 0x2
-/* enum: Set or clear bit that triggers fake errors. These cause subsequent
- * tests to fail until the bit is cleared.
- */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR 0x3
-
-/* MC_CMD_FC_IN_DIAG_DDR_SOAK_START msgrequest */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_LEN 24
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_OP_OFST 4
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SUB_OP_OFST 8
-/* Mask of DDR banks to be tested */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_BANK_MASK_OFST 12
-/* Pattern to use in the soak test */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_PATTERN_OFST 16
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ZEROS 0x0 /* enum */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ONES 0x1 /* enum */
-/* Either multiple automatic tests until a STOP command is issued, or one
- * single test
- */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_TYPE_OFST 20
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ONGOING_TEST 0x0 /* enum */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SINGLE_TEST 0x1 /* enum */
-
-/* MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT msgrequest */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_LEN 16
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_OP_OFST 4
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_SUB_OP_OFST 8
-/* DDR bank to read status from */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_BANK_ID_OFST 12
-#define MC_CMD_FC_DDR_BANK0 0x0 /* enum */
-#define MC_CMD_FC_DDR_BANK1 0x1 /* enum */
-#define MC_CMD_FC_DDR_BANK2 0x2 /* enum */
-#define MC_CMD_FC_DDR_BANK3 0x3 /* enum */
-#define MC_CMD_FC_DDR_AOEMEM_MAX_BANKS 0x4 /* enum */
-
-/* MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP msgrequest */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_LEN 16
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_OP_OFST 4
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_SUB_OP_OFST 8
-/* Mask of DDR banks to be tested */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_BANK_MASK_OFST 12
-
-/* MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR msgrequest */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_LEN 20
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_OP_OFST 4
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SUB_OP_OFST 8
-/* Mask of DDR banks to set/clear error flag on */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_BANK_MASK_OFST 12
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_FLAG_ACTION_OFST 16
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_CLEAR 0x0 /* enum */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SET 0x1 /* enum */
-
-/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL msgrequest */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_OP_OFST 4
-/* Sub-opcode describing the operation to be carried out */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SUB_OP_OFST 8
-/* enum: Set a known datapath configuration */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE 0x0
-/* enum: Apply raw config to datapath control registers */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG 0x1
-
-/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE msgrequest */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_LEN 16
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_OP_OFST 4
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SUB_OP_OFST 8
-/* Datapath configuration identifier */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_MODE_OFST 12
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_PASSTHROUGH 0x0 /* enum */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SNAKE 0x1 /* enum */
-
-/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG msgrequest */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_LEN 24
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_OP_OFST 4
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_SUB_OP_OFST 8
-/* Value to write into control register 1 */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL1_OFST 12
-/* Value to write into control register 2 */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL2_OFST 16
-/* Value to write into control register 3 */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL3_OFST 20
-
-/* MC_CMD_FC_OUT msgresponse */
-#define MC_CMD_FC_OUT_LEN 0
-
-/* MC_CMD_FC_OUT_NULL msgresponse */
-#define MC_CMD_FC_OUT_NULL_LEN 0
-
-/* MC_CMD_FC_OUT_READ32 msgresponse */
-#define MC_CMD_FC_OUT_READ32_LENMIN 4
-#define MC_CMD_FC_OUT_READ32_LENMAX 252
-#define MC_CMD_FC_OUT_READ32_LEN(num) (0+4*(num))
-#define MC_CMD_FC_OUT_READ32_BUFFER_OFST 0
-#define MC_CMD_FC_OUT_READ32_BUFFER_LEN 4
-#define MC_CMD_FC_OUT_READ32_BUFFER_MINNUM 1
-#define MC_CMD_FC_OUT_READ32_BUFFER_MAXNUM 63
-
-/* MC_CMD_FC_OUT_WRITE32 msgresponse */
-#define MC_CMD_FC_OUT_WRITE32_LEN 0
-
-/* MC_CMD_FC_OUT_TRC_READ msgresponse */
-#define MC_CMD_FC_OUT_TRC_READ_LEN 16
-#define MC_CMD_FC_OUT_TRC_READ_DATA_OFST 0
-#define MC_CMD_FC_OUT_TRC_READ_DATA_LEN 4
-#define MC_CMD_FC_OUT_TRC_READ_DATA_NUM 4
-
-/* MC_CMD_FC_OUT_TRC_WRITE msgresponse */
-#define MC_CMD_FC_OUT_TRC_WRITE_LEN 0
-
-/* MC_CMD_FC_OUT_GET_VERSION msgresponse */
-#define MC_CMD_FC_OUT_GET_VERSION_LEN 12
-#define MC_CMD_FC_OUT_GET_VERSION_FIRMWARE_OFST 0
-#define MC_CMD_FC_OUT_GET_VERSION_VERSION_OFST 4
-#define MC_CMD_FC_OUT_GET_VERSION_VERSION_LEN 8
-#define MC_CMD_FC_OUT_GET_VERSION_VERSION_LO_OFST 4
-#define MC_CMD_FC_OUT_GET_VERSION_VERSION_HI_OFST 8
-
-/* MC_CMD_FC_OUT_TRC_RX_READ msgresponse */
-#define MC_CMD_FC_OUT_TRC_RX_READ_LEN 8
-#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_OFST 0
-#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_LEN 4
-#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_NUM 2
-
-/* MC_CMD_FC_OUT_TRC_RX_WRITE msgresponse */
-#define MC_CMD_FC_OUT_TRC_RX_WRITE_LEN 0
-
-/* MC_CMD_FC_OUT_MAC_RECONFIGURE msgresponse */
-#define MC_CMD_FC_OUT_MAC_RECONFIGURE_LEN 0
-
-/* MC_CMD_FC_OUT_MAC_SET_LINK msgresponse */
-#define MC_CMD_FC_OUT_MAC_SET_LINK_LEN 0
-
-/* MC_CMD_FC_OUT_MAC_READ_STATUS msgresponse */
-#define MC_CMD_FC_OUT_MAC_READ_STATUS_LEN 4
-#define MC_CMD_FC_OUT_MAC_READ_STATUS_STATUS_OFST 0
-
-/* MC_CMD_FC_OUT_MAC_GET_RX_STATS msgresponse */
-#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_RX_NSTATS))+1))>>3)
-#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_OFST 0
-#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LEN 8
-#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LO_OFST 0
-#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_HI_OFST 4
-#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_NUM MC_CMD_FC_MAC_RX_NSTATS
-#define MC_CMD_FC_MAC_RX_STATS_OCTETS 0x0 /* enum */
-#define MC_CMD_FC_MAC_RX_OCTETS_OK 0x1 /* enum */
-#define MC_CMD_FC_MAC_RX_ALIGNMENT_ERRORS 0x2 /* enum */
-#define MC_CMD_FC_MAC_RX_PAUSE_MAC_CTRL_FRAMES 0x3 /* enum */
-#define MC_CMD_FC_MAC_RX_FRAMES_OK 0x4 /* enum */
-#define MC_CMD_FC_MAC_RX_CRC_ERRORS 0x5 /* enum */
-#define MC_CMD_FC_MAC_RX_VLAN_OK 0x6 /* enum */
-#define MC_CMD_FC_MAC_RX_ERRORS 0x7 /* enum */
-#define MC_CMD_FC_MAC_RX_UCAST_PKTS 0x8 /* enum */
-#define MC_CMD_FC_MAC_RX_MULTICAST_PKTS 0x9 /* enum */
-#define MC_CMD_FC_MAC_RX_BROADCAST_PKTS 0xa /* enum */
-#define MC_CMD_FC_MAC_RX_STATS_DROP_EVENTS 0xb /* enum */
-#define MC_CMD_FC_MAC_RX_STATS_PKTS 0xc /* enum */
-#define MC_CMD_FC_MAC_RX_STATS_UNDERSIZE_PKTS 0xd /* enum */
-#define MC_CMD_FC_MAC_RX_STATS_PKTS_64 0xe /* enum */
-#define MC_CMD_FC_MAC_RX_STATS_PKTS_65_127 0xf /* enum */
-#define MC_CMD_FC_MAC_RX_STATS_PKTS_128_255 0x10 /* enum */
-#define MC_CMD_FC_MAC_RX_STATS_PKTS_256_511 0x11 /* enum */
-#define MC_CMD_FC_MAC_RX_STATS_PKTS_512_1023 0x12 /* enum */
-#define MC_CMD_FC_MAC_RX_STATS_PKTS_1024_1518 0x13 /* enum */
-#define MC_CMD_FC_MAC_RX_STATS_PKTS_1519_MAX 0x14 /* enum */
-#define MC_CMD_FC_MAC_RX_STATS_OVERSIZE_PKTS 0x15 /* enum */
-#define MC_CMD_FC_MAC_RX_STATS_JABBERS 0x16 /* enum */
-#define MC_CMD_FC_MAC_RX_STATS_FRAGMENTS 0x17 /* enum */
-#define MC_CMD_FC_MAC_RX_MAC_CONTROL_FRAMES 0x18 /* enum */
-/* enum: (Last entry) */
-#define MC_CMD_FC_MAC_RX_NSTATS 0x19
-
-/* MC_CMD_FC_OUT_MAC_GET_TX_STATS msgresponse */
-#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_TX_NSTATS))+1))>>3)
-#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_OFST 0
-#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_LEN 8
-#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_LO_OFST 0
-#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_HI_OFST 4
-#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_NUM MC_CMD_FC_MAC_TX_NSTATS
-#define MC_CMD_FC_MAC_TX_STATS_OCTETS 0x0 /* enum */
-#define MC_CMD_FC_MAC_TX_OCTETS_OK 0x1 /* enum */
-#define MC_CMD_FC_MAC_TX_ALIGNMENT_ERRORS 0x2 /* enum */
-#define MC_CMD_FC_MAC_TX_PAUSE_MAC_CTRL_FRAMES 0x3 /* enum */
-#define MC_CMD_FC_MAC_TX_FRAMES_OK 0x4 /* enum */
-#define MC_CMD_FC_MAC_TX_CRC_ERRORS 0x5 /* enum */
-#define MC_CMD_FC_MAC_TX_VLAN_OK 0x6 /* enum */
-#define MC_CMD_FC_MAC_TX_ERRORS 0x7 /* enum */
-#define MC_CMD_FC_MAC_TX_UCAST_PKTS 0x8 /* enum */
-#define MC_CMD_FC_MAC_TX_MULTICAST_PKTS 0x9 /* enum */
-#define MC_CMD_FC_MAC_TX_BROADCAST_PKTS 0xa /* enum */
-#define MC_CMD_FC_MAC_TX_STATS_DROP_EVENTS 0xb /* enum */
-#define MC_CMD_FC_MAC_TX_STATS_PKTS 0xc /* enum */
-#define MC_CMD_FC_MAC_TX_STATS_UNDERSIZE_PKTS 0xd /* enum */
-#define MC_CMD_FC_MAC_TX_STATS_PKTS_64 0xe /* enum */
-#define MC_CMD_FC_MAC_TX_STATS_PKTS_65_127 0xf /* enum */
-#define MC_CMD_FC_MAC_TX_STATS_PKTS_128_255 0x10 /* enum */
-#define MC_CMD_FC_MAC_TX_STATS_PKTS_256_511 0x11 /* enum */
-#define MC_CMD_FC_MAC_TX_STATS_PKTS_512_1023 0x12 /* enum */
-#define MC_CMD_FC_MAC_TX_STATS_PKTS_1024_1518 0x13 /* enum */
-#define MC_CMD_FC_MAC_TX_STATS_PKTS_1519_TX_MTU 0x14 /* enum */
-#define MC_CMD_FC_MAC_TX_MAC_CONTROL_FRAMES 0x15 /* enum */
-/* enum: (Last entry) */
-#define MC_CMD_FC_MAC_TX_NSTATS 0x16
-
-/* MC_CMD_FC_OUT_MAC_GET_STATS msgresponse */
-#define MC_CMD_FC_OUT_MAC_GET_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_NSTATS_PER_BLOCK))+1))>>3)
-/* MAC Statistics */
-#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_OFST 0
-#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_LEN 8
-#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_LO_OFST 0
-#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_HI_OFST 4
-#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_NUM MC_CMD_FC_MAC_NSTATS_PER_BLOCK
-
-/* MC_CMD_FC_OUT_MAC msgresponse */
-#define MC_CMD_FC_OUT_MAC_LEN 0
-
-/* MC_CMD_FC_OUT_SFP msgresponse */
-#define MC_CMD_FC_OUT_SFP_LEN 0
-
-/* MC_CMD_FC_OUT_DDR_TEST_START msgresponse */
-#define MC_CMD_FC_OUT_DDR_TEST_START_LEN 0
-
-/* MC_CMD_FC_OUT_DDR_TEST_POLL msgresponse */
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_LEN 8
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_STATUS_OFST 0
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_LBN 0
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_WIDTH 8
-/* enum: Test not yet initiated */
-#define MC_CMD_FC_OP_DDR_TEST_NONE 0x0
-/* enum: Test is in progress */
-#define MC_CMD_FC_OP_DDR_TEST_INPROGRESS 0x1
-/* enum: Test completed */
-#define MC_CMD_FC_OP_DDR_TEST_SUCCESS 0x2
-/* enum: Test did not complete in specified time */
-#define MC_CMD_FC_OP_DDR_TEST_TIMER_EXPIRED 0x3
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0_LBN 11
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0_WIDTH 1
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T1_LBN 10
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T1_WIDTH 1
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B0_LBN 9
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B0_WIDTH 1
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B1_LBN 8
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B1_WIDTH 1
-/* Test result from FPGA */
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_RESULT_OFST 4
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T0_LBN 31
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T0_WIDTH 1
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T1_LBN 30
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T1_WIDTH 1
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B0_LBN 29
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B0_WIDTH 1
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B1_LBN 28
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B1_WIDTH 1
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_T0_LBN 15
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_T0_WIDTH 5
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_T1_LBN 10
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_T1_WIDTH 5
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_B0_LBN 5
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_B0_WIDTH 5
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_B1_LBN 0
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_B1_WIDTH 5
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_COMPLETE 0x0 /* enum */
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_FAIL 0x1 /* enum */
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_PASS 0x2 /* enum */
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_CAL_FAIL 0x3 /* enum */
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_CAL_SUCCESS 0x4 /* enum */
-
-/* MC_CMD_FC_OUT_DDR_TEST msgresponse */
-#define MC_CMD_FC_OUT_DDR_TEST_LEN 0
-
-/* MC_CMD_FC_OUT_GET_ASSERT msgresponse */
-#define MC_CMD_FC_OUT_GET_ASSERT_LEN 144
-/* Assertion status flag. */
-#define MC_CMD_FC_OUT_GET_ASSERT_GLOBAL_FLAGS_OFST 0
-#define MC_CMD_FC_OUT_GET_ASSERT_STATE_LBN 8
-#define MC_CMD_FC_OUT_GET_ASSERT_STATE_WIDTH 8
-/* enum: No crash data available */
-#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_CLEAR 0x0
-/* enum: New crash data available */
-#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NEW 0x1
-/* enum: Crash data has been sent */
-#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NOTIFIED 0x2
-#define MC_CMD_FC_OUT_GET_ASSERT_TYPE_LBN 0
-#define MC_CMD_FC_OUT_GET_ASSERT_TYPE_WIDTH 8
-/* enum: No crash has been recorded. */
-#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_NONE 0x0
-/* enum: Crash due to exception. */
-#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_EXCEPTION 0x1
-/* enum: Crash due to assertion. */
-#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_ASSERTION 0x2
-/* Failing PC value */
-#define MC_CMD_FC_OUT_GET_ASSERT_SAVED_PC_OFFS_OFST 4
-/* Saved GP regs */
-#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_OFST 8
-#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_LEN 4
-#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_NUM 31
-/* Exception Type */
-#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_TYPE_OFFS_OFST 132
-/* Instruction at which exception occurred */
-#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_PC_ADDR_OFFS_OFST 136
-/* BAD Address that triggered address-based exception */
-#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_BAD_ADDR_OFFS_OFST 140
-
-/* MC_CMD_FC_OUT_FPGA_BUILD msgresponse */
-#define MC_CMD_FC_OUT_FPGA_BUILD_LEN 32
-#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_INFO_OFST 0
-#define MC_CMD_FC_OUT_FPGA_BUILD_IS_APPLICATION_LBN 31
-#define MC_CMD_FC_OUT_FPGA_BUILD_IS_APPLICATION_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_IS_LICENSED_LBN 30
-#define MC_CMD_FC_OUT_FPGA_BUILD_IS_LICENSED_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_ID_LBN 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_ID_WIDTH 14
-#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_LBN 12
-#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_WIDTH 4
-#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_LBN 4
-#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_WIDTH 8
-#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_LBN 0
-#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_WIDTH 4
-/* Build timestamp (seconds since epoch) */
-#define MC_CMD_FC_OUT_FPGA_BUILD_TIMESTAMP_OFST 4
-#define MC_CMD_FC_OUT_FPGA_BUILD_PARAMETERS_OFST 8
-#define MC_CMD_FC_OUT_FPGA_BUILD_FPGA_TYPE_LBN 0
-#define MC_CMD_FC_OUT_FPGA_BUILD_FPGA_TYPE_WIDTH 8
-#define MC_CMD_FC_FPGA_TYPE_A7 0xa7 /* enum */
-#define MC_CMD_FC_FPGA_TYPE_A5 0xa5 /* enum */
-#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED1_LBN 8
-#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED1_WIDTH 10
-#define MC_CMD_FC_OUT_FPGA_BUILD_PTP_ENABLED_LBN 18
-#define MC_CMD_FC_OUT_FPGA_BUILD_PTP_ENABLED_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM1_RLDRAM_DEF_LBN 19
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM1_RLDRAM_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM2_RLDRAM_DEF_LBN 20
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM2_RLDRAM_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM3_RLDRAM_DEF_LBN 21
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM3_RLDRAM_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM4_RLDRAM_DEF_LBN 22
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM4_RLDRAM_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T0_DDR3_DEF_LBN 23
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T0_DDR3_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_DDR3_DEF_LBN 24
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_DDR3_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B0_DDR3_DEF_LBN 25
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B0_DDR3_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B1_DDR3_DEF_LBN 26
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B1_DDR3_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_DDR3_ECC_ENABLED_LBN 27
-#define MC_CMD_FC_OUT_FPGA_BUILD_DDR3_ECC_ENABLED_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_QDR_DEF_LBN 28
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_QDR_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED2_LBN 29
-#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED2_WIDTH 2
-#define MC_CMD_FC_OUT_FPGA_BUILD_CRC_APPEND_LBN 31
-#define MC_CMD_FC_OUT_FPGA_BUILD_CRC_APPEND_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_IDENTIFIER_OFST 12
-#define MC_CMD_FC_OUT_FPGA_BUILD_CHANGESET_LBN 0
-#define MC_CMD_FC_OUT_FPGA_BUILD_CHANGESET_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_FLAG_LBN 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_FLAG_WIDTH 1
-#define MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 /* enum */
-#define MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 /* enum */
-#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED3_LBN 17
-#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED3_WIDTH 15
-#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_HI_OFST 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MINOR_LBN 0
-#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MINOR_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MAJOR_LBN 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MAJOR_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_LO_OFST 20
-#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_BUILD_LBN 0
-#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_BUILD_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MICRO_LBN 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MICRO_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_OFST 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_LEN 8
-#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_LO_OFST 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_HI_OFST 20
-#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_LO_OFST 24
-#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HI_OFST 28
-#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HIGH_LBN 0
-#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HIGH_WIDTH 16
-
-/* MC_CMD_FC_OUT_FPGA_BUILD_V2 msgresponse */
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_LEN 32
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_INFO_OFST 0
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_APPLICATION_LBN 31
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_APPLICATION_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_LICENSED_LBN 30
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_LICENSED_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_ID_LBN 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_ID_WIDTH 14
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MAJOR_LBN 12
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MAJOR_WIDTH 4
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MINOR_LBN 4
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MINOR_WIDTH 8
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_NUM_LBN 0
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_NUM_WIDTH 4
-/* Build timestamp (seconds since epoch) */
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_TIMESTAMP_OFST 4
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PARAMETERS_OFST 8
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PMA_PASSTHROUGH_LBN 31
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PMA_PASSTHROUGH_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_QDR_DEF_LBN 29
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_QDR_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_QDR_DEF_LBN 28
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_QDR_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DDR3_ECC_ENABLED_LBN 27
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DDR3_ECC_ENABLED_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_DDR3_DEF_LBN 26
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_DDR3_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_DDR3_DEF_LBN 25
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_DDR3_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_TO_DDR3_DEF_LBN 24
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_TO_DDR3_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_T0_DDR3_DEF_LBN 23
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_T0_DDR3_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_RLDRAM_DEF_LBN 22
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_RLDRAM_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_RLDRAM_DEF_LBN 21
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_RLDRAM_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_RLDRAM_DEF_LBN 20
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_RLDRAM_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_RLDRAM_DEF_LBN 19
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_RLDRAM_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_LBN 18
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_10G 0x0 /* enum */
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_40G 0x1 /* enum */
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_LBN 17
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_10G 0x0 /* enum */
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_40G 0x1 /* enum */
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_LBN 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_10G 0x0 /* enum */
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_40G 0x1 /* enum */
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP7_DEF_LBN 15
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP7_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP6_DEF_LBN 14
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP6_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP5_DEF_LBN 13
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP5_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_DEF_LBN 12
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP3_DEF_LBN 11
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP3_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP2_DEF_LBN 10
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP2_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP1_DEF_LBN 9
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP1_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_DEF_LBN 8
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC3_DEF_LBN 7
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC3_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC2_DEF_LBN 6
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC2_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC1_DEF_LBN 5
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC1_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_DEF_LBN 4
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_FPGA_TYPE_LBN 0
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_FPGA_TYPE_WIDTH 4
-#define MC_CMD_FC_FPGA_V2_TYPE_A3 0x0 /* enum */
-#define MC_CMD_FC_FPGA_V2_TYPE_A4 0x1 /* enum */
-#define MC_CMD_FC_FPGA_V2_TYPE_A5 0x2 /* enum */
-#define MC_CMD_FC_FPGA_V2_TYPE_A7 0x3 /* enum */
-#define MC_CMD_FC_FPGA_V2_TYPE_D3 0x8 /* enum */
-#define MC_CMD_FC_FPGA_V2_TYPE_D4 0x9 /* enum */
-#define MC_CMD_FC_FPGA_V2_TYPE_D5 0xa /* enum */
-#define MC_CMD_FC_FPGA_V2_TYPE_D7 0xb /* enum */
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IDENTIFIER_OFST 12
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_CHANGESET_LBN 0
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_CHANGESET_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_FLAG_LBN 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_FLAG_WIDTH 1
-/* MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 */
-/* MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 */
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_HI_OFST 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MINOR_LBN 0
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MINOR_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MAJOR_LBN 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MAJOR_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_LO_OFST 20
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_BUILD_LBN 0
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_BUILD_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MICRO_LBN 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MICRO_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_LO_OFST 24
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HI_OFST 28
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HIGH_LBN 0
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HIGH_WIDTH 16
-
-/* MC_CMD_FC_OUT_FPGA_SERVICES msgresponse */
-#define MC_CMD_FC_OUT_FPGA_SERVICES_LEN 32
-#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_INFO_OFST 0
-#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_APPLICATION_LBN 31
-#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_APPLICATION_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_LICENSED_LBN 30
-#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_LICENSED_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_LBN 16
-#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_WIDTH 14
-#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MAJOR_LBN 12
-#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MAJOR_WIDTH 4
-#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MINOR_LBN 4
-#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MINOR_WIDTH 8
-#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_NUM_LBN 0
-#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_NUM_WIDTH 4
-/* Build timestamp (seconds since epoch) */
-#define MC_CMD_FC_OUT_FPGA_SERVICES_TIMESTAMP_OFST 4
-#define MC_CMD_FC_OUT_FPGA_SERVICES_PARAMETERS_OFST 8
-#define MC_CMD_FC_OUT_FPGA_SERVICES_FC_FLASH_BOOTED_LBN 8
-#define MC_CMD_FC_OUT_FPGA_SERVICES_FC_FLASH_BOOTED_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC0_DEF_LBN 27
-#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC0_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC1_DEF_LBN 28
-#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC1_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP0_DEF_LBN 29
-#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP0_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP1_DEF_LBN 30
-#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP1_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_SERVICES_RESERVED_LBN 31
-#define MC_CMD_FC_OUT_FPGA_SERVICES_RESERVED_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_SERVICES_IDENTIFIER_OFST 12
-#define MC_CMD_FC_OUT_FPGA_SERVICES_CHANGESET_LBN 0
-#define MC_CMD_FC_OUT_FPGA_SERVICES_CHANGESET_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_FLAG_LBN 16
-#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_FLAG_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_OFST 16
-#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_WIDTH_LBN 0
-#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_WIDTH_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_COUNT_LBN 16
-#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_COUNT_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_OFST 20
-#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_WIDTH_LBN 0
-#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_WIDTH_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_COUNT_LBN 16
-#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_COUNT_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_LO_OFST 24
-#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HI_OFST 28
-#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HIGH_LBN 0
-#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HIGH_WIDTH 16
-
-/* MC_CMD_FC_OUT_FPGA_SERVICES_V2 msgresponse */
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_LEN 32
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_INFO_OFST 0
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_APPLICATION_LBN 31
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_APPLICATION_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_LICENSED_LBN 30
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_LICENSED_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_ID_LBN 16
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_ID_WIDTH 14
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MAJOR_LBN 12
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MAJOR_WIDTH 4
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MINOR_LBN 4
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MINOR_WIDTH 8
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_NUM_LBN 0
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_NUM_WIDTH 4
-/* Build timestamp (seconds since epoch) */
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_TIMESTAMP_OFST 4
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PARAMETERS_OFST 8
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PTP_ENABLED_LBN 0
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PTP_ENABLED_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_FC_FLASH_BOOTED_LBN 8
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_FC_FLASH_BOOTED_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IDENTIFIER_OFST 12
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_CHANGESET_LBN 0
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_CHANGESET_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_FLAG_LBN 16
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_FLAG_WIDTH 1
-/* MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 */
-/* MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 */
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_LO_OFST 24
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HI_OFST 28
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HIGH_LBN 0
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HIGH_WIDTH 16
-
-/* MC_CMD_FC_OUT_BSP_VERSION msgresponse */
-#define MC_CMD_FC_OUT_BSP_VERSION_LEN 4
-/* Qsys system ID */
-#define MC_CMD_FC_OUT_BSP_VERSION_SYSID_OFST 0
-#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MAJOR_LBN 12
-#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MAJOR_WIDTH 4
-#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MINOR_LBN 4
-#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MINOR_WIDTH 8
-#define MC_CMD_FC_OUT_BSP_VERSION_BUILD_NUM_LBN 0
-#define MC_CMD_FC_OUT_BSP_VERSION_BUILD_NUM_WIDTH 4
-
-/* MC_CMD_FC_OUT_READ_MAP_COUNT msgresponse */
-#define MC_CMD_FC_OUT_READ_MAP_COUNT_LEN 4
-/* Number of maps */
-#define MC_CMD_FC_OUT_READ_MAP_COUNT_NUM_MAPS_OFST 0
-
-/* MC_CMD_FC_OUT_READ_MAP_INDEX msgresponse */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN 164
-/* Index of the map */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_INDEX_OFST 0
-/* Options for the map */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_OPTIONS_OFST 4
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_8 0x0 /* enum */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_16 0x1 /* enum */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_32 0x2 /* enum */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_64 0x3 /* enum */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_MASK 0x3 /* enum */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_PATH_FC 0x4 /* enum */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_PATH_MEM 0x8 /* enum */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_READ 0x10 /* enum */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_WRITE 0x20 /* enum */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_FREE 0x0 /* enum */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_LICENSED 0x40 /* enum */
-/* Address of start of map */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_OFST 8
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_LEN 8
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_LO_OFST 8
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_HI_OFST 12
-/* Length of address map */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_OFST 16
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_LEN 8
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_LO_OFST 16
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_HI_OFST 20
-/* Component information field */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_COMP_INFO_OFST 24
-/* License expiry data for map */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_OFST 28
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_LEN 8
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_LO_OFST 28
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_HI_OFST 32
-/* Name of the component */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_OFST 36
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_LEN 1
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_NUM 128
-
-/* MC_CMD_FC_OUT_READ_MAP msgresponse */
-#define MC_CMD_FC_OUT_READ_MAP_LEN 0
-
-/* MC_CMD_FC_OUT_CAPABILITIES msgresponse */
-#define MC_CMD_FC_OUT_CAPABILITIES_LEN 8
-/* Number of internal ports */
-#define MC_CMD_FC_OUT_CAPABILITIES_INTERNAL_OFST 0
-/* Number of external ports */
-#define MC_CMD_FC_OUT_CAPABILITIES_EXTERNAL_OFST 4
-
-/* MC_CMD_FC_OUT_GLOBAL_FLAGS msgresponse */
-#define MC_CMD_FC_OUT_GLOBAL_FLAGS_LEN 4
-#define MC_CMD_FC_OUT_GLOBAL_FLAGS_FLAGS_OFST 0
-
-/* MC_CMD_FC_OUT_IO_REL msgresponse */
-#define MC_CMD_FC_OUT_IO_REL_LEN 0
-
-/* MC_CMD_FC_OUT_IO_REL_GET_ADDR msgresponse */
-#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_LEN 8
-#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_HI_OFST 0
-#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_LO_OFST 4
-
-/* MC_CMD_FC_OUT_IO_REL_READ32 msgresponse */
-#define MC_CMD_FC_OUT_IO_REL_READ32_LENMIN 4
-#define MC_CMD_FC_OUT_IO_REL_READ32_LENMAX 252
-#define MC_CMD_FC_OUT_IO_REL_READ32_LEN(num) (0+4*(num))
-#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_OFST 0
-#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_LEN 4
-#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MINNUM 1
-#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MAXNUM 63
-
-/* MC_CMD_FC_OUT_IO_REL_WRITE32 msgresponse */
-#define MC_CMD_FC_OUT_IO_REL_WRITE32_LEN 0
-
-/* MC_CMD_FC_OUT_UHLINK_PHY msgresponse */
-#define MC_CMD_FC_OUT_UHLINK_PHY_LEN 48
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_0_OFST 0
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_VOD_LBN 0
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_VOD_WIDTH 16
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_1STPOSTTAP_LBN 16
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_1STPOSTTAP_WIDTH 16
-/* Transceiver Transmit settings */
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_1_OFST 4
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_PRETAP_LBN 0
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_PRETAP_WIDTH 16
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_2NDPOSTTAP_LBN 16
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_2NDPOSTTAP_WIDTH 16
-/* Transceiver Receive settings */
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_SETTINGS_OFST 8
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_DC_GAIN_LBN 0
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_DC_GAIN_WIDTH 16
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_EQ_CONTROL_LBN 16
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_EQ_CONTROL_WIDTH 16
-/* Rx eye opening */
-#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_OFST 12
-#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_WIDTH_LBN 0
-#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_WIDTH_WIDTH 16
-#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_HEIGHT_LBN 16
-#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_HEIGHT_WIDTH 16
-/* PCS status word */
-#define MC_CMD_FC_OUT_UHLINK_PHY_PCS_STATUS_OFST 16
-/* Link status word */
-#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WORD_OFST 20
-#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_LBN 0
-#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WIDTH 1
-#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_CONFIGURED_LBN 1
-#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_CONFIGURED_WIDTH 1
-/* Current SFP parameters applied */
-#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PARAMS_OFST 24
-#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PARAMS_LEN 20
-/* Link speed is 100, 1000, 10000 */
-#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_SPEED_OFST 24
-/* Length of copper cable - zero when not relevant */
-#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_COPPER_LEN_OFST 28
-/* True if a dual speed SFP+ module */
-#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_DUAL_SPEED_OFST 32
-/* True if an SFP Module is present (other fields valid when true) */
-#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PRESENT_OFST 36
-/* The type of the SFP+ Module */
-#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_TYPE_OFST 40
-/* PHY config flags */
-#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_OFST 44
-#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_DFE_LBN 0
-#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_DFE_WIDTH 1
-#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_AEQ_LBN 1
-#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_AEQ_WIDTH 1
-#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_RX_TUNING_LBN 2
-#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_RX_TUNING_WIDTH 1
-
-/* MC_CMD_FC_OUT_UHLINK_MAC msgresponse */
-#define MC_CMD_FC_OUT_UHLINK_MAC_LEN 20
-/* MAC configuration applied */
-#define MC_CMD_FC_OUT_UHLINK_MAC_CONFIG_OFST 0
-/* MTU size */
-#define MC_CMD_FC_OUT_UHLINK_MAC_MTU_OFST 4
-/* IF Mode status */
-#define MC_CMD_FC_OUT_UHLINK_MAC_IF_STATUS_OFST 8
-/* MAC address configured */
-#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_OFST 12
-#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_LEN 8
-#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_LO_OFST 12
-#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_HI_OFST 16
-
-/* MC_CMD_FC_OUT_UHLINK_RX_EYE msgresponse */
-#define MC_CMD_FC_OUT_UHLINK_RX_EYE_LEN ((((0-1+(32*MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK))+1))>>3)
-/* Rx Eye measurements */
-#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_OFST 0
-#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_LEN 4
-#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_NUM MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK
-
-/* MC_CMD_FC_OUT_UHLINK_DUMP_RX_EYE_PLOT msgresponse */
-#define MC_CMD_FC_OUT_UHLINK_DUMP_RX_EYE_PLOT_LEN 0
-
-/* MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT msgresponse */
-#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_LEN ((((32-1+(64*MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK))+1))>>3)
-/* Has the eye plot dump completed and data returned is valid? */
-#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_VALID_OFST 0
-/* Rx Eye binary plot */
-#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_OFST 4
-#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_LEN 8
-#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_LO_OFST 4
-#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_HI_OFST 8
-#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_NUM MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK
-
-/* MC_CMD_FC_OUT_UHLINK_RX_TUNE msgresponse */
-#define MC_CMD_FC_OUT_UHLINK_RX_TUNE_LEN 0
-
-/* MC_CMD_FC_OUT_UHLINK_LOOPBACK_SET msgresponse */
-#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_SET_LEN 0
-
-/* MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET msgresponse */
-#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_LEN 4
-#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_STATE_OFST 0
-
-/* MC_CMD_FC_OUT_UHLINK msgresponse */
-#define MC_CMD_FC_OUT_UHLINK_LEN 0
-
-/* MC_CMD_FC_OUT_SET_LINK msgresponse */
-#define MC_CMD_FC_OUT_SET_LINK_LEN 0
-
-/* MC_CMD_FC_OUT_LICENSE msgresponse */
-#define MC_CMD_FC_OUT_LICENSE_LEN 12
-/* Count of valid keys */
-#define MC_CMD_FC_OUT_LICENSE_VALID_KEYS_OFST 0
-/* Count of invalid keys */
-#define MC_CMD_FC_OUT_LICENSE_INVALID_KEYS_OFST 4
-/* Count of blacklisted keys */
-#define MC_CMD_FC_OUT_LICENSE_BLACKLISTED_KEYS_OFST 8
-
-/* MC_CMD_FC_OUT_STARTUP msgresponse */
-#define MC_CMD_FC_OUT_STARTUP_LEN 4
-/* Capabilities of the FPGA/FC */
-#define MC_CMD_FC_OUT_STARTUP_CAPABILITIES_OFST 0
-#define MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_LBN 0
-#define MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_WIDTH 1
-
-/* MC_CMD_FC_OUT_DMA_READ msgresponse */
-#define MC_CMD_FC_OUT_DMA_READ_LENMIN 1
-#define MC_CMD_FC_OUT_DMA_READ_LENMAX 252
-#define MC_CMD_FC_OUT_DMA_READ_LEN(num) (0+1*(num))
-/* The data read */
-#define MC_CMD_FC_OUT_DMA_READ_DATA_OFST 0
-#define MC_CMD_FC_OUT_DMA_READ_DATA_LEN 1
-#define MC_CMD_FC_OUT_DMA_READ_DATA_MINNUM 1
-#define MC_CMD_FC_OUT_DMA_READ_DATA_MAXNUM 252
-
-/* MC_CMD_FC_OUT_TIMED_READ_SET msgresponse */
-#define MC_CMD_FC_OUT_TIMED_READ_SET_LEN 4
-/* Timer handle */
-#define MC_CMD_FC_OUT_TIMED_READ_SET_FC_HANDLE_OFST 0
-
-/* MC_CMD_FC_OUT_TIMED_READ_GET msgresponse */
-#define MC_CMD_FC_OUT_TIMED_READ_GET_LEN 52
-/* Host supplied handle (unique) */
-#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_HANDLE_OFST 0
-/* Address into which to transfer data in host */
-#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_OFST 4
-#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_LEN 8
-#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_LO_OFST 4
-#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_HI_OFST 8
-/* AOE address from which to transfer data */
-#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_OFST 12
-#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_LEN 8
-#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_LO_OFST 12
-#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_HI_OFST 16
-/* Length of AOE transfer (total) */
-#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_LENGTH_OFST 20
-/* Length of host transfer (total) */
-#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_LENGTH_OFST 24
-/* See FLAGS entry for MC_CMD_FC_IN_TIMED_READ_SET */
-#define MC_CMD_FC_OUT_TIMED_READ_GET_FLAGS_OFST 28
-#define MC_CMD_FC_OUT_TIMED_READ_GET_PERIOD_OFST 32
-/* When active, start read time */
-#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_OFST 36
-#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_LEN 8
-#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_LO_OFST 36
-#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_HI_OFST 40
-/* When active, end read time */
-#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_OFST 44
-#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_LEN 8
-#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_LO_OFST 44
-#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_HI_OFST 48
-
-/* MC_CMD_FC_OUT_LOG_ADDR_RANGE msgresponse */
-#define MC_CMD_FC_OUT_LOG_ADDR_RANGE_LEN 0
-
-/* MC_CMD_FC_OUT_LOG msgresponse */
-#define MC_CMD_FC_OUT_LOG_LEN 0
-
-/* MC_CMD_FC_OUT_CLOCK_GET_TIME msgresponse */
-#define MC_CMD_FC_OUT_CLOCK_GET_TIME_LEN 24
-#define MC_CMD_FC_OUT_CLOCK_GET_TIME_CLOCK_ID_OFST 0
-#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_OFST 4
-#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_LEN 8
-#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_LO_OFST 4
-#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_HI_OFST 8
-#define MC_CMD_FC_OUT_CLOCK_GET_TIME_NANOSECONDS_OFST 12
-#define MC_CMD_FC_OUT_CLOCK_GET_TIME_RANGE_OFST 16
-#define MC_CMD_FC_OUT_CLOCK_GET_TIME_PRECISION_OFST 20
-
-/* MC_CMD_FC_OUT_CLOCK_SET_TIME msgresponse */
-#define MC_CMD_FC_OUT_CLOCK_SET_TIME_LEN 0
-
-/* MC_CMD_FC_OUT_DDR_SET_SPD msgresponse */
-#define MC_CMD_FC_OUT_DDR_SET_SPD_LEN 0
-
-/* MC_CMD_FC_OUT_DDR_SET_INFO msgresponse */
-#define MC_CMD_FC_OUT_DDR_SET_INFO_LEN 0
-
-/* MC_CMD_FC_OUT_DDR_GET_STATUS msgresponse */
-#define MC_CMD_FC_OUT_DDR_GET_STATUS_LEN 4
-#define MC_CMD_FC_OUT_DDR_GET_STATUS_FLAGS_OFST 0
-#define MC_CMD_FC_OUT_DDR_GET_STATUS_READY_LBN 0
-#define MC_CMD_FC_OUT_DDR_GET_STATUS_READY_WIDTH 1
-#define MC_CMD_FC_OUT_DDR_GET_STATUS_CALIBRATED_LBN 1
-#define MC_CMD_FC_OUT_DDR_GET_STATUS_CALIBRATED_WIDTH 1
-
-/* MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT msgresponse */
-#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_LEN 8
-#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_SECONDS_OFST 0
-#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_NANOSECONDS_OFST 4
-
-/* MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT msgresponse */
-#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LENMIN 8
-#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LENMAX 248
-#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LEN(num) (0+8*(num))
-#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_SECONDS_OFST 0
-#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_NANOSECONDS_OFST 4
-#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_OFST 0
-#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_LEN 8
-#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_LO_OFST 0
-#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_HI_OFST 4
-#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_MINNUM 0
-#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_MAXNUM 31
-
-/* MC_CMD_FC_OUT_SPI_READ msgresponse */
-#define MC_CMD_FC_OUT_SPI_READ_LENMIN 4
-#define MC_CMD_FC_OUT_SPI_READ_LENMAX 252
-#define MC_CMD_FC_OUT_SPI_READ_LEN(num) (0+4*(num))
-#define MC_CMD_FC_OUT_SPI_READ_BUFFER_OFST 0
-#define MC_CMD_FC_OUT_SPI_READ_BUFFER_LEN 4
-#define MC_CMD_FC_OUT_SPI_READ_BUFFER_MINNUM 1
-#define MC_CMD_FC_OUT_SPI_READ_BUFFER_MAXNUM 63
-
-/* MC_CMD_FC_OUT_SPI_WRITE msgresponse */
-#define MC_CMD_FC_OUT_SPI_WRITE_LEN 0
-
-/* MC_CMD_FC_OUT_SPI_ERASE msgresponse */
-#define MC_CMD_FC_OUT_SPI_ERASE_LEN 0
-
-/* MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG msgresponse */
-#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_LEN 8
-/* The 32-bit value read from the toggle count register */
-#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_TOGGLE_COUNT_OFST 0
-/* The 32-bit value read from the clock enable count register */
-#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_CLKEN_COUNT_OFST 4
-
-/* MC_CMD_FC_OUT_DIAG_POWER_NOISE_WRITE_CONFIG msgresponse */
-#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_WRITE_CONFIG_LEN 0
-
-/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_START msgresponse */
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_START_LEN 0
-
-/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT msgresponse */
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_LEN 8
-/* DDR soak test status word; bits [4:0] are relevant. */
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_STATUS_OFST 0
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PASSED_LBN 0
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PASSED_WIDTH 1
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_FAILED_LBN 1
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_FAILED_WIDTH 1
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_COMPLETED_LBN 2
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_COMPLETED_WIDTH 1
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_TIMEOUT_LBN 3
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_TIMEOUT_WIDTH 1
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PNF_LBN 4
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PNF_WIDTH 1
-/* DDR soak test error count */
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_ERR_COUNT_OFST 4
-
-/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_STOP msgresponse */
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_STOP_LEN 0
-
-/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_ERROR msgresponse */
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_ERROR_LEN 0
-
-/* MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_SET_MODE msgresponse */
-#define MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_SET_MODE_LEN 0
-
-/* MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_RAW_CONFIG msgresponse */
-#define MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_RAW_CONFIG_LEN 0
-
-
-/***********************************/
-/* MC_CMD_AOE
- * AOE operations on MC
- */
-#define MC_CMD_AOE 0xa
-
-/* MC_CMD_AOE_IN msgrequest */
-#define MC_CMD_AOE_IN_LEN 4
-#define MC_CMD_AOE_IN_OP_HDR_OFST 0
-#define MC_CMD_AOE_IN_OP_LBN 0
-#define MC_CMD_AOE_IN_OP_WIDTH 8
-/* enum: FPGA and CPLD information */
-#define MC_CMD_AOE_OP_INFO 0x1
-/* enum: Currents and voltages read from MCP3424s; DEBUG */
-#define MC_CMD_AOE_OP_CURRENTS 0x2
-/* enum: Temperatures at locations around the PCB; DEBUG */
-#define MC_CMD_AOE_OP_TEMPERATURES 0x3
-/* enum: Set CPLD to idle */
-#define MC_CMD_AOE_OP_CPLD_IDLE 0x4
-/* enum: Read from CPLD register */
-#define MC_CMD_AOE_OP_CPLD_READ 0x5
-/* enum: Write to CPLD register */
-#define MC_CMD_AOE_OP_CPLD_WRITE 0x6
-/* enum: Execute CPLD instruction */
-#define MC_CMD_AOE_OP_CPLD_INSTRUCTION 0x7
-/* enum: Reprogram the CPLD on the AOE device */
-#define MC_CMD_AOE_OP_CPLD_REPROGRAM 0x8
-/* enum: AOE power control */
-#define MC_CMD_AOE_OP_POWER 0x9
-/* enum: AOE image loading */
-#define MC_CMD_AOE_OP_LOAD 0xa
-/* enum: Fan monitoring */
-#define MC_CMD_AOE_OP_FAN_CONTROL 0xb
-/* enum: Fan failures since last reset */
-#define MC_CMD_AOE_OP_FAN_FAILURES 0xc
-/* enum: Get generic AOE MAC statistics */
-#define MC_CMD_AOE_OP_MAC_STATS 0xd
-/* enum: Retrieve PHY specific information */
-#define MC_CMD_AOE_OP_GET_PHY_MEDIA_INFO 0xe
-/* enum: Write a number of JTAG primitive commands, return will give data */
-#define MC_CMD_AOE_OP_JTAG_WRITE 0xf
-/* enum: Control access to the FPGA via the Siena JTAG Chain */
-#define MC_CMD_AOE_OP_FPGA_ACCESS 0x10
-/* enum: Set the MTU offset between Siena and AOE MACs */
-#define MC_CMD_AOE_OP_SET_MTU_OFFSET 0x11
-/* enum: How link state is handled */
-#define MC_CMD_AOE_OP_LINK_STATE 0x12
-/* enum: How Siena MAC statistics are reported (deprecated - use
- * MC_CMD_AOE_OP_ASIC_STATS)
- */
-#define MC_CMD_AOE_OP_SIENA_STATS 0x13
-/* enum: How native ASIC MAC statistics are reported - replaces the deprecated
- * command MC_CMD_AOE_OP_SIENA_STATS
- */
-#define MC_CMD_AOE_OP_ASIC_STATS 0x13
-/* enum: DDR memory information */
-#define MC_CMD_AOE_OP_DDR 0x14
-/* enum: FC control */
-#define MC_CMD_AOE_OP_FC 0x15
-/* enum: DDR ECC status reads */
-#define MC_CMD_AOE_OP_DDR_ECC_STATUS 0x16
-/* enum: Commands for MC-SPI Master emulation */
-#define MC_CMD_AOE_OP_MC_SPI_MASTER 0x17
-/* enum: Commands for FC boot control */
-#define MC_CMD_AOE_OP_FC_BOOT 0x18
-
-/* MC_CMD_AOE_OUT msgresponse */
-#define MC_CMD_AOE_OUT_LEN 0
-
-/* MC_CMD_AOE_IN_INFO msgrequest */
-#define MC_CMD_AOE_IN_INFO_LEN 4
-#define MC_CMD_AOE_IN_CMD_OFST 0
-
-/* MC_CMD_AOE_IN_CURRENTS msgrequest */
-#define MC_CMD_AOE_IN_CURRENTS_LEN 4
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-
-/* MC_CMD_AOE_IN_TEMPERATURES msgrequest */
-#define MC_CMD_AOE_IN_TEMPERATURES_LEN 4
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-
-/* MC_CMD_AOE_IN_CPLD_IDLE msgrequest */
-#define MC_CMD_AOE_IN_CPLD_IDLE_LEN 4
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-
-/* MC_CMD_AOE_IN_CPLD_READ msgrequest */
-#define MC_CMD_AOE_IN_CPLD_READ_LEN 12
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-#define MC_CMD_AOE_IN_CPLD_READ_REGISTER_OFST 4
-#define MC_CMD_AOE_IN_CPLD_READ_WIDTH_OFST 8
-
-/* MC_CMD_AOE_IN_CPLD_WRITE msgrequest */
-#define MC_CMD_AOE_IN_CPLD_WRITE_LEN 16
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-#define MC_CMD_AOE_IN_CPLD_WRITE_REGISTER_OFST 4
-#define MC_CMD_AOE_IN_CPLD_WRITE_WIDTH_OFST 8
-#define MC_CMD_AOE_IN_CPLD_WRITE_VALUE_OFST 12
-
-/* MC_CMD_AOE_IN_CPLD_INSTRUCTION msgrequest */
-#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_LEN 8
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_INSTRUCTION_OFST 4
-
-/* MC_CMD_AOE_IN_CPLD_REPROGRAM msgrequest */
-#define MC_CMD_AOE_IN_CPLD_REPROGRAM_LEN 8
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-#define MC_CMD_AOE_IN_CPLD_REPROGRAM_OP_OFST 4
-/* enum: Reprogram CPLD, poll for completion */
-#define MC_CMD_AOE_IN_CPLD_REPROGRAM_REPROGRAM 0x1
-/* enum: Reprogram CPLD, send event on completion */
-#define MC_CMD_AOE_IN_CPLD_REPROGRAM_REPROGRAM_EVENT 0x3
-/* enum: Get status of reprogramming operation */
-#define MC_CMD_AOE_IN_CPLD_REPROGRAM_STATUS 0x4
-
-/* MC_CMD_AOE_IN_POWER msgrequest */
-#define MC_CMD_AOE_IN_POWER_LEN 8
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-/* Turn on or off AOE power */
-#define MC_CMD_AOE_IN_POWER_OP_OFST 4
-/* enum: Turn off FPGA power */
-#define MC_CMD_AOE_IN_POWER_OFF 0x0
-/* enum: Turn on FPGA power */
-#define MC_CMD_AOE_IN_POWER_ON 0x1
-/* enum: Clear peak power measurement */
-#define MC_CMD_AOE_IN_POWER_CLEAR 0x2
-/* enum: Show current power in sensors output */
-#define MC_CMD_AOE_IN_POWER_SHOW_CURRENT 0x3
-/* enum: Show peak power in sensors output */
-#define MC_CMD_AOE_IN_POWER_SHOW_PEAK 0x4
-/* enum: Show current DDR current */
-#define MC_CMD_AOE_IN_POWER_DDR_LAST 0x5
-/* enum: Show peak DDR current */
-#define MC_CMD_AOE_IN_POWER_DDR_PEAK 0x6
-/* enum: Clear peak DDR current */
-#define MC_CMD_AOE_IN_POWER_DDR_CLEAR 0x7
-
-/* MC_CMD_AOE_IN_LOAD msgrequest */
-#define MC_CMD_AOE_IN_LOAD_LEN 8
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-/* Image to load (0 - main or 1 - diagnostic) in the normal sequence */
-#define MC_CMD_AOE_IN_LOAD_IMAGE_OFST 4
-
-/* MC_CMD_AOE_IN_FAN_CONTROL msgrequest */
-#define MC_CMD_AOE_IN_FAN_CONTROL_LEN 8
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-/* If non zero report measured fan RPM rather than nominal */
-#define MC_CMD_AOE_IN_FAN_CONTROL_REAL_RPM_OFST 4
-
-/* MC_CMD_AOE_IN_FAN_FAILURES msgrequest */
-#define MC_CMD_AOE_IN_FAN_FAILURES_LEN 4
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-
-/* MC_CMD_AOE_IN_MAC_STATS msgrequest */
-#define MC_CMD_AOE_IN_MAC_STATS_LEN 24
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-/* AOE port */
-#define MC_CMD_AOE_IN_MAC_STATS_PORT_OFST 4
-/* Host memory address for statistics */
-#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_OFST 8
-#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_LEN 8
-#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_LO_OFST 8
-#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_HI_OFST 12
-#define MC_CMD_AOE_IN_MAC_STATS_CMD_OFST 16
-#define MC_CMD_AOE_IN_MAC_STATS_DMA_LBN 0
-#define MC_CMD_AOE_IN_MAC_STATS_DMA_WIDTH 1
-#define MC_CMD_AOE_IN_MAC_STATS_CLEAR_LBN 1
-#define MC_CMD_AOE_IN_MAC_STATS_CLEAR_WIDTH 1
-#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_LBN 2
-#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_WIDTH 1
-#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_LBN 3
-#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_WIDTH 1
-#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CLEAR_LBN 4
-#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CLEAR_WIDTH 1
-#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_NOEVENT_LBN 5
-#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_NOEVENT_WIDTH 1
-#define MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_LBN 16
-#define MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_WIDTH 16
-/* Length of DMA data (optional) */
-#define MC_CMD_AOE_IN_MAC_STATS_DMA_LEN_OFST 20
-
-/* MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO msgrequest */
-#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_LEN 12
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-/* AOE port */
-#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PORT_OFST 4
-#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PAGE_OFST 8
-
-/* MC_CMD_AOE_IN_JTAG_WRITE msgrequest */
-#define MC_CMD_AOE_IN_JTAG_WRITE_LENMIN 12
-#define MC_CMD_AOE_IN_JTAG_WRITE_LENMAX 252
-#define MC_CMD_AOE_IN_JTAG_WRITE_LEN(num) (8+4*(num))
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-#define MC_CMD_AOE_IN_JTAG_WRITE_DATALEN_OFST 4
-#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_OFST 8
-#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_LEN 4
-#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_MINNUM 1
-#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_MAXNUM 61
-
-/* MC_CMD_AOE_IN_FPGA_ACCESS msgrequest */
-#define MC_CMD_AOE_IN_FPGA_ACCESS_LEN 8
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-/* Enable or disable access */
-#define MC_CMD_AOE_IN_FPGA_ACCESS_OP_OFST 4
-/* enum: Enable access */
-#define MC_CMD_AOE_IN_FPGA_ACCESS_ENABLE 0x1
-/* enum: Disable access */
-#define MC_CMD_AOE_IN_FPGA_ACCESS_DISABLE 0x2
-
-/* MC_CMD_AOE_IN_SET_MTU_OFFSET msgrequest */
-#define MC_CMD_AOE_IN_SET_MTU_OFFSET_LEN 12
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-/* AOE port - when not ALL_EXTERNAL or ALL_INTERNAL specifies port number */
-#define MC_CMD_AOE_IN_SET_MTU_OFFSET_PORT_OFST 4
-/* enum: Apply to all external ports */
-#define MC_CMD_AOE_IN_SET_MTU_OFFSET_ALL_EXTERNAL 0x8000
-/* enum: Apply to all internal ports */
-#define MC_CMD_AOE_IN_SET_MTU_OFFSET_ALL_INTERNAL 0x4000
-/* The MTU offset to be applied to the external ports */
-#define MC_CMD_AOE_IN_SET_MTU_OFFSET_OFFSET_OFST 8
-
-/* MC_CMD_AOE_IN_LINK_STATE msgrequest */
-#define MC_CMD_AOE_IN_LINK_STATE_LEN 8
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-#define MC_CMD_AOE_IN_LINK_STATE_MODE_OFST 4
-#define MC_CMD_AOE_IN_LINK_STATE_CONFIG_MODE_LBN 0
-#define MC_CMD_AOE_IN_LINK_STATE_CONFIG_MODE_WIDTH 8
-/* enum: AOE and associated external port */
-#define MC_CMD_AOE_IN_LINK_STATE_SIMPLE_SEPARATE 0x0
-/* enum: AOE and OR of all external ports */
-#define MC_CMD_AOE_IN_LINK_STATE_SIMPLE_COMBINED 0x1
-/* enum: Individual ports */
-#define MC_CMD_AOE_IN_LINK_STATE_DIAGNOSTIC 0x2
-/* enum: Configure link state mode on given AOE port */
-#define MC_CMD_AOE_IN_LINK_STATE_CUSTOM 0x3
-#define MC_CMD_AOE_IN_LINK_STATE_OPERATION_LBN 8
-#define MC_CMD_AOE_IN_LINK_STATE_OPERATION_WIDTH 8
-/* enum: No-op */
-#define MC_CMD_AOE_IN_LINK_STATE_OP_NONE 0x0
-/* enum: logical OR of all SFP ports link status */
-#define MC_CMD_AOE_IN_LINK_STATE_OP_OR 0x1
-/* enum: logical AND of all SFP ports link status */
-#define MC_CMD_AOE_IN_LINK_STATE_OP_AND 0x2
-#define MC_CMD_AOE_IN_LINK_STATE_SFP_MASK_LBN 16
-#define MC_CMD_AOE_IN_LINK_STATE_SFP_MASK_WIDTH 16
-
-/* MC_CMD_AOE_IN_SIENA_STATS msgrequest */
-#define MC_CMD_AOE_IN_SIENA_STATS_LEN 8
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-/* How MAC statistics are reported */
-#define MC_CMD_AOE_IN_SIENA_STATS_MODE_OFST 4
-/* enum: Statistics from Siena (default) */
-#define MC_CMD_AOE_IN_SIENA_STATS_STATS_SIENA 0x0
-/* enum: Statistics from AOE external ports */
-#define MC_CMD_AOE_IN_SIENA_STATS_STATS_AOE 0x1
-
-/* MC_CMD_AOE_IN_ASIC_STATS msgrequest */
-#define MC_CMD_AOE_IN_ASIC_STATS_LEN 8
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-/* How MAC statistics are reported */
-#define MC_CMD_AOE_IN_ASIC_STATS_MODE_OFST 4
-/* enum: Statistics from the ASIC (default) */
-#define MC_CMD_AOE_IN_ASIC_STATS_STATS_ASIC 0x0
-/* enum: Statistics from AOE external ports */
-#define MC_CMD_AOE_IN_ASIC_STATS_STATS_AOE 0x1
-
-/* MC_CMD_AOE_IN_DDR msgrequest */
-#define MC_CMD_AOE_IN_DDR_LEN 12
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-#define MC_CMD_AOE_IN_DDR_BANK_OFST 4
-/* Enum values, see field(s): */
-/* MC_CMD_FC/MC_CMD_FC_IN_DDR/MC_CMD_FC_IN_DDR_BANK */
-/* Page index of SPD data */
-#define MC_CMD_AOE_IN_DDR_SPD_PAGE_ID_OFST 8
-
-/* MC_CMD_AOE_IN_FC msgrequest */
-#define MC_CMD_AOE_IN_FC_LEN 4
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-
-/* MC_CMD_AOE_IN_DDR_ECC_STATUS msgrequest */
-#define MC_CMD_AOE_IN_DDR_ECC_STATUS_LEN 8
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-#define MC_CMD_AOE_IN_DDR_ECC_STATUS_BANK_OFST 4
-/* Enum values, see field(s): */
-/* MC_CMD_FC/MC_CMD_FC_IN_DDR/MC_CMD_FC_IN_DDR_BANK */
-
-/* MC_CMD_AOE_IN_MC_SPI_MASTER msgrequest */
-#define MC_CMD_AOE_IN_MC_SPI_MASTER_LEN 8
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-/* Basic commands for MC SPI Master emulation. */
-#define MC_CMD_AOE_IN_MC_SPI_MASTER_OP_OFST 4
-/* enum: MC SPI read */
-#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ 0x0
-/* enum: MC SPI write */
-#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE 0x1
-
-/* MC_CMD_AOE_IN_MC_SPI_MASTER_READ msgrequest */
-#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_LEN 12
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OP_OFST 4
-#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OFFSET_OFST 8
-
-/* MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE msgrequest */
-#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_LEN 16
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OP_OFST 4
-#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OFFSET_OFST 8
-#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_DATA_OFST 12
-
-/* MC_CMD_AOE_IN_FC_BOOT msgrequest */
-#define MC_CMD_AOE_IN_FC_BOOT_LEN 8
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-/* FC boot control flags */
-#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_OFST 4
-#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_BOOT_ENABLE_LBN 0
-#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_BOOT_ENABLE_WIDTH 1
-
-/* MC_CMD_AOE_OUT_INFO msgresponse */
-#define MC_CMD_AOE_OUT_INFO_LEN 44
-/* JTAG IDCODE of CPLD */
-#define MC_CMD_AOE_OUT_INFO_CPLD_IDCODE_OFST 0
-/* Version of CPLD */
-#define MC_CMD_AOE_OUT_INFO_CPLD_VERSION_OFST 4
-/* JTAG IDCODE of FPGA */
-#define MC_CMD_AOE_OUT_INFO_FPGA_IDCODE_OFST 8
-/* JTAG USERCODE of FPGA */
-#define MC_CMD_AOE_OUT_INFO_FPGA_VERSION_OFST 12
-/* FPGA type - read from CPLD straps */
-#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_OFST 16
-#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_A5_C2 0x1 /* enum */
-#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_A7_C2 0x2 /* enum */
-/* FPGA state (debug) */
-#define MC_CMD_AOE_OUT_INFO_FPGA_STATE_OFST 20
-/* FPGA image - partition from which loaded */
-#define MC_CMD_AOE_OUT_INFO_FPGA_IMAGE_OFST 24
-/* FC state */
-#define MC_CMD_AOE_OUT_INFO_FC_STATE_OFST 28
-/* enum: Set if watchdog working */
-#define MC_CMD_AOE_OUT_INFO_WATCHDOG 0x1
-/* enum: Set if MC-FC communications working */
-#define MC_CMD_AOE_OUT_INFO_COMMS 0x2
-/* Random pieces of information */
-#define MC_CMD_AOE_OUT_INFO_FLAGS_OFST 32
-/* enum: Power to FPGA supplied by PEG connector, not PCIe bus */
-#define MC_CMD_AOE_OUT_INFO_PEG_POWER 0x1
-/* enum: CPLD apparently good */
-#define MC_CMD_AOE_OUT_INFO_CPLD_GOOD 0x2
-/* enum: FPGA working normally */
-#define MC_CMD_AOE_OUT_INFO_FPGA_GOOD 0x4
-/* enum: FPGA is powered */
-#define MC_CMD_AOE_OUT_INFO_FPGA_POWER 0x8
-/* enum: Board has incompatible SODIMMs fitted */
-#define MC_CMD_AOE_OUT_INFO_BAD_SODIMM 0x10
-/* enum: Board has ByteBlaster connected */
-#define MC_CMD_AOE_OUT_INFO_HAS_BYTEBLASTER 0x20
-/* enum: FPGA Boot flash has an invalid header. */
-#define MC_CMD_AOE_OUT_INFO_FPGA_BAD_BOOT_HDR 0x40
-/* enum: FPGA Application flash is accessible. */
-#define MC_CMD_AOE_OUT_INFO_FPGA_APP_FLASH_GOOD 0x80
-/* Revision of Modena and Sorrento boards. Sorrento can be R1_2 or R1_3. */
-#define MC_CMD_AOE_OUT_INFO_BOARD_REVISION_OFST 36
-#define MC_CMD_AOE_OUT_INFO_UNKNOWN 0x0 /* enum */
-#define MC_CMD_AOE_OUT_INFO_R1_0 0x10 /* enum */
-#define MC_CMD_AOE_OUT_INFO_R1_1 0x11 /* enum */
-#define MC_CMD_AOE_OUT_INFO_R1_2 0x12 /* enum */
-#define MC_CMD_AOE_OUT_INFO_R1_3 0x13 /* enum */
-/* Result of FC booting - not valid while a ByteBlaster is connected. */
-#define MC_CMD_AOE_OUT_INFO_FC_BOOT_RESULT_OFST 40
-/* enum: No error */
-#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_NO_ERROR 0x0
-/* enum: Bad address set in CPLD */
-#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_ADDRESS 0x1
-/* enum: Bad header */
-#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_MAGIC 0x2
-/* enum: Bad text section details */
-#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_TEXT 0x3
-/* enum: Bad checksum */
-#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_CHECKSUM 0x4
-/* enum: Bad BSP */
-#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_BSP 0x5
-/* enum: Flash mode is invalid */
-#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_INVALID_FLASH_MODE 0x6
-/* enum: FC application loaded and execution attempted */
-#define MC_CMD_AOE_OUT_INFO_FC_BOOT_APP_EXECUTE 0x80
-/* enum: FC application Started */
-#define MC_CMD_AOE_OUT_INFO_FC_BOOT_APP_STARTED 0x81
-/* enum: No bootrom in FPGA */
-#define MC_CMD_AOE_OUT_INFO_FC_BOOT_NO_BOOTROM 0xff
-
-/* MC_CMD_AOE_OUT_CURRENTS msgresponse */
-#define MC_CMD_AOE_OUT_CURRENTS_LEN 68
-/* Set of currents and voltages (mA or mV as appropriate) */
-#define MC_CMD_AOE_OUT_CURRENTS_VALUES_OFST 0
-#define MC_CMD_AOE_OUT_CURRENTS_VALUES_LEN 4
-#define MC_CMD_AOE_OUT_CURRENTS_VALUES_NUM 17
-#define MC_CMD_AOE_OUT_CURRENTS_I_2V5 0x0 /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_I_1V8 0x1 /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_I_GXB 0x2 /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_I_PGM 0x3 /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_I_XCVR 0x4 /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_I_1V5 0x5 /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_V_3V3 0x6 /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_V_1V5 0x7 /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_I_IN 0x8 /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_I_OUT 0x9 /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_V_IN 0xa /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR1 0xb /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR1 0xc /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR2 0xd /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR2 0xe /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR3 0xf /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR3 0x10 /* enum */
-
-/* MC_CMD_AOE_OUT_TEMPERATURES msgresponse */
-#define MC_CMD_AOE_OUT_TEMPERATURES_LEN 40
-/* Set of temperatures */
-#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_OFST 0
-#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_LEN 4
-#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_NUM 10
-/* enum: The first set of enum values are for Modena code. */
-#define MC_CMD_AOE_OUT_TEMPERATURES_MAIN_0 0x0
-#define MC_CMD_AOE_OUT_TEMPERATURES_MAIN_1 0x1 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_IND_0 0x2 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_IND_1 0x3 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO1 0x4 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO2 0x5 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO3 0x6 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_PSU 0x7 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_FPGA 0x8 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_SIENA 0x9 /* enum */
-/* enum: The second set of enum values are for Sorrento code. */
-#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_MAIN_0 0x0
-#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_MAIN_1 0x1 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_IND_0 0x2 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_IND_1 0x3 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_SODIMM_0 0x4 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_SODIMM_1 0x5 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_FPGA 0x6 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_PHY0 0x7 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_PHY1 0x8 /* enum */
-
-/* MC_CMD_AOE_OUT_CPLD_READ msgresponse */
-#define MC_CMD_AOE_OUT_CPLD_READ_LEN 4
-/* The value read from the CPLD */
-#define MC_CMD_AOE_OUT_CPLD_READ_VALUE_OFST 0
-
-/* MC_CMD_AOE_OUT_FAN_FAILURES msgresponse */
-#define MC_CMD_AOE_OUT_FAN_FAILURES_LENMIN 4
-#define MC_CMD_AOE_OUT_FAN_FAILURES_LENMAX 252
-#define MC_CMD_AOE_OUT_FAN_FAILURES_LEN(num) (0+4*(num))
-/* Failure counts for each fan */
-#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_OFST 0
-#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_LEN 4
-#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_MINNUM 1
-#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_MAXNUM 63
-
-/* MC_CMD_AOE_OUT_CPLD_REPROGRAM msgresponse */
-#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_LEN 4
-/* Results of status command (only) */
-#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_STATUS_OFST 0
-
-/* MC_CMD_AOE_OUT_POWER_OFF msgresponse */
-#define MC_CMD_AOE_OUT_POWER_OFF_LEN 0
-
-/* MC_CMD_AOE_OUT_POWER_ON msgresponse */
-#define MC_CMD_AOE_OUT_POWER_ON_LEN 0
-
-/* MC_CMD_AOE_OUT_LOAD msgresponse */
-#define MC_CMD_AOE_OUT_LOAD_LEN 0
-
-/* MC_CMD_AOE_OUT_MAC_STATS_DMA msgresponse */
-#define MC_CMD_AOE_OUT_MAC_STATS_DMA_LEN 0
-
-/* MC_CMD_AOE_OUT_MAC_STATS_NO_DMA msgresponse: See MC_CMD_MAC_STATS_OUT_NO_DMA
- * for details
- */
-#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3)
-#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_OFST 0
-#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LEN 8
-#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LO_OFST 0
-#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_HI_OFST 4
-#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
-
-/* MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO msgresponse */
-#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LENMIN 5
-#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LENMAX 252
-#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LEN(num) (4+1*(num))
-/* in bytes */
-#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATALEN_OFST 0
-#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_OFST 4
-#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_LEN 1
-#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_MINNUM 1
-#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_MAXNUM 248
-
-/* MC_CMD_AOE_OUT_JTAG_WRITE msgresponse */
-#define MC_CMD_AOE_OUT_JTAG_WRITE_LENMIN 12
-#define MC_CMD_AOE_OUT_JTAG_WRITE_LENMAX 252
-#define MC_CMD_AOE_OUT_JTAG_WRITE_LEN(num) (8+4*(num))
-/* Used to align the in and out data blocks so the MC can re-use the cmd */
-#define MC_CMD_AOE_OUT_JTAG_WRITE_DATALEN_OFST 0
-/* out bytes */
-#define MC_CMD_AOE_OUT_JTAG_WRITE_PAD_OFST 4
-#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_OFST 8
-#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_LEN 4
-#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_MINNUM 1
-#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_MAXNUM 61
-
-/* MC_CMD_AOE_OUT_FPGA_ACCESS msgresponse */
-#define MC_CMD_AOE_OUT_FPGA_ACCESS_LEN 0
-
-/* MC_CMD_AOE_OUT_DDR msgresponse */
-#define MC_CMD_AOE_OUT_DDR_LENMIN 17
-#define MC_CMD_AOE_OUT_DDR_LENMAX 252
-#define MC_CMD_AOE_OUT_DDR_LEN(num) (16+1*(num))
-/* Information on the module. */
-#define MC_CMD_AOE_OUT_DDR_FLAGS_OFST 0
-#define MC_CMD_AOE_OUT_DDR_PRESENT_LBN 0
-#define MC_CMD_AOE_OUT_DDR_PRESENT_WIDTH 1
-#define MC_CMD_AOE_OUT_DDR_POWERED_LBN 1
-#define MC_CMD_AOE_OUT_DDR_POWERED_WIDTH 1
-#define MC_CMD_AOE_OUT_DDR_OPERATIONAL_LBN 2
-#define MC_CMD_AOE_OUT_DDR_OPERATIONAL_WIDTH 1
-#define MC_CMD_AOE_OUT_DDR_NOT_REACHABLE_LBN 3
-#define MC_CMD_AOE_OUT_DDR_NOT_REACHABLE_WIDTH 1
-/* Memory size, in MB. */
-#define MC_CMD_AOE_OUT_DDR_CAPACITY_OFST 4
-/* The memory type, as reported from SPD information */
-#define MC_CMD_AOE_OUT_DDR_TYPE_OFST 8
-/* Nominal voltage of the module (as applied) */
-#define MC_CMD_AOE_OUT_DDR_VOLTAGE_OFST 12
-/* SPD data read from the module */
-#define MC_CMD_AOE_OUT_DDR_SPD_OFST 16
-#define MC_CMD_AOE_OUT_DDR_SPD_LEN 1
-#define MC_CMD_AOE_OUT_DDR_SPD_MINNUM 1
-#define MC_CMD_AOE_OUT_DDR_SPD_MAXNUM 236
-
-/* MC_CMD_AOE_OUT_SET_MTU_OFFSET msgresponse */
-#define MC_CMD_AOE_OUT_SET_MTU_OFFSET_LEN 0
-
-/* MC_CMD_AOE_OUT_LINK_STATE msgresponse */
-#define MC_CMD_AOE_OUT_LINK_STATE_LEN 0
-
-/* MC_CMD_AOE_OUT_SIENA_STATS msgresponse */
-#define MC_CMD_AOE_OUT_SIENA_STATS_LEN 0
-
-/* MC_CMD_AOE_OUT_ASIC_STATS msgresponse */
-#define MC_CMD_AOE_OUT_ASIC_STATS_LEN 0
-
-/* MC_CMD_AOE_OUT_FC msgresponse */
-#define MC_CMD_AOE_OUT_FC_LEN 0
-
-/* MC_CMD_AOE_OUT_DDR_ECC_STATUS msgresponse */
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_LEN 8
-/* Flags describing status info on the module. */
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_FLAGS_OFST 0
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_VALID_LBN 0
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_VALID_WIDTH 1
-/* DDR ECC status on the module. */
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_STATUS_OFST 4
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_LBN 0
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_WIDTH 1
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_LBN 1
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_WIDTH 1
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_LBN 2
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_WIDTH 1
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_LBN 8
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_WIDTH 8
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_COUNT_LBN 16
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_COUNT_WIDTH 8
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_COUNT_LBN 24
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_COUNT_WIDTH 8
-
-/* MC_CMD_AOE_OUT_MC_SPI_MASTER_READ msgresponse */
-#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_LEN 4
-#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_DATA_OFST 0
-
-/* MC_CMD_AOE_OUT_MC_SPI_MASTER_WRITE msgresponse */
-#define MC_CMD_AOE_OUT_MC_SPI_MASTER_WRITE_LEN 0
-
-/* MC_CMD_AOE_OUT_MC_SPI_MASTER msgresponse */
-#define MC_CMD_AOE_OUT_MC_SPI_MASTER_LEN 0
-
-/* MC_CMD_AOE_OUT_FC_BOOT msgresponse */
-#define MC_CMD_AOE_OUT_FC_BOOT_LEN 0
-
-
-/***********************************/
/* MC_CMD_PTP
* Perform PTP operation
*/
@@ -3616,41 +1344,54 @@
#define MC_CMD_PTP_OP_ENABLE 0x1
/* enum: Disable PTP packet timestamping operation. */
#define MC_CMD_PTP_OP_DISABLE 0x2
-/* enum: Send a PTP packet. */
+/* enum: Send a PTP packet. This operation is used on Siena and Huntington.
+ * From Medford onwards it is not supported: on those platforms PTP transmit
+ * timestamping is done using the fast path.
+ */
#define MC_CMD_PTP_OP_TRANSMIT 0x3
/* enum: Read the current NIC time. */
#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4
-/* enum: Get the current PTP status. */
+/* enum: Get the current PTP status. Note that the clock frequency returned (in
+ * Hz) is rounded to the nearest MHz (e.g. 666000000 for 666666666).
+ */
#define MC_CMD_PTP_OP_STATUS 0x5
/* enum: Adjust the PTP NIC's time. */
#define MC_CMD_PTP_OP_ADJUST 0x6
/* enum: Synchronize host and NIC time. */
#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7
-/* enum: Basic manufacturing tests. */
+/* enum: Basic manufacturing tests. Siena PTP adapters only. */
#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8
-/* enum: Packet based manufacturing tests. */
+/* enum: Packet based manufacturing tests. Siena PTP adapters only. */
#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9
/* enum: Reset some of the PTP related statistics */
#define MC_CMD_PTP_OP_RESET_STATS 0xa
/* enum: Debug operations to MC. */
#define MC_CMD_PTP_OP_DEBUG 0xb
-/* enum: Read an FPGA register */
+/* enum: Read an FPGA register. Siena PTP adapters only. */
#define MC_CMD_PTP_OP_FPGAREAD 0xc
-/* enum: Write an FPGA register */
+/* enum: Write an FPGA register. Siena PTP adapters only. */
#define MC_CMD_PTP_OP_FPGAWRITE 0xd
/* enum: Apply an offset to the NIC clock */
#define MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe
-/* enum: Change Apply an offset to the NIC clock */
+/* enum: Change the frequency correction applied to the NIC clock */
#define MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf
-/* enum: Set the MC packet filter VLAN tags for received PTP packets */
+/* enum: Set the MC packet filter VLAN tags for received PTP packets.
+ * Deprecated for Huntington onwards.
+ */
#define MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10
-/* enum: Set the MC packet filter UUID for received PTP packets */
+/* enum: Set the MC packet filter UUID for received PTP packets. Deprecated for
+ * Huntington onwards.
+ */
#define MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11
-/* enum: Set the MC packet filter Domain for received PTP packets */
+/* enum: Set the MC packet filter Domain for received PTP packets. Deprecated
+ * for Huntington onwards.
+ */
#define MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12
-/* enum: Set the clock source */
+/* enum: Set the clock source. Required for snapper tests on Huntington and
+ * Medford. Not implemented for Siena or Medford2.
+ */
#define MC_CMD_PTP_OP_SET_CLK_SRC 0x13
-/* enum: Reset value of Timer Reg. */
+/* enum: Reset value of Timer Reg. Not implemented. */
#define MC_CMD_PTP_OP_RST_CLK 0x14
/* enum: Enable the forwarding of PPS events to the host */
#define MC_CMD_PTP_OP_PPS_ENABLE 0x15
@@ -3671,7 +1412,7 @@
/* enum: Unsubscribe to stop receiving time events */
#define MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19
/* enum: PPS based manufacturing tests. Requires PPS output to be looped to PPS
- * input on the same NIC.
+ * input on the same NIC. Siena PTP adapters only.
*/
#define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
/* enum: Set the PTP sync status. Status is used by firmware to report to event
@@ -3684,11 +1425,15 @@
/* MC_CMD_PTP_IN_ENABLE msgrequest */
#define MC_CMD_PTP_IN_ENABLE_LEN 16
#define MC_CMD_PTP_IN_CMD_OFST 0
+#define MC_CMD_PTP_IN_CMD_LEN 4
#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4
-/* Event queue for PTP events */
+#define MC_CMD_PTP_IN_PERIPH_ID_LEN 4
+/* Not used. Events are always sent to function relative queue 0. */
#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
-/* PTP timestamping mode */
+#define MC_CMD_PTP_IN_ENABLE_QUEUE_LEN 4
+/* PTP timestamping mode. Not used from Huntington onwards. */
#define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12
+#define MC_CMD_PTP_IN_ENABLE_MODE_LEN 4
/* enum: PTP, version 1 */
#define MC_CMD_PTP_MODE_V1 0x0
/* enum: PTP, version 1, with VLAN headers - deprecated */
@@ -3705,16 +1450,21 @@
/* MC_CMD_PTP_IN_DISABLE msgrequest */
#define MC_CMD_PTP_IN_DISABLE_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_TRANSMIT msgrequest */
#define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13
#define MC_CMD_PTP_IN_TRANSMIT_LENMAX 252
#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Transmit packet length */
#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8
+#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_LEN 4
/* Transmit packet data */
#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
@@ -3724,17 +1474,30 @@
/* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */
#define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_READ_NIC_TIME_V2 msgrequest */
+#define MC_CMD_PTP_IN_READ_NIC_TIME_V2_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_STATUS msgrequest */
#define MC_CMD_PTP_IN_STATUS_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_ADJUST msgrequest */
#define MC_CMD_PTP_IN_ADJUST_LEN 24
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Frequency adjustment 40 bit fixed point ns */
#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
@@ -3742,21 +1505,67 @@
#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
/* enum: Number of fractional bits in frequency adjustment */
#define MC_CMD_PTP_IN_ADJUST_BITS 0x28
+/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
+ * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES
+ * field.
+ */
+#define MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c
/* Time adjustment in seconds */
#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_SECONDS_LEN 4
/* Time adjustment major value */
#define MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_MAJOR_LEN 4
/* Time adjustment in nanoseconds */
#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_LEN 4
/* Time adjustment minor value */
#define MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_MINOR_LEN 4
+
+/* MC_CMD_PTP_IN_ADJUST_V2 msgrequest */
+#define MC_CMD_PTP_IN_ADJUST_V2_LEN 28
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LEN 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
+ * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES
+ * field.
+ */
+/* MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_LEN 4
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_LEN 4
+/* Upper 32bits of major time offset adjustment */
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_OFST 24
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_LEN 4
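/*
 * A minimal host-side sketch (not defined in this file) of encoding the FREQ
 * field for MC_CMD_PTP_OP_ADJUST / MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST. It
 * assumes FREQ carries the fractional frequency offset (seconds gained per
 * second) as a signed fixed-point value with MC_CMD_PTP_IN_ADJUST_BITS
 * fractional bits, or MC_CMD_PTP_IN_ADJUST_BITS_FP44 bits when the
 * FP44_FREQ_ADJ capability is reported; confirm against the firmware
 * documentation before relying on it. The 64-bit result fills the
 * FREQ_LO/FREQ_HI dwords of the request.
 */
#include <stdint.h>

static int64_t ptp_freq_adj_encode(int64_t ppb, unsigned int frac_bits)
{
	/*
	 * word = ppb * 2^frac_bits / 1e9. Since 1e9 == 2^9 * 1953125, split
	 * the shift so the intermediate stays within 64 bits for realistic
	 * parts-per-billion values.
	 */
	return (ppb * (int64_t)(1ULL << (frac_bits - 9))) / 1953125LL;
}

/* Example: ptp_freq_adj_encode(500, 0x28) encodes +500 ppb with the default
 * 40 fractional bits.
 */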
/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Number of time readings to capture */
#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8
+#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_LEN 4
/* Host address in which to write "synchronization started" indication (64
* bits)
*/
@@ -3768,42 +1577,58 @@
/* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */
#define MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_MANFTEST_PACKET msgrequest */
#define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Enable or disable packet testing */
#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_LEN 4
-/* MC_CMD_PTP_IN_RESET_STATS msgrequest */
+/* MC_CMD_PTP_IN_RESET_STATS msgrequest: Reset PTP statistics */
#define MC_CMD_PTP_IN_RESET_STATS_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
-/* Reset PTP statistics */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_DEBUG msgrequest */
#define MC_CMD_PTP_IN_DEBUG_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Debug operations */
#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8
+#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_LEN 4
/* MC_CMD_PTP_IN_FPGAREAD msgrequest */
#define MC_CMD_PTP_IN_FPGAREAD_LEN 16
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
#define MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAREAD_ADDR_LEN 4
#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12
+#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_LEN 4
/* MC_CMD_PTP_IN_FPGAWRITE msgrequest */
#define MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13
#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252
#define MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num))
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_LEN 4
#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12
#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1
#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1
@@ -3812,34 +1637,67 @@
/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Time adjustment in seconds */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_LEN 4
/* Time adjustment major value */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_LEN 4
/* Time adjustment in nanoseconds */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_LEN 4
/* Time adjustment minor value */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_LEN 4
+
+/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2 msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_LEN 4
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_LEN 4
+/* Upper 32bits of major time offset adjustment */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_OFST 16
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_LEN 4
/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Frequency adjustment 40 bit fixed point ns */
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12
-/* enum: Number of fractional bits in frequency adjustment */
-/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+/* Enum values, see field(s): */
+/* MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST/FREQ */
/* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */
#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Number of VLAN tags, 0 if not VLAN */
#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_LEN 4
/* Set of VLAN tags to filter against */
#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12
#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4
@@ -3848,9 +1706,12 @@
/* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* 1 to enable UUID filtering, 0 to disable */
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_LEN 4
/* UUID to filter against */
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8
@@ -3860,62 +1721,82 @@
/* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */
#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* 1 to enable Domain filtering, 0 to disable */
#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_LEN 4
/* Domain number to filter against */
#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_LEN 4
/* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */
#define MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Set the clock source. */
#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8
+#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_LEN 4
/* enum: Internal. */
#define MC_CMD_PTP_CLK_SRC_INTERNAL 0x0
/* enum: External. */
#define MC_CMD_PTP_CLK_SRC_EXTERNAL 0x1
-/* MC_CMD_PTP_IN_RST_CLK msgrequest */
+/* MC_CMD_PTP_IN_RST_CLK msgrequest: Reset value of Timer Reg. */
#define MC_CMD_PTP_IN_RST_CLK_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
-/* Reset value of Timer Reg. */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */
#define MC_CMD_PTP_IN_PPS_ENABLE_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* Enable or disable */
#define MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4
+#define MC_CMD_PTP_IN_PPS_ENABLE_OP_LEN 4
/* enum: Enable */
#define MC_CMD_PTP_ENABLE_PPS 0x0
/* enum: Disable */
#define MC_CMD_PTP_DISABLE_PPS 0x1
-/* Queue id to send events back */
+/* Not used. Events are always sent to function relative queue 0. */
#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
+#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_LEN 4
/* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */
#define MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */
#define MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */
#define MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Original field containing queue ID. Now extended to include flags. */
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_LEN 4
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31
@@ -3924,29 +1805,39 @@
/* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Unsubscribe options */
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_LEN 4
/* enum: Unsubscribe a single queue */
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0
/* enum: Unsubscribe all queues */
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1
/* Event queue ID */
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_LEN 4
/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* 1 to enable PPS test mode, 0 to disable and return result. */
#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_LEN 4
/* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* NIC - Host System Clock Synchronization status */
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_LEN 4
/* enum: Host System clock and NIC clock are not in sync */
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0
/* enum: Host System clock and NIC clock are synchronized */
@@ -3955,8 +1846,11 @@
* no longer in sync.
*/
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_LEN 4
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_LEN 4
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_LEN 4
/* MC_CMD_PTP_OUT msgresponse */
#define MC_CMD_PTP_OUT_LEN 0
@@ -3965,12 +1859,16 @@
#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
+#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_LEN 4
/* Timestamp major value */
#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0
+#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_LEN 4
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
+#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_LEN 4
/* Timestamp minor value */
#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4
+#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_LEN 4
/* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */
#define MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0
@@ -3982,47 +1880,85 @@
#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_LEN 4
/* Timestamp major value */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_LEN 4
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_LEN 4
/* Timestamp minor value */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_LEN 4
+
+/* MC_CMD_PTP_OUT_READ_NIC_TIME_V2 msgresponse */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_LEN 12
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_LEN 4
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_LEN 4
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_LEN 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_LEN 4
+/* Upper 32bits of major timestamp value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_OFST 8
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_LEN 4
/* MC_CMD_PTP_OUT_STATUS msgresponse */
#define MC_CMD_PTP_OUT_STATUS_LEN 64
/* Frequency of NIC's hardware clock */
#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0
+#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_LEN 4
/* Number of packets transmitted and timestamped */
#define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4
+#define MC_CMD_PTP_OUT_STATUS_STATS_TX_LEN 4
/* Number of packets received and timestamped */
#define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8
+#define MC_CMD_PTP_OUT_STATUS_STATS_RX_LEN 4
/* Number of packets timestamped by the FPGA */
#define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12
+#define MC_CMD_PTP_OUT_STATUS_STATS_TS_LEN 4
/* Number of packets filter matched */
#define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16
+#define MC_CMD_PTP_OUT_STATUS_STATS_FM_LEN 4
/* Number of packets not filter matched */
#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20
+#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_LEN 4
/* Number of PPS overflows (noise on input?) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_LEN 4
/* Number of PPS bad periods */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_LEN 4
/* Minimum period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_LEN 4
/* Maximum period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_LEN 4
/* Last period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_LEN 4
/* Mean period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_LEN 4
/* Minimum offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_LEN 4
/* Maximum offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_LEN 4
/* Last offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_LEN 4
/* Mean offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_LEN 4
/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20
@@ -4035,23 +1971,31 @@
#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12
/* Host time immediately before NIC's hardware clock read */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_LEN 4
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_LEN 4
/* Timestamp major value */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_LEN 4
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_LEN 4
/* Timestamp minor value */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_LEN 4
/* Host time immediately after NIC's hardware clock read */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_LEN 4
/* Number of nanoseconds waited after reading NIC's hardware clock */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_LEN 4
/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8
/* Results of testing */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_LEN 4
/* enum: Successful test */
#define MC_CMD_PTP_MANF_SUCCESS 0x0
/* enum: FPGA load failed */
@@ -4084,15 +2028,19 @@
#define MC_CMD_PTP_MANF_CLOCK_READ 0xe
/* Presence of external oscillator */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_LEN 4
/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12
/* Results of testing */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_LEN 4
/* Number of packets received by FPGA */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_LEN 4
/* Number of packets received by Siena filters */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_LEN 4
/* MC_CMD_PTP_OUT_FPGAREAD msgresponse */
#define MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1
@@ -4108,9 +2056,11 @@
/* Time format required/used by this NIC. Applies to all PTP MCDI
* operations that pass times between the host and firmware. If this operation
* is not supported (older firmware) a format of seconds and nanoseconds should
- * be assumed.
+ * be assumed. Note this enum is deprecated. Do not add to it- use the
+ * TIME_FORMAT field in MC_CMD_PTP_OUT_GET_ATTRIBUTES instead.
*/
#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_LEN 4
/* enum: Times are in seconds and nanoseconds */
#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0
/* enum: Major register has units of 16 second per tick, minor 8 ns per tick */
@@ -4126,12 +2076,16 @@
* be assumed.
*/
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_LEN 4
/* enum: Times are in seconds and nanoseconds */
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0
/* enum: Major register has units of 16 second per tick, minor 8 ns per tick */
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1
/* enum: Major register has units of seconds, minor 2^-27s per tick */
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2
+/* enum: Major register units are seconds, minor units are quarter nanoseconds
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS 0x3
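/*
 * A minimal sketch (not defined in this file) of converting a MAJOR/MINOR
 * timestamp pair to nanoseconds for each TIME_FORMAT value above. The
 * conversions simply follow the enum comments; treat them as illustrative
 * and check the firmware documentation for the authoritative definition.
 */
#include <stdint.h>

static uint64_t ptp_time_to_ns(uint32_t time_format, uint64_t major,
			       uint32_t minor)
{
	switch (time_format) {
	case 0x0: /* SECONDS_NANOSECONDS: major in s, minor in ns */
		return major * 1000000000ULL + minor;
	case 0x1: /* 16SECONDS_8NANOSECONDS: major 16 s/tick, minor 8 ns/tick */
		return major * 16ULL * 1000000000ULL + (uint64_t)minor * 8ULL;
	case 0x2: /* SECONDS_27FRACTION: major in s, minor in 2^-27 s ticks */
		return major * 1000000000ULL +
		       (((uint64_t)minor * 1000000000ULL) >> 27);
	case 0x3: /* SECONDS_QTR_NANOSECONDS: major in s, minor in 0.25 ns ticks */
		return major * 1000000000ULL + minor / 4;
	default:
		return 0;
	}
}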
/* Minimum acceptable value for a corrected synchronization timeset. When
* comparing host and NIC clock times, the MC returns a set of samples that
* contain the host start and end time, the MC time when the host start was
@@ -4140,46 +2094,66 @@
* end and start times minus the time that the MC waited for host end.
*/
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_LEN 4
/* Various PTP capabilities */
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_LEN 4
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_LBN 1
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_LBN 2
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN 3
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_WIDTH 1
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_LEN 4
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_LEN 4
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_LEN 4
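/*
 * A minimal sketch (not defined in this file) applying the SYNC_WINDOW_MIN
 * rule described above to the TIMESET samples returned by
 * MC_CMD_PTP_OP_SYNCHRONIZE: the corrected window is HOSTEND - HOSTSTART -
 * WAITNS, and a sample is only acceptable if that value is at least
 * SYNC_WINDOW_MIN. The struct layout below illustrates the per-sample
 * fields and is not a definition from this header.
 */
#include <stdint.h>

struct ptp_timeset {
	uint32_t hoststart;	/* ..._SYNCHRONIZE_HOSTSTART */
	uint32_t major;		/* ..._SYNCHRONIZE_MAJOR */
	uint32_t minor;		/* ..._SYNCHRONIZE_MINOR */
	uint32_t hostend;	/* ..._SYNCHRONIZE_HOSTEND */
	uint32_t waitns;	/* ..._SYNCHRONIZE_WAITNS */
};

static int ptp_timeset_acceptable(const struct ptp_timeset *ts,
				  uint32_t sync_window_min)
{
	uint32_t corrected = ts->hostend - ts->hoststart - ts->waitns;

	return corrected >= sync_window_min;
}

/* A host would typically keep the acceptable sample with the smallest
 * corrected window when pairing host and NIC time.
 */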
/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
/* Uncorrected error on PTP transmit timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_LEN 4
/* Uncorrected error on PTP receive timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_LEN 4
/* Uncorrected error on PPS output in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_LEN 4
/* Uncorrected error on PPS input in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_LEN 4
/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24
/* Uncorrected error on PTP transmit timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_LEN 4
/* Uncorrected error on PTP receive timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_LEN 4
/* Uncorrected error on PPS output in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_LEN 4
/* Uncorrected error on PPS input in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_LEN 4
/* Uncorrected error on non-PTP transmit timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 16
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_LEN 4
/* Uncorrected error on non-PTP receive timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_LEN 4
/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
/* Results of testing */
#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
@@ -4194,14 +2168,17 @@
#define MC_CMD_CSR_READ32 0xc
#undef MC_CMD_0xc_PRIVILEGE_CTG
-#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_CSR_READ32_IN msgrequest */
#define MC_CMD_CSR_READ32_IN_LEN 12
/* Address */
#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_READ32_IN_ADDR_LEN 4
#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
+#define MC_CMD_CSR_READ32_IN_STEP_LEN 4
#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
+#define MC_CMD_CSR_READ32_IN_NUMWORDS_LEN 4
/* MC_CMD_CSR_READ32_OUT msgresponse */
#define MC_CMD_CSR_READ32_OUT_LENMIN 4
@@ -4221,7 +2198,7 @@
#define MC_CMD_CSR_WRITE32 0xd
#undef MC_CMD_0xd_PRIVILEGE_CTG
-#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_CSR_WRITE32_IN msgrequest */
#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
@@ -4229,7 +2206,9 @@
#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
/* Address */
#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_WRITE32_IN_ADDR_LEN 4
#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
+#define MC_CMD_CSR_WRITE32_IN_STEP_LEN 4
#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
#define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
#define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
@@ -4238,6 +2217,7 @@
/* MC_CMD_CSR_WRITE32_OUT msgresponse */
#define MC_CMD_CSR_WRITE32_OUT_LEN 4
#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
+#define MC_CMD_CSR_WRITE32_OUT_STATUS_LEN 4
/***********************************/
@@ -4248,7 +2228,7 @@
#define MC_CMD_HP 0x54
#undef MC_CMD_0x54_PRIVILEGE_CTG
-#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_HP_IN msgrequest */
#define MC_CMD_HP_IN_LEN 16
@@ -4259,6 +2239,7 @@
* sensors.
*/
#define MC_CMD_HP_IN_SUBCMD_OFST 0
+#define MC_CMD_HP_IN_SUBCMD_LEN 4
/* enum: OCSD (Option Card Sensor Data) sub-command. */
#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0
/* enum: Last known valid HP sub-command. */
@@ -4273,10 +2254,12 @@
* NULL.)
*/
#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
+#define MC_CMD_HP_IN_OCSD_INTERVAL_LEN 4
/* MC_CMD_HP_OUT msgresponse */
#define MC_CMD_HP_OUT_LEN 4
#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
+#define MC_CMD_HP_OUT_OCSD_STATUS_LEN 4
/* enum: OCSD stopped for this card. */
#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1
/* enum: OCSD was successfully started with the address provided. */
@@ -4323,29 +2306,35 @@
* external devices.
*/
#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
+#define MC_CMD_MDIO_READ_IN_BUS_LEN 4
/* enum: Internal. */
#define MC_CMD_MDIO_BUS_INTERNAL 0x0
/* enum: External. */
#define MC_CMD_MDIO_BUS_EXTERNAL 0x1
/* Port address */
#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
+#define MC_CMD_MDIO_READ_IN_PRTAD_LEN 4
/* Device Address or clause 22. */
#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
+#define MC_CMD_MDIO_READ_IN_DEVAD_LEN 4
/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
* want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
*/
#define MC_CMD_MDIO_CLAUSE22 0x20
/* Address */
#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
+#define MC_CMD_MDIO_READ_IN_ADDR_LEN 4
/* MC_CMD_MDIO_READ_OUT msgresponse */
#define MC_CMD_MDIO_READ_OUT_LEN 8
/* Value */
#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
+#define MC_CMD_MDIO_READ_OUT_VALUE_LEN 4
/* Status the MDIO commands return the raw status bits from the MDIO block. A
* "good" transaction should have the DONE bit set and all other bits clear.
*/
#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
+#define MC_CMD_MDIO_READ_OUT_STATUS_LEN 4
/* enum: Good. */
#define MC_CMD_MDIO_STATUS_GOOD 0x8
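/*
 * A minimal sketch (not defined in this file) of filling an MC_CMD_MDIO_READ
 * request for a clause 22 register read, using the offsets above. MCDI
 * payloads are built from little-endian dwords; the helper below is an
 * assumption about the surrounding MCDI code, not part of this header.
 */
#include <stdint.h>
#include <string.h>

static void put_dword_le(uint8_t *buf, unsigned int ofst, uint32_t value)
{
	buf[ofst + 0] = value & 0xff;
	buf[ofst + 1] = (value >> 8) & 0xff;
	buf[ofst + 2] = (value >> 16) & 0xff;
	buf[ofst + 3] = (value >> 24) & 0xff;
}

static void mdio_clause22_read_req(uint8_t *req, uint32_t prtad,
				   uint32_t reg_addr)
{
	memset(req, 0, 16);		 /* BUS, PRTAD, DEVAD, ADDR dwords */
	put_dword_le(req, 0, 0x1);	 /* BUS = MC_CMD_MDIO_BUS_EXTERNAL */
	put_dword_le(req, 4, prtad);	 /* MC_CMD_MDIO_READ_IN_PRTAD */
	put_dword_le(req, 8, 0x20);	 /* DEVAD = MC_CMD_MDIO_CLAUSE22 */
	put_dword_le(req, 12, reg_addr); /* MC_CMD_MDIO_READ_IN_ADDR */
}

/* On completion, MC_CMD_MDIO_READ_OUT_STATUS should equal
 * MC_CMD_MDIO_STATUS_GOOD (DONE set, all other bits clear) before VALUE is
 * trusted.
 */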
@@ -4365,22 +2354,27 @@
* external devices.
*/
#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
+#define MC_CMD_MDIO_WRITE_IN_BUS_LEN 4
/* enum: Internal. */
/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */
/* enum: External. */
/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
/* Port address */
#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
+#define MC_CMD_MDIO_WRITE_IN_PRTAD_LEN 4
/* Device Address or clause 22. */
#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
+#define MC_CMD_MDIO_WRITE_IN_DEVAD_LEN 4
/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
* want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
*/
/* MC_CMD_MDIO_CLAUSE22 0x20 */
/* Address */
#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
+#define MC_CMD_MDIO_WRITE_IN_ADDR_LEN 4
/* Value */
#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
+#define MC_CMD_MDIO_WRITE_IN_VALUE_LEN 4
/* MC_CMD_MDIO_WRITE_OUT msgresponse */
#define MC_CMD_MDIO_WRITE_OUT_LEN 4
@@ -4388,6 +2382,7 @@
* "good" transaction should have the DONE bit set and all other bits clear.
*/
#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
+#define MC_CMD_MDIO_WRITE_OUT_STATUS_LEN 4
/* enum: Good. */
/* MC_CMD_MDIO_STATUS_GOOD 0x8 */
@@ -4399,7 +2394,7 @@
#define MC_CMD_DBI_WRITE 0x12
#undef MC_CMD_0x12_PRIVILEGE_CTG
-#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DBI_WRITE_IN msgrequest */
#define MC_CMD_DBI_WRITE_IN_LENMIN 12
@@ -4419,9 +2414,11 @@
/* MC_CMD_DBIWROP_TYPEDEF structuredef */
#define MC_CMD_DBIWROP_TYPEDEF_LEN 12
#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LEN 4
#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LEN 4
#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
@@ -4431,6 +2428,7 @@
#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LEN 4
#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
@@ -4446,13 +2444,16 @@
#define MC_CMD_PORT_READ32_IN_LEN 4
/* Address */
#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
+#define MC_CMD_PORT_READ32_IN_ADDR_LEN 4
/* MC_CMD_PORT_READ32_OUT msgresponse */
#define MC_CMD_PORT_READ32_OUT_LEN 8
/* Value */
#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
+#define MC_CMD_PORT_READ32_OUT_VALUE_LEN 4
/* Status */
#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
+#define MC_CMD_PORT_READ32_OUT_STATUS_LEN 4
/***********************************/
@@ -4466,13 +2467,16 @@
#define MC_CMD_PORT_WRITE32_IN_LEN 8
/* Address */
#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_PORT_WRITE32_IN_ADDR_LEN 4
/* Value */
#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
+#define MC_CMD_PORT_WRITE32_IN_VALUE_LEN 4
/* MC_CMD_PORT_WRITE32_OUT msgresponse */
#define MC_CMD_PORT_WRITE32_OUT_LEN 4
/* Status */
#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
+#define MC_CMD_PORT_WRITE32_OUT_STATUS_LEN 4
/***********************************/
@@ -4486,6 +2490,7 @@
#define MC_CMD_PORT_READ128_IN_LEN 4
/* Address */
#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
+#define MC_CMD_PORT_READ128_IN_ADDR_LEN 4
/* MC_CMD_PORT_READ128_OUT msgresponse */
#define MC_CMD_PORT_READ128_OUT_LEN 20
@@ -4494,6 +2499,7 @@
#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
/* Status */
#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
+#define MC_CMD_PORT_READ128_OUT_STATUS_LEN 4
/***********************************/
@@ -4507,6 +2513,7 @@
#define MC_CMD_PORT_WRITE128_IN_LEN 20
/* Address */
#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
+#define MC_CMD_PORT_WRITE128_IN_ADDR_LEN 4
/* Value */
#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
@@ -4515,6 +2522,7 @@
#define MC_CMD_PORT_WRITE128_OUT_LEN 4
/* Status */
#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
+#define MC_CMD_PORT_WRITE128_OUT_STATUS_LEN 4
/* MC_CMD_CAPABILITIES structuredef */
#define MC_CMD_CAPABILITIES_LEN 4
@@ -4560,24 +2568,54 @@
#define MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136
#define MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num))
#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_LEN 4
#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
-/* See MC_CMD_CAPABILITIES */
+/* Capabilities for Siena Port0 (see struct MC_CMD_CAPABILITIES). Unused on
+ * EF10 and later (use MC_CMD_GET_CAPABILITIES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
-/* See MC_CMD_CAPABILITIES */
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_LEN 4
+/* Capabilities for Siena Port1 (see struct MC_CMD_CAPABILITIES). Unused on
+ * EF10 and later (use MC_CMD_GET_CAPABILITIES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_LEN 4
+/* Base MAC address for Siena Port0. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
+/* Base MAC address for Siena Port1. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6
+/* Size of MAC address pool for Siena Port0. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_LEN 4
+/* Size of MAC address pool for Siena Port1. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_LEN 4
+/* Increment between addresses in MAC address pool for Siena Port0. Unused on
+ * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_LEN 4
+/* Increment between addresses in MAC address pool for Siena Port1. Unused on
+ * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
-/* This field contains a 16-bit value for each of the types of NVRAM area. The
- * values are defined in the firmware/mc/platform/.c file for a specific board
- * type, but otherwise have no meaning to the MC; they are used by the driver
- * to manage selection of appropriate firmware updates.
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_LEN 4
+/* Siena only. This field contains a 16-bit value for each of the types of
+ * NVRAM area. The values are defined in the firmware/mc/platform/.c file for a
+ * specific board type, but otherwise have no meaning to the MC; they are used
+ * by the driver to manage selection of appropriate firmware updates. Unused on
+ * EF10 and later (use MC_CMD_NVRAM_METADATA).
*/
#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
@@ -4592,7 +2630,7 @@
#define MC_CMD_DBI_READX 0x19
#undef MC_CMD_0x19_PRIVILEGE_CTG
-#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DBI_READX_IN msgrequest */
#define MC_CMD_DBI_READX_IN_LENMIN 8
@@ -4619,9 +2657,11 @@
/* MC_CMD_DBIRDOP_TYPEDEF structuredef */
#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8
#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LEN 4
#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LEN 4
#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
@@ -4639,7 +2679,7 @@
#define MC_CMD_SET_RAND_SEED 0x1a
#undef MC_CMD_0x1a_PRIVILEGE_CTG
-#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_SET_RAND_SEED_IN msgrequest */
#define MC_CMD_SET_RAND_SEED_IN_LEN 16
@@ -4689,14 +2729,25 @@
#define MC_CMD_DRV_ATTACH_IN_LEN 12
/* new state to set if UPDATE=1 */
#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_LEN 4
#define MC_CMD_DRV_ATTACH_LBN 0
#define MC_CMD_DRV_ATTACH_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_ATTACH_LBN 0
+#define MC_CMD_DRV_ATTACH_IN_ATTACH_WIDTH 1
#define MC_CMD_DRV_PREBOOT_LBN 1
#define MC_CMD_DRV_PREBOOT_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_PREBOOT_LBN 1
+#define MC_CMD_DRV_ATTACH_IN_PREBOOT_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_LBN 2
+#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_LBN 3
+#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_WIDTH 1
/* 1 to set new state, or 0 to just report the existing state */
#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
+#define MC_CMD_DRV_ATTACH_IN_UPDATE_LEN 4
/* preferred datapath firmware (for Huntington; ignored for Siena) */
#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8
+#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_LEN 4
/* enum: Prefer to use full featured firmware */
#define MC_CMD_FW_FULL_FEATURED 0x0
/* enum: Prefer to use firmware with fewer features but lower latency */
@@ -4713,20 +2764,35 @@
* support
*/
#define MC_CMD_FW_RULES_ENGINE 0x5
+/* enum: Prefer to use firmware with additional DPDK support */
+#define MC_CMD_FW_DPDK 0x6
+/* enum: Prefer to use "l3xudp" custom datapath firmware (see SF-119495-PD and
+ * bug69716)
+ */
+#define MC_CMD_FW_L3XUDP 0x7
+/* enum: Requests that the MC keep whatever datapath firmware is currently
+ * running. It's used for test purposes, where we want to be able to shmboot
+ * special test firmware variants. This option is only recognised in eftest
+ * (i.e. non-production) builds.
+ */
+#define MC_CMD_FW_KEEP_CURRENT_EFTEST_ONLY 0xfffffffe
/* enum: Only this option is allowed for non-admin functions */
-#define MC_CMD_FW_DONT_CARE 0xffffffff
+#define MC_CMD_FW_DONT_CARE 0xffffffff
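/*
 * A minimal sketch (not defined in this file) of an MC_CMD_DRV_ATTACH request
 * that attaches the driver, asks the MC to apply the new state, and leaves
 * the datapath firmware choice to the MC (the only option allowed for
 * non-admin functions). The three dwords correspond to NEW_STATE, UPDATE and
 * FIRMWARE_ID at offsets 0, 4 and 8; conversion to little-endian MCDI dwords
 * is omitted here.
 */
#include <stdint.h>

static void drv_attach_req(uint32_t req[3])
{
	req[0] = 1u << 0;	/* NEW_STATE: MC_CMD_DRV_ATTACH_IN_ATTACH set */
	req[1] = 1;		/* UPDATE = 1: set the new state, not just report */
	req[2] = 0xffffffffu;	/* FIRMWARE_ID = MC_CMD_FW_DONT_CARE */
}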
/* MC_CMD_DRV_ATTACH_OUT msgresponse */
#define MC_CMD_DRV_ATTACH_OUT_LEN 4
/* previous or existing state, see the bitmask at NEW_STATE */
#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_LEN 4
/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */
#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8
/* previous or existing state, see the bitmask at NEW_STATE */
#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_LEN 4
/* Flags associated with this function */
#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_LEN 4
/* enum: Labels the lowest-numbered function visible to the OS */
#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
/* enum: The function can control the link state of the physical port it is
@@ -4739,6 +2805,11 @@
* refers to the Sorrento external FPGA port.
*/
#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT 0x3
+/* enum: If set, indicates that VI spreading is currently enabled. Will always
+ * indicate the current state, regardless of the value in the WANT_VI_SPREADING
+ * input.
+ */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_VI_SPREADING_ENABLED 0x4
/***********************************/
@@ -4751,6 +2822,7 @@
#define MC_CMD_SHMUART_IN_LEN 4
/* ??? */
#define MC_CMD_SHMUART_IN_FLAG_OFST 0
+#define MC_CMD_SHMUART_IN_FLAG_LEN 4
/* MC_CMD_SHMUART_OUT msgresponse */
#define MC_CMD_SHMUART_OUT_LEN 0
@@ -4789,6 +2861,7 @@
* (TBD).
*/
#define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0
+#define MC_CMD_ENTITY_RESET_IN_FLAG_LEN 4
#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0
#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1
@@ -4806,8 +2879,10 @@
#define MC_CMD_PCIE_CREDITS_IN_LEN 8
/* poll period. 0 is disabled */
#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
+#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_LEN 4
/* wipe statistics */
#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
+#define MC_CMD_PCIE_CREDITS_IN_WIPE_LEN 4
/* MC_CMD_PCIE_CREDITS_OUT msgresponse */
#define MC_CMD_PCIE_CREDITS_OUT_LEN 16
@@ -4838,31 +2913,54 @@
/* MC_CMD_RXD_MONITOR_IN msgrequest */
#define MC_CMD_RXD_MONITOR_IN_LEN 12
#define MC_CMD_RXD_MONITOR_IN_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_IN_QID_LEN 4
#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4
+#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_LEN 4
#define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8
+#define MC_CMD_RXD_MONITOR_IN_WIPE_LEN 4
/* MC_CMD_RXD_MONITOR_OUT msgresponse */
#define MC_CMD_RXD_MONITOR_OUT_LEN 80
#define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_OUT_QID_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48
+#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_LEN 4
/***********************************/
@@ -4872,13 +2970,14 @@
#define MC_CMD_PUTS 0x23
#undef MC_CMD_0x23_PRIVILEGE_CTG
-#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_PUTS_IN msgrequest */
#define MC_CMD_PUTS_IN_LENMIN 13
#define MC_CMD_PUTS_IN_LENMAX 252
#define MC_CMD_PUTS_IN_LEN(num) (12+1*(num))
#define MC_CMD_PUTS_IN_DEST_OFST 0
+#define MC_CMD_PUTS_IN_DEST_LEN 4
#define MC_CMD_PUTS_IN_UART_LBN 0
#define MC_CMD_PUTS_IN_UART_WIDTH 1
#define MC_CMD_PUTS_IN_PORT_LBN 1
@@ -4911,6 +3010,7 @@
#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
/* flags */
#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_LEN 4
#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1
@@ -4927,8 +3027,10 @@
#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
+#define MC_CMD_GET_PHY_CFG_OUT_TYPE_LEN 4
/* Bitmask of supported capabilities */
#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
+#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_LEN 4
#define MC_CMD_PHY_CAP_10HDX_LBN 1
#define MC_CMD_PHY_CAP_10HDX_WIDTH 1
#define MC_CMD_PHY_CAP_10FDX_LBN 2
@@ -4953,17 +3055,39 @@
#define MC_CMD_PHY_CAP_40000FDX_WIDTH 1
#define MC_CMD_PHY_CAP_DDM_LBN 12
#define MC_CMD_PHY_CAP_DDM_WIDTH 1
+#define MC_CMD_PHY_CAP_100000FDX_LBN 13
+#define MC_CMD_PHY_CAP_100000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_25000FDX_LBN 14
+#define MC_CMD_PHY_CAP_25000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_50000FDX_LBN 15
+#define MC_CMD_PHY_CAP_50000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_BASER_FEC_LBN 16
+#define MC_CMD_PHY_CAP_BASER_FEC_WIDTH 1
+#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN 17
+#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_WIDTH 1
+#define MC_CMD_PHY_CAP_RS_FEC_LBN 18
+#define MC_CMD_PHY_CAP_RS_FEC_WIDTH 1
+#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN 19
+#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_WIDTH 1
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_LBN 20
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_WIDTH 1
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN 21
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_WIDTH 1
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
+#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_LEN 4
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
+#define MC_CMD_GET_PHY_CFG_OUT_PRT_LEN 4
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
+#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_LEN 4
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
+#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_LEN 4
/* enum: Xaui. */
#define MC_CMD_MEDIA_XAUI 0x1
/* enum: CX4. */
@@ -4979,6 +3103,7 @@
/* enum: QSFP+. */
#define MC_CMD_MEDIA_QSFP_PLUS 0x7
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
+#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4
/* enum: Native clause 22 */
#define MC_CMD_MMD_CLAUSE22 0x0
#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
@@ -5004,12 +3129,13 @@
#define MC_CMD_START_BIST 0x25
#undef MC_CMD_0x25_PRIVILEGE_CTG
-#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_START_BIST_IN msgrequest */
#define MC_CMD_START_BIST_IN_LEN 4
/* Type of test. */
#define MC_CMD_START_BIST_IN_TYPE_OFST 0
+#define MC_CMD_START_BIST_IN_TYPE_LEN 4
/* enum: Run the PHY's short cable BIST. */
#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1
/* enum: Run the PHY's long cable BIST. */
@@ -5043,7 +3169,7 @@
#define MC_CMD_POLL_BIST 0x26
#undef MC_CMD_0x26_PRIVILEGE_CTG
-#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_POLL_BIST_IN msgrequest */
#define MC_CMD_POLL_BIST_IN_LEN 0
@@ -5052,6 +3178,7 @@
#define MC_CMD_POLL_BIST_OUT_LEN 8
/* result */
#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
+#define MC_CMD_POLL_BIST_OUT_RESULT_LEN 4
/* enum: Running. */
#define MC_CMD_POLL_BIST_RUNNING 0x1
/* enum: Passed. */
@@ -5061,19 +3188,26 @@
/* enum: Timed-out. */
#define MC_CMD_POLL_BIST_TIMEOUT 0x4
#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
+#define MC_CMD_POLL_BIST_OUT_PRIVATE_LEN 4
/* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */
#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
/* result */
/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
/* Enum values, see field(s): */
/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_LEN 4
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_LEN 4
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_LEN 4
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_LEN 4
/* Status of each channel A */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_LEN 4
/* enum: Ok. */
#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1
/* enum: Open. */
@@ -5086,14 +3220,17 @@
#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9
/* Status of each channel B */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_LEN 4
/* Enum values, see field(s): */
/* CABLE_STATUS_A */
/* Status of each channel C */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_LEN 4
/* Enum values, see field(s): */
/* CABLE_STATUS_A */
/* Status of each channel D */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_LEN 4
/* Enum values, see field(s): */
/* CABLE_STATUS_A */
@@ -5101,9 +3238,11 @@
#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
/* result */
/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
/* Enum values, see field(s): */
/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
+#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_LEN 4
/* enum: Complete. */
#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0
/* enum: Bus switch off I2C write. */
@@ -5127,9 +3266,11 @@
#define MC_CMD_POLL_BIST_OUT_MEM_LEN 36
/* result */
/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
/* Enum values, see field(s): */
/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
#define MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4
+#define MC_CMD_POLL_BIST_OUT_MEM_TEST_LEN 4
/* enum: Test has completed. */
#define MC_CMD_POLL_BIST_MEM_COMPLETE 0x0
/* enum: RAM test - walk ones. */
@@ -5146,8 +3287,10 @@
#define MC_CMD_POLL_BIST_MEM_ECC 0x6
/* Failure address, only valid if result is POLL_BIST_FAILED */
#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8
+#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_LEN 4
/* Bus or address space to which the failure address corresponds */
#define MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12
+#define MC_CMD_POLL_BIST_OUT_MEM_BUS_LEN 4
/* enum: MC MIPS bus. */
#define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0
/* enum: CSR IREG bus. */
@@ -5168,14 +3311,19 @@
#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX1 0x8
/* Pattern written to RAM / register */
#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16
+#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_LEN 4
/* Actual value read from RAM / register */
#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20
+#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_LEN 4
/* ECC error mask */
#define MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_LEN 4
/* ECC parity error mask */
#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_LEN 4
/* ECC fatal error mask */
#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_LEN 4
/***********************************/
@@ -5222,83 +3370,83 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4
/* enum: None. */
-#define MC_CMD_LOOPBACK_NONE 0x0
+#define MC_CMD_LOOPBACK_NONE 0x0
/* enum: Data. */
-#define MC_CMD_LOOPBACK_DATA 0x1
+#define MC_CMD_LOOPBACK_DATA 0x1
/* enum: GMAC. */
-#define MC_CMD_LOOPBACK_GMAC 0x2
+#define MC_CMD_LOOPBACK_GMAC 0x2
/* enum: XGMII. */
#define MC_CMD_LOOPBACK_XGMII 0x3
/* enum: XGXS. */
-#define MC_CMD_LOOPBACK_XGXS 0x4
+#define MC_CMD_LOOPBACK_XGXS 0x4
/* enum: XAUI. */
-#define MC_CMD_LOOPBACK_XAUI 0x5
+#define MC_CMD_LOOPBACK_XAUI 0x5
/* enum: GMII. */
-#define MC_CMD_LOOPBACK_GMII 0x6
+#define MC_CMD_LOOPBACK_GMII 0x6
/* enum: SGMII. */
-#define MC_CMD_LOOPBACK_SGMII 0x7
+#define MC_CMD_LOOPBACK_SGMII 0x7
/* enum: XGBR. */
-#define MC_CMD_LOOPBACK_XGBR 0x8
+#define MC_CMD_LOOPBACK_XGBR 0x8
/* enum: XFI. */
-#define MC_CMD_LOOPBACK_XFI 0x9
+#define MC_CMD_LOOPBACK_XFI 0x9
/* enum: XAUI Far. */
-#define MC_CMD_LOOPBACK_XAUI_FAR 0xa
+#define MC_CMD_LOOPBACK_XAUI_FAR 0xa
/* enum: GMII Far. */
-#define MC_CMD_LOOPBACK_GMII_FAR 0xb
+#define MC_CMD_LOOPBACK_GMII_FAR 0xb
/* enum: SGMII Far. */
-#define MC_CMD_LOOPBACK_SGMII_FAR 0xc
+#define MC_CMD_LOOPBACK_SGMII_FAR 0xc
/* enum: XFI Far. */
-#define MC_CMD_LOOPBACK_XFI_FAR 0xd
+#define MC_CMD_LOOPBACK_XFI_FAR 0xd
/* enum: GPhy. */
-#define MC_CMD_LOOPBACK_GPHY 0xe
+#define MC_CMD_LOOPBACK_GPHY 0xe
/* enum: PhyXS. */
-#define MC_CMD_LOOPBACK_PHYXS 0xf
+#define MC_CMD_LOOPBACK_PHYXS 0xf
/* enum: PCS. */
-#define MC_CMD_LOOPBACK_PCS 0x10
+#define MC_CMD_LOOPBACK_PCS 0x10
/* enum: PMA-PMD. */
-#define MC_CMD_LOOPBACK_PMAPMD 0x11
+#define MC_CMD_LOOPBACK_PMAPMD 0x11
/* enum: Cross-Port. */
-#define MC_CMD_LOOPBACK_XPORT 0x12
+#define MC_CMD_LOOPBACK_XPORT 0x12
/* enum: XGMII-Wireside. */
-#define MC_CMD_LOOPBACK_XGMII_WS 0x13
+#define MC_CMD_LOOPBACK_XGMII_WS 0x13
/* enum: XAUI Wireside. */
-#define MC_CMD_LOOPBACK_XAUI_WS 0x14
+#define MC_CMD_LOOPBACK_XAUI_WS 0x14
/* enum: XAUI Wireside Far. */
-#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15
+#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15
/* enum: XAUI Wireside near. */
-#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16
+#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16
/* enum: GMII Wireside. */
-#define MC_CMD_LOOPBACK_GMII_WS 0x17
+#define MC_CMD_LOOPBACK_GMII_WS 0x17
/* enum: XFI Wireside. */
-#define MC_CMD_LOOPBACK_XFI_WS 0x18
+#define MC_CMD_LOOPBACK_XFI_WS 0x18
/* enum: XFI Wireside Far. */
-#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19
+#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19
/* enum: PhyXS Wireside. */
-#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a
+#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a
/* enum: PMA lanes MAC-Serdes. */
-#define MC_CMD_LOOPBACK_PMA_INT 0x1b
+#define MC_CMD_LOOPBACK_PMA_INT 0x1b
/* enum: KR Serdes Parallel (Encoder). */
-#define MC_CMD_LOOPBACK_SD_NEAR 0x1c
+#define MC_CMD_LOOPBACK_SD_NEAR 0x1c
/* enum: KR Serdes Serial. */
-#define MC_CMD_LOOPBACK_SD_FAR 0x1d
+#define MC_CMD_LOOPBACK_SD_FAR 0x1d
/* enum: PMA lanes MAC-Serdes Wireside. */
-#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e
+#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e
/* enum: KR Serdes Parallel Wireside (Full PCS). */
-#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f
+#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f
/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
-#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20
+#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20
/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
-#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21
+#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21
/* enum: KR Serdes Serial Wireside. */
-#define MC_CMD_LOOPBACK_SD_FES_WS 0x22
+#define MC_CMD_LOOPBACK_SD_FES_WS 0x22
/* enum: Near side of AOE Siena side port */
-#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23
+#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23
/* enum: Medford Wireside datapath loopback */
-#define MC_CMD_LOOPBACK_DATA_WS 0x24
+#define MC_CMD_LOOPBACK_DATA_WS 0x24
/* enum: Force link up without setting up any physical loopback (snapper use
* only)
*/
-#define MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25
+#define MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25
/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
@@ -5328,6 +3476,174 @@
/* Enum values, see field(s): */
/* 100M */
+/* MC_CMD_GET_LOOPBACK_MODES_OUT_V2 msgresponse: Supported loopback modes for
+ * newer NICs with 25G/50G/100G support
+ */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN 64
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_OFST 4
+/* enum: None. */
+/* MC_CMD_LOOPBACK_NONE 0x0 */
+/* enum: Data. */
+/* MC_CMD_LOOPBACK_DATA 0x1 */
+/* enum: GMAC. */
+/* MC_CMD_LOOPBACK_GMAC 0x2 */
+/* enum: XGMII. */
+/* MC_CMD_LOOPBACK_XGMII 0x3 */
+/* enum: XGXS. */
+/* MC_CMD_LOOPBACK_XGXS 0x4 */
+/* enum: XAUI. */
+/* MC_CMD_LOOPBACK_XAUI 0x5 */
+/* enum: GMII. */
+/* MC_CMD_LOOPBACK_GMII 0x6 */
+/* enum: SGMII. */
+/* MC_CMD_LOOPBACK_SGMII 0x7 */
+/* enum: XGBR. */
+/* MC_CMD_LOOPBACK_XGBR 0x8 */
+/* enum: XFI. */
+/* MC_CMD_LOOPBACK_XFI 0x9 */
+/* enum: XAUI Far. */
+/* MC_CMD_LOOPBACK_XAUI_FAR 0xa */
+/* enum: GMII Far. */
+/* MC_CMD_LOOPBACK_GMII_FAR 0xb */
+/* enum: SGMII Far. */
+/* MC_CMD_LOOPBACK_SGMII_FAR 0xc */
+/* enum: XFI Far. */
+/* MC_CMD_LOOPBACK_XFI_FAR 0xd */
+/* enum: GPhy. */
+/* MC_CMD_LOOPBACK_GPHY 0xe */
+/* enum: PhyXS. */
+/* MC_CMD_LOOPBACK_PHYXS 0xf */
+/* enum: PCS. */
+/* MC_CMD_LOOPBACK_PCS 0x10 */
+/* enum: PMA-PMD. */
+/* MC_CMD_LOOPBACK_PMAPMD 0x11 */
+/* enum: Cross-Port. */
+/* MC_CMD_LOOPBACK_XPORT 0x12 */
+/* enum: XGMII-Wireside. */
+/* MC_CMD_LOOPBACK_XGMII_WS 0x13 */
+/* enum: XAUI Wireside. */
+/* MC_CMD_LOOPBACK_XAUI_WS 0x14 */
+/* enum: XAUI Wireside Far. */
+/* MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 */
+/* enum: XAUI Wireside near. */
+/* MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 */
+/* enum: GMII Wireside. */
+/* MC_CMD_LOOPBACK_GMII_WS 0x17 */
+/* enum: XFI Wireside. */
+/* MC_CMD_LOOPBACK_XFI_WS 0x18 */
+/* enum: XFI Wireside Far. */
+/* MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 */
+/* enum: PhyXS Wireside. */
+/* MC_CMD_LOOPBACK_PHYXS_WS 0x1a */
+/* enum: PMA lanes MAC-Serdes. */
+/* MC_CMD_LOOPBACK_PMA_INT 0x1b */
+/* enum: KR Serdes Parallel (Encoder). */
+/* MC_CMD_LOOPBACK_SD_NEAR 0x1c */
+/* enum: KR Serdes Serial. */
+/* MC_CMD_LOOPBACK_SD_FAR 0x1d */
+/* enum: PMA lanes MAC-Serdes Wireside. */
+/* MC_CMD_LOOPBACK_PMA_INT_WS 0x1e */
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+/* MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f */
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+/* MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 */
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+/* MC_CMD_LOOPBACK_SD_FEP_WS 0x21 */
+/* enum: KR Serdes Serial Wireside. */
+/* MC_CMD_LOOPBACK_SD_FES_WS 0x22 */
+/* enum: Near side of AOE Siena side port */
+/* MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 */
+/* enum: Medford Wireside datapath loopback */
+/* MC_CMD_LOOPBACK_DATA_WS 0x24 */
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+/* MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_OFST 12
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_OFST 20
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_OFST 28
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_OFST 36
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 25G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST 44
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 50G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_OFST 52
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 100G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_OFST 60
+/* Enum values, see field(s): */
+/* 100M */
+
+/* AN_TYPE structuredef: Auto-negotiation types defined in IEEE802.3 */
+#define AN_TYPE_LEN 4
+#define AN_TYPE_TYPE_OFST 0
+#define AN_TYPE_TYPE_LEN 4
+/* enum: None, AN disabled or not supported */
+#define MC_CMD_AN_NONE 0x0
+/* enum: Clause 28 - BASE-T */
+#define MC_CMD_AN_CLAUSE28 0x1
+/* enum: Clause 37 - BASE-X */
+#define MC_CMD_AN_CLAUSE37 0x2
+/* enum: Clause 73 - BASE-R startup protocol for backplane and copper cable
+ * assemblies. Includes Clause 72/Clause 92 link-training.
+ */
+#define MC_CMD_AN_CLAUSE73 0x3
+#define AN_TYPE_TYPE_LBN 0
+#define AN_TYPE_TYPE_WIDTH 32
+
+/* FEC_TYPE structuredef: Forward error correction types defined in IEEE802.3
+ */
+#define FEC_TYPE_LEN 4
+#define FEC_TYPE_TYPE_OFST 0
+#define FEC_TYPE_TYPE_LEN 4
+/* enum: No FEC */
+#define MC_CMD_FEC_NONE 0x0
+/* enum: Clause 74 BASE-R FEC (a.k.a Firecode) */
+#define MC_CMD_FEC_BASER 0x1
+/* enum: Clause 91/Clause 108 Reed-Solomon FEC */
+#define MC_CMD_FEC_RS 0x2
+#define FEC_TYPE_TYPE_LBN 0
+#define FEC_TYPE_TYPE_WIDTH 32
+
/***********************************/
/* MC_CMD_GET_LINK
@@ -5344,19 +3660,28 @@
/* MC_CMD_GET_LINK_OUT msgresponse */
#define MC_CMD_GET_LINK_OUT_LEN 28
-/* near-side advertised capabilities */
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
#define MC_CMD_GET_LINK_OUT_CAP_OFST 0
-/* link-partner advertised capabilities */
+#define MC_CMD_GET_LINK_OUT_CAP_LEN 4
+/* Link-partner advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
+#define MC_CMD_GET_LINK_OUT_LP_CAP_LEN 4
/* Autonegotiated speed in mbit/s. The link may still be down even if this
* reads non-zero.
*/
#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
+#define MC_CMD_GET_LINK_OUT_LINK_SPEED_LEN 4
/* Current loopback setting. */
#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
+#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16
+#define MC_CMD_GET_LINK_OUT_FLAGS_LEN 4
#define MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0
#define MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1
#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1
@@ -5371,9 +3696,11 @@
#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
/* This returns the negotiated flow control value. */
#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
+#define MC_CMD_GET_LINK_OUT_FCNTL_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
+#define MC_CMD_GET_LINK_OUT_MAC_FAULT_LEN 4
#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1
@@ -5383,6 +3710,97 @@
#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3
#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1
+/* MC_CMD_GET_LINK_OUT_V2 msgresponse: Extended link state information */
+#define MC_CMD_GET_LINK_OUT_V2_LEN 44
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_GET_LINK_OUT_V2_CAP_OFST 0
+#define MC_CMD_GET_LINK_OUT_V2_CAP_LEN 4
+/* Link-partner advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_GET_LINK_OUT_V2_LP_CAP_OFST 4
+#define MC_CMD_GET_LINK_OUT_V2_LP_CAP_LEN 4
+/* Autonegotiated speed in mbit/s. The link may still be down even if this
+ * reads non-zero.
+ */
+#define MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_OFST 8
+#define MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_LEN 4
+/* Current loopback setting. */
+#define MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_OFST 12
+#define MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+#define MC_CMD_GET_LINK_OUT_V2_FLAGS_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_FLAGS_LEN 4
+#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_LBN 0
+#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_LBN 1
+#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_LBN 2
+#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_LBN 3
+#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_LBN 6
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_LBN 7
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_WIDTH 1
+/* This returns the negotiated flow control value. */
+#define MC_CMD_GET_LINK_OUT_V2_FCNTL_OFST 20
+#define MC_CMD_GET_LINK_OUT_V2_FCNTL_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
+#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_OFST 24
+#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_LEN 4
+/* MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 */
+/* MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 */
+/* MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 */
+/* MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1 */
+/* MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2 */
+/* MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1 */
+/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3 */
+/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1 */
+/* True local device capabilities (taking into account currently used PMD/MDI,
+ * e.g. plugged-in module). In general, subset of
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP, but may include extra _FEC_REQUEST
+ * bits, if the PMD requires FEC. 0 if unknown (e.g. module unplugged). Equal
+ * to SUPPORTED_CAP for non-pluggable PMDs. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_GET_LINK_OUT_V2_LD_CAP_OFST 28
+#define MC_CMD_GET_LINK_OUT_V2_LD_CAP_LEN 4
+/* Auto-negotiation type used on the link */
+#define MC_CMD_GET_LINK_OUT_V2_AN_TYPE_OFST 32
+#define MC_CMD_GET_LINK_OUT_V2_AN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* AN_TYPE/TYPE */
+/* Forward error correction used on the link */
+#define MC_CMD_GET_LINK_OUT_V2_FEC_TYPE_OFST 36
+#define MC_CMD_GET_LINK_OUT_V2_FEC_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+#define MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_LEN 4
+#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_LBN 0
+#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_LBN 1
+#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_LBN 2
+#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_LBN 3
+#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_LBN 4
+#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_LBN 5
+#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_HI_BER_LBN 6
+#define MC_CMD_GET_LINK_OUT_V2_HI_BER_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_LBN 7
+#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_LBN 8
+#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_WIDTH 1
+
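
A minimal sketch (editorial, not part of the patch) of decoding a GET_LINK_OUT_V2 response using the _OFST/_LBN/_WIDTH convention of this header; mcdi_read_dword() is a hypothetical accessor standing in for a driver's real MCDI response helpers, and a little-endian response buffer is assumed.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static uint32_t mcdi_read_dword(const uint8_t *resp, unsigned int ofst)
{
	uint32_t val;

	memcpy(&val, resp + ofst, sizeof(val));	/* assumes little-endian buffer */
	return val;
}

static bool mcdi_flag(uint32_t dword, unsigned int lbn, unsigned int width)
{
	/* LBN is the bit position, WIDTH the field width within the dword. */
	return (dword >> lbn) & ((1u << width) - 1);
}

static bool link_has_rs_fec(const uint8_t *resp)
{
	uint32_t ext_flags = mcdi_read_dword(resp, MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_OFST);
	uint32_t fec = mcdi_read_dword(resp, MC_CMD_GET_LINK_OUT_V2_FEC_TYPE_OFST);

	/* True when auto-negotiation completed and Reed-Solomon FEC is in use. */
	return mcdi_flag(ext_flags, MC_CMD_GET_LINK_OUT_V2_AN_DONE_LBN,
			 MC_CMD_GET_LINK_OUT_V2_AN_DONE_WIDTH) &&
	       fec == MC_CMD_FEC_RS;
}
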
/***********************************/
/* MC_CMD_SET_LINK
@@ -5396,10 +3814,14 @@
/* MC_CMD_SET_LINK_IN msgrequest */
#define MC_CMD_SET_LINK_IN_LEN 16
-/* ??? */
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
#define MC_CMD_SET_LINK_IN_CAP_OFST 0
+#define MC_CMD_SET_LINK_IN_CAP_LEN 4
/* Flags */
#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4
+#define MC_CMD_SET_LINK_IN_FLAGS_LEN 4
#define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
#define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
#define MC_CMD_SET_LINK_IN_POWEROFF_LBN 1
@@ -5408,12 +3830,14 @@
#define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
/* Loopback mode. */
#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
+#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
/* A loopback speed of "0" is supported, and means (choose any available
* speed).
*/
#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
+#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_LEN 4
/* MC_CMD_SET_LINK_OUT msgresponse */
#define MC_CMD_SET_LINK_OUT_LEN 0
@@ -5432,9 +3856,10 @@
#define MC_CMD_SET_ID_LED_IN_LEN 4
/* Set LED state. */
#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0
-#define MC_CMD_LED_OFF 0x0 /* enum */
-#define MC_CMD_LED_ON 0x1 /* enum */
-#define MC_CMD_LED_DEFAULT 0x2 /* enum */
+#define MC_CMD_SET_ID_LED_IN_STATE_LEN 4
+#define MC_CMD_LED_OFF 0x0 /* enum */
+#define MC_CMD_LED_ON 0x1 /* enum */
+#define MC_CMD_LED_DEFAULT 0x2 /* enum */
/* MC_CMD_SET_ID_LED_OUT msgresponse */
#define MC_CMD_SET_ID_LED_OUT_LEN 0
@@ -5455,17 +3880,21 @@
* EtherII, VLAN, bug16011 padding).
*/
#define MC_CMD_SET_MAC_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_IN_MTU_LEN 4
#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_IN_DRAIN_LEN 4
#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
#define MC_CMD_SET_MAC_IN_ADDR_LEN 8
#define MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8
#define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12
#define MC_CMD_SET_MAC_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_IN_REJECT_LEN 4
#define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0
#define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1
#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
+#define MC_CMD_SET_MAC_IN_FCNTL_LEN 4
/* enum: Flow control is off. */
#define MC_CMD_FCNTL_OFF 0x0
/* enum: Respond to flow control. */
@@ -5479,6 +3908,7 @@
/* enum: Issue flow control. */
#define MC_CMD_FCNTL_GENERATE 0x5
#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_IN_FLAGS_LEN 4
#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
@@ -5488,17 +3918,21 @@
* EtherII, VLAN, bug16011 padding).
*/
#define MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_EXT_IN_MTU_LEN 4
#define MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_EXT_IN_DRAIN_LEN 4
#define MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8
#define MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8
#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8
#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12
#define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_LEN 4
#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0
#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1
#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1
#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1
#define MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20
+#define MC_CMD_SET_MAC_EXT_IN_FCNTL_LEN 4
/* enum: Flow control is off. */
/* MC_CMD_FCNTL_OFF 0x0 */
/* enum: Respond to flow control. */
@@ -5512,6 +3946,7 @@
/* enum: Issue flow control. */
/* MC_CMD_FCNTL_GENERATE 0x5 */
#define MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_EXT_IN_FLAGS_LEN 4
#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0
#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1
/* Select which parameters to configure. A parameter will only be modified if
@@ -5520,6 +3955,7 @@
* set).
*/
#define MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CONTROL_LEN 4
#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0
#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1
#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1
@@ -5541,6 +3977,7 @@
* to 0.
*/
#define MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0
+#define MC_CMD_SET_MAC_V2_OUT_MTU_LEN 4
/***********************************/
@@ -5574,53 +4011,53 @@
#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4
#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS
/* enum: OUI. */
-#define MC_CMD_OUI 0x0
+#define MC_CMD_OUI 0x0
/* enum: PMA-PMD Link Up. */
-#define MC_CMD_PMA_PMD_LINK_UP 0x1
+#define MC_CMD_PMA_PMD_LINK_UP 0x1
/* enum: PMA-PMD RX Fault. */
-#define MC_CMD_PMA_PMD_RX_FAULT 0x2
+#define MC_CMD_PMA_PMD_RX_FAULT 0x2
/* enum: PMA-PMD TX Fault. */
-#define MC_CMD_PMA_PMD_TX_FAULT 0x3
+#define MC_CMD_PMA_PMD_TX_FAULT 0x3
/* enum: PMA-PMD Signal */
-#define MC_CMD_PMA_PMD_SIGNAL 0x4
+#define MC_CMD_PMA_PMD_SIGNAL 0x4
/* enum: PMA-PMD SNR A. */
-#define MC_CMD_PMA_PMD_SNR_A 0x5
+#define MC_CMD_PMA_PMD_SNR_A 0x5
/* enum: PMA-PMD SNR B. */
-#define MC_CMD_PMA_PMD_SNR_B 0x6
+#define MC_CMD_PMA_PMD_SNR_B 0x6
/* enum: PMA-PMD SNR C. */
-#define MC_CMD_PMA_PMD_SNR_C 0x7
+#define MC_CMD_PMA_PMD_SNR_C 0x7
/* enum: PMA-PMD SNR D. */
-#define MC_CMD_PMA_PMD_SNR_D 0x8
+#define MC_CMD_PMA_PMD_SNR_D 0x8
/* enum: PCS Link Up. */
-#define MC_CMD_PCS_LINK_UP 0x9
+#define MC_CMD_PCS_LINK_UP 0x9
/* enum: PCS RX Fault. */
-#define MC_CMD_PCS_RX_FAULT 0xa
+#define MC_CMD_PCS_RX_FAULT 0xa
/* enum: PCS TX Fault. */
-#define MC_CMD_PCS_TX_FAULT 0xb
+#define MC_CMD_PCS_TX_FAULT 0xb
/* enum: PCS BER. */
-#define MC_CMD_PCS_BER 0xc
+#define MC_CMD_PCS_BER 0xc
/* enum: PCS Block Errors. */
-#define MC_CMD_PCS_BLOCK_ERRORS 0xd
+#define MC_CMD_PCS_BLOCK_ERRORS 0xd
/* enum: PhyXS Link Up. */
-#define MC_CMD_PHYXS_LINK_UP 0xe
+#define MC_CMD_PHYXS_LINK_UP 0xe
/* enum: PhyXS RX Fault. */
-#define MC_CMD_PHYXS_RX_FAULT 0xf
+#define MC_CMD_PHYXS_RX_FAULT 0xf
/* enum: PhyXS TX Fault. */
-#define MC_CMD_PHYXS_TX_FAULT 0x10
+#define MC_CMD_PHYXS_TX_FAULT 0x10
/* enum: PhyXS Align. */
-#define MC_CMD_PHYXS_ALIGN 0x11
+#define MC_CMD_PHYXS_ALIGN 0x11
/* enum: PhyXS Sync. */
-#define MC_CMD_PHYXS_SYNC 0x12
+#define MC_CMD_PHYXS_SYNC 0x12
/* enum: AN link-up. */
-#define MC_CMD_AN_LINK_UP 0x13
+#define MC_CMD_AN_LINK_UP 0x13
/* enum: AN Complete. */
-#define MC_CMD_AN_COMPLETE 0x14
+#define MC_CMD_AN_COMPLETE 0x14
/* enum: AN 10GBaseT Status. */
-#define MC_CMD_AN_10GBT_STATUS 0x15
+#define MC_CMD_AN_10GBT_STATUS 0x15
/* enum: Clause 22 Link-Up. */
-#define MC_CMD_CL22_LINK_UP 0x16
+#define MC_CMD_CL22_LINK_UP 0x16
/* enum: (Last entry) */
-#define MC_CMD_PHY_NSTATS 0x17
+#define MC_CMD_PHY_NSTATS 0x17
/***********************************/
@@ -5647,6 +4084,7 @@
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
#define MC_CMD_MAC_STATS_IN_CMD_OFST 8
+#define MC_CMD_MAC_STATS_IN_CMD_LEN 4
#define MC_CMD_MAC_STATS_IN_DMA_LBN 0
#define MC_CMD_MAC_STATS_IN_DMA_WIDTH 1
#define MC_CMD_MAC_STATS_IN_CLEAR_LBN 1
@@ -5661,9 +4099,16 @@
#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1
#define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
#define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
+/* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as
+ * returned by MC_CMD_GET_CAPABILITIES_V4_OUT. For legacy firmware not
+ * supporting MC_CMD_GET_CAPABILITIES_V4_OUT, DMA_LEN should be set to
+ * MC_CMD_MAC_NSTATS * sizeof(uint64_t).
+ */
#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
+#define MC_CMD_MAC_STATS_IN_DMA_LEN_LEN 4
/* port id so vadapter stats can be provided */
#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
+#define MC_CMD_MAC_STATS_IN_PORT_ID_LEN 4
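
A minimal sketch (editorial, not part of the patch) of sizing the statistics DMA buffer from the rule in the DMA_LEN comment above; mac_stats_num_stats is a hypothetical value assumed to have been read from MC_CMD_GET_CAPABILITIES_V4_OUT, with 0 standing in for "not reported" on legacy firmware.

#include <stddef.h>
#include <stdint.h>

static size_t mac_stats_dma_len(unsigned int mac_stats_num_stats)
{
	/* Newer firmware reports its stats count via GET_CAPABILITIES_V4. */
	if (mac_stats_num_stats != 0)
		return (size_t)mac_stats_num_stats * sizeof(uint64_t);

	/* Legacy firmware: fixed-size buffer of MC_CMD_MAC_NSTATS entries. */
	return (size_t)MC_CMD_MAC_NSTATS * sizeof(uint64_t);
}
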
/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0
@@ -5675,141 +4120,289 @@
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_OFST 0
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
-#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
-#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */
-#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */
-#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */
-#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */
-#define MC_CMD_MAC_TX_UNICAST_PKTS 0x4 /* enum */
-#define MC_CMD_MAC_TX_MULTICAST_PKTS 0x5 /* enum */
-#define MC_CMD_MAC_TX_BROADCAST_PKTS 0x6 /* enum */
-#define MC_CMD_MAC_TX_BYTES 0x7 /* enum */
-#define MC_CMD_MAC_TX_BAD_BYTES 0x8 /* enum */
-#define MC_CMD_MAC_TX_LT64_PKTS 0x9 /* enum */
-#define MC_CMD_MAC_TX_64_PKTS 0xa /* enum */
-#define MC_CMD_MAC_TX_65_TO_127_PKTS 0xb /* enum */
-#define MC_CMD_MAC_TX_128_TO_255_PKTS 0xc /* enum */
-#define MC_CMD_MAC_TX_256_TO_511_PKTS 0xd /* enum */
-#define MC_CMD_MAC_TX_512_TO_1023_PKTS 0xe /* enum */
-#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 0xf /* enum */
-#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 0x10 /* enum */
-#define MC_CMD_MAC_TX_GTJUMBO_PKTS 0x11 /* enum */
-#define MC_CMD_MAC_TX_BAD_FCS_PKTS 0x12 /* enum */
-#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 0x13 /* enum */
-#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 0x14 /* enum */
-#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 0x15 /* enum */
-#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 0x16 /* enum */
-#define MC_CMD_MAC_TX_DEFERRED_PKTS 0x17 /* enum */
-#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 0x18 /* enum */
-#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 0x19 /* enum */
-#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 0x1a /* enum */
-#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 0x1b /* enum */
-#define MC_CMD_MAC_RX_PKTS 0x1c /* enum */
-#define MC_CMD_MAC_RX_PAUSE_PKTS 0x1d /* enum */
-#define MC_CMD_MAC_RX_GOOD_PKTS 0x1e /* enum */
-#define MC_CMD_MAC_RX_CONTROL_PKTS 0x1f /* enum */
-#define MC_CMD_MAC_RX_UNICAST_PKTS 0x20 /* enum */
-#define MC_CMD_MAC_RX_MULTICAST_PKTS 0x21 /* enum */
-#define MC_CMD_MAC_RX_BROADCAST_PKTS 0x22 /* enum */
-#define MC_CMD_MAC_RX_BYTES 0x23 /* enum */
-#define MC_CMD_MAC_RX_BAD_BYTES 0x24 /* enum */
-#define MC_CMD_MAC_RX_64_PKTS 0x25 /* enum */
-#define MC_CMD_MAC_RX_65_TO_127_PKTS 0x26 /* enum */
-#define MC_CMD_MAC_RX_128_TO_255_PKTS 0x27 /* enum */
-#define MC_CMD_MAC_RX_256_TO_511_PKTS 0x28 /* enum */
-#define MC_CMD_MAC_RX_512_TO_1023_PKTS 0x29 /* enum */
-#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 0x2a /* enum */
-#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 0x2b /* enum */
-#define MC_CMD_MAC_RX_GTJUMBO_PKTS 0x2c /* enum */
-#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 0x2d /* enum */
-#define MC_CMD_MAC_RX_BAD_FCS_PKTS 0x2e /* enum */
-#define MC_CMD_MAC_RX_OVERFLOW_PKTS 0x2f /* enum */
-#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 0x30 /* enum */
-#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 0x31 /* enum */
-#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 0x32 /* enum */
-#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 0x33 /* enum */
-#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 0x34 /* enum */
-#define MC_CMD_MAC_RX_JABBER_PKTS 0x35 /* enum */
-#define MC_CMD_MAC_RX_NODESC_DROPS 0x36 /* enum */
-#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 0x37 /* enum */
-#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 0x38 /* enum */
-#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */
-#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */
-#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */
+#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
+#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */
+#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */
+#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */
+#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */
+#define MC_CMD_MAC_TX_UNICAST_PKTS 0x4 /* enum */
+#define MC_CMD_MAC_TX_MULTICAST_PKTS 0x5 /* enum */
+#define MC_CMD_MAC_TX_BROADCAST_PKTS 0x6 /* enum */
+#define MC_CMD_MAC_TX_BYTES 0x7 /* enum */
+#define MC_CMD_MAC_TX_BAD_BYTES 0x8 /* enum */
+#define MC_CMD_MAC_TX_LT64_PKTS 0x9 /* enum */
+#define MC_CMD_MAC_TX_64_PKTS 0xa /* enum */
+#define MC_CMD_MAC_TX_65_TO_127_PKTS 0xb /* enum */
+#define MC_CMD_MAC_TX_128_TO_255_PKTS 0xc /* enum */
+#define MC_CMD_MAC_TX_256_TO_511_PKTS 0xd /* enum */
+#define MC_CMD_MAC_TX_512_TO_1023_PKTS 0xe /* enum */
+#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 0xf /* enum */
+#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 0x10 /* enum */
+#define MC_CMD_MAC_TX_GTJUMBO_PKTS 0x11 /* enum */
+#define MC_CMD_MAC_TX_BAD_FCS_PKTS 0x12 /* enum */
+#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 0x13 /* enum */
+#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 0x14 /* enum */
+#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 0x15 /* enum */
+#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 0x16 /* enum */
+#define MC_CMD_MAC_TX_DEFERRED_PKTS 0x17 /* enum */
+#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 0x18 /* enum */
+#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 0x19 /* enum */
+#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 0x1a /* enum */
+#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 0x1b /* enum */
+#define MC_CMD_MAC_RX_PKTS 0x1c /* enum */
+#define MC_CMD_MAC_RX_PAUSE_PKTS 0x1d /* enum */
+#define MC_CMD_MAC_RX_GOOD_PKTS 0x1e /* enum */
+#define MC_CMD_MAC_RX_CONTROL_PKTS 0x1f /* enum */
+#define MC_CMD_MAC_RX_UNICAST_PKTS 0x20 /* enum */
+#define MC_CMD_MAC_RX_MULTICAST_PKTS 0x21 /* enum */
+#define MC_CMD_MAC_RX_BROADCAST_PKTS 0x22 /* enum */
+#define MC_CMD_MAC_RX_BYTES 0x23 /* enum */
+#define MC_CMD_MAC_RX_BAD_BYTES 0x24 /* enum */
+#define MC_CMD_MAC_RX_64_PKTS 0x25 /* enum */
+#define MC_CMD_MAC_RX_65_TO_127_PKTS 0x26 /* enum */
+#define MC_CMD_MAC_RX_128_TO_255_PKTS 0x27 /* enum */
+#define MC_CMD_MAC_RX_256_TO_511_PKTS 0x28 /* enum */
+#define MC_CMD_MAC_RX_512_TO_1023_PKTS 0x29 /* enum */
+#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 0x2a /* enum */
+#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 0x2b /* enum */
+#define MC_CMD_MAC_RX_GTJUMBO_PKTS 0x2c /* enum */
+#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 0x2d /* enum */
+#define MC_CMD_MAC_RX_BAD_FCS_PKTS 0x2e /* enum */
+#define MC_CMD_MAC_RX_OVERFLOW_PKTS 0x2f /* enum */
+#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 0x30 /* enum */
+#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 0x31 /* enum */
+#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 0x32 /* enum */
+#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 0x33 /* enum */
+#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 0x34 /* enum */
+#define MC_CMD_MAC_RX_JABBER_PKTS 0x35 /* enum */
+#define MC_CMD_MAC_RX_NODESC_DROPS 0x36 /* enum */
+#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 0x37 /* enum */
+#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 0x38 /* enum */
+#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */
+#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */
+#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */
/* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
* capability only.
*/
-#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c
+#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c
/* enum: PM discard_bb_overflow counter. Valid for EF10 with
* PM_AND_RXDP_COUNTERS capability only.
*/
-#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d
+#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d
/* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
* capability only.
*/
-#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e
+#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e
/* enum: PM discard_vfifo_full counter. Valid for EF10 with
* PM_AND_RXDP_COUNTERS capability only.
*/
-#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f
+#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f
/* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
* capability only.
*/
-#define MC_CMD_MAC_PM_TRUNC_QBB 0x40
+#define MC_CMD_MAC_PM_TRUNC_QBB 0x40
/* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
* capability only.
*/
-#define MC_CMD_MAC_PM_DISCARD_QBB 0x41
+#define MC_CMD_MAC_PM_DISCARD_QBB 0x41
/* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
* capability only.
*/
-#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42
+#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42
/* enum: RXDP counter: Number of packets dropped due to the queue being
* disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
*/
-#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43
+#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43
/* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10
* with PM_AND_RXDP_COUNTERS capability only.
*/
-#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45
+#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45
/* enum: RXDP counter: Number of non-host packets. Valid for EF10 with
* PM_AND_RXDP_COUNTERS capability only.
*/
-#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46
+#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46
/* enum: RXDP counter: Number of times an hlb descriptor fetch was performed.
* Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
*/
-#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47
+#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47
/* enum: RXDP counter: Number of times the DPCPU waited for an existing
* descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
*/
-#define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48
-#define MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */
-#define MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */
-#define MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */
-#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */
-#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */
-#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */
-#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */
-#define MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */
-#define MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */
-#define MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */
-#define MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */
-#define MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */
-#define MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */
-#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */
-#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */
-#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */
-#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */
-#define MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */
-#define MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */
-#define MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */
+#define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48
+#define MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */
/* enum: Start of GMAC stats buffer space, for Siena only. */
-#define MC_CMD_GMAC_DMABUF_START 0x40
+#define MC_CMD_GMAC_DMABUF_START 0x40
/* enum: End of GMAC stats buffer space, for Siena only. */
-#define MC_CMD_GMAC_DMABUF_END 0x5f
-#define MC_CMD_MAC_GENERATION_END 0x60 /* enum */
-#define MC_CMD_MAC_NSTATS 0x61 /* enum */
+#define MC_CMD_GMAC_DMABUF_END 0x5f
+/* enum: GENERATION_END value, used together with GENERATION_START to verify
+ * consistency of DMAd data. For legacy firmware / drivers without extended
+ * stats (more precisely, when DMA_LEN == MC_CMD_MAC_NSTATS *
+ * sizeof(uint64_t)), this entry holds the GENERATION_END value. Otherwise,
+ * this value is invalid/reserved and GENERATION_END is written as the last
+ * 64-bit word of the DMA buffer (at DMA_LEN - sizeof(uint64_t)). Note that
+ * this is consistent with the legacy behaviour, in the sense that entry 96 is
+ * the last 64-bit word in the buffer when DMA_LEN == MC_CMD_MAC_NSTATS *
+ * sizeof(uint64_t). See SF-109306-TC, Section 9.2 for details.
+ */
+#define MC_CMD_MAC_GENERATION_END 0x60
+#define MC_CMD_MAC_NSTATS 0x61 /* enum */
+
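
A minimal reader-side consistency check sketched from the GENERATION_START/GENERATION_END description above; it assumes the extended layout (GENERATION_END in the last 64-bit word of the DMAd buffer) and uses a GCC builtin as a stand-in for whatever memory barrier the driver environment actually provides.

#include <stddef.h>
#include <stdint.h>

static int mac_stats_snapshot_consistent(const volatile uint64_t *buf,
					 size_t dma_len)
{
	size_t nwords = dma_len / sizeof(uint64_t);
	uint64_t gen_end = buf[nwords - 1];

	__sync_synchronize();	/* order the END read before the START read */

	/* Firmware updates both markers around each rewrite of the buffer,
	 * so the snapshot is only trusted when they hold the same value.
	 */
	return buf[MC_CMD_MAC_GENERATION_START] == gen_end;
}
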
+/* MC_CMD_MAC_STATS_V2_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V2_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V2*64))>>3)
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2
+/* enum: Start of FEC stats buffer space, Medford2 and up */
+#define MC_CMD_MAC_FEC_DMABUF_START 0x61
+/* enum: Number of uncorrected FEC codewords on link (RS-FEC only for Medford2)
+ */
+#define MC_CMD_MAC_FEC_UNCORRECTED_ERRORS 0x61
+/* enum: Number of corrected FEC codewords on link (RS-FEC only for Medford2)
+ */
+#define MC_CMD_MAC_FEC_CORRECTED_ERRORS 0x62
+/* enum: Number of corrected 10-bit symbol errors, lane 0 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE0 0x63
+/* enum: Number of corrected 10-bit symbol errors, lane 1 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE1 0x64
+/* enum: Number of corrected 10-bit symbol errors, lane 2 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE2 0x65
+/* enum: Number of corrected 10-bit symbol errors, lane 3 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE3 0x66
+/* enum: This includes the space at offset 103 which is the final
+ * GENERATION_END in a MAC_STATS_V2 response and otherwise unused.
+ */
+#define MC_CMD_MAC_NSTATS_V2 0x68
+/* Other enum values, see field(s): */
+/* MC_CMD_MAC_STATS_OUT_NO_DMA/STATISTICS */
+
+/* MC_CMD_MAC_STATS_V3_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V3_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V3*64))>>3)
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3
+/* enum: Start of CTPIO stats buffer space, Medford2 and up */
+#define MC_CMD_MAC_CTPIO_DMABUF_START 0x68
+/* enum: Number of CTPIO fallbacks because a DMA packet was in progress on the
+ * target VI
+ */
+#define MC_CMD_MAC_CTPIO_VI_BUSY_FALLBACK 0x68
+/* enum: Number of times a CTPIO send wrote beyond frame end (informational
+ * only)
+ */
+#define MC_CMD_MAC_CTPIO_LONG_WRITE_SUCCESS 0x69
+/* enum: Number of CTPIO failures because the TX doorbell was written before
+ * the end of the frame data
+ */
+#define MC_CMD_MAC_CTPIO_MISSING_DBELL_FAIL 0x6a
+/* enum: Number of CTPIO failures because the internal FIFO overflowed */
+#define MC_CMD_MAC_CTPIO_OVERFLOW_FAIL 0x6b
+/* enum: Number of CTPIO failures because the host did not deliver data fast
+ * enough to avoid MAC underflow
+ */
+#define MC_CMD_MAC_CTPIO_UNDERFLOW_FAIL 0x6c
+/* enum: Number of CTPIO failures because the host did not deliver all the
+ * frame data within the timeout
+ */
+#define MC_CMD_MAC_CTPIO_TIMEOUT_FAIL 0x6d
+/* enum: Number of CTPIO failures because the frame data arrived out of order
+ * or with gaps
+ */
+#define MC_CMD_MAC_CTPIO_NONCONTIG_WR_FAIL 0x6e
+/* enum: Number of CTPIO failures because the host started a new frame before
+ * completing the previous one
+ */
+#define MC_CMD_MAC_CTPIO_FRM_CLOBBER_FAIL 0x6f
+/* enum: Number of CTPIO failures because a write was not a multiple of 32 bits
+ * or not 32-bit aligned
+ */
+#define MC_CMD_MAC_CTPIO_INVALID_WR_FAIL 0x70
+/* enum: Number of CTPIO fallbacks because another VI on the same port was
+ * sending a CTPIO frame
+ */
+#define MC_CMD_MAC_CTPIO_VI_CLOBBER_FALLBACK 0x71
+/* enum: Number of CTPIO fallbacks because target VI did not have CTPIO enabled
+ */
+#define MC_CMD_MAC_CTPIO_UNQUALIFIED_FALLBACK 0x72
+/* enum: Number of CTPIO fallbacks because length in header was less than 29
+ * bytes
+ */
+#define MC_CMD_MAC_CTPIO_RUNT_FALLBACK 0x73
+/* enum: Total number of successful CTPIO sends on this port */
+#define MC_CMD_MAC_CTPIO_SUCCESS 0x74
+/* enum: Total number of CTPIO fallbacks on this port */
+#define MC_CMD_MAC_CTPIO_FALLBACK 0x75
+/* enum: Total number of CTPIO poisoned frames on this port, whether erased or
+ * not
+ */
+#define MC_CMD_MAC_CTPIO_POISON 0x76
+/* enum: Total number of CTPIO erased frames on this port */
+#define MC_CMD_MAC_CTPIO_ERASE 0x77
+/* enum: This includes the space at offset 120 which is the final
+ * GENERATION_END in a MAC_STATS_V3 response and otherwise unused.
+ */
+#define MC_CMD_MAC_NSTATS_V3 0x79
+/* Other enum values, see field(s): */
+/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA/STATISTICS */
+
+/* MC_CMD_MAC_STATS_V4_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V4_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V4_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V4*64))>>3)
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V4
+/* enum: Start of V4 stats buffer space */
+#define MC_CMD_MAC_V4_DMABUF_START 0x79
+/* enum: RXDP counter: Number of packets truncated because scattering was
+ * disabled.
+ */
+#define MC_CMD_MAC_RXDP_SCATTER_DISABLED_TRUNC 0x79
+/* enum: RXDP counter: Number of times the RXDP head of line blocked waiting
+ * for descriptors. Will be zero unless RXDP_HLB_IDLE capability is set.
+ */
+#define MC_CMD_MAC_RXDP_HLB_IDLE 0x7a
+/* enum: RXDP counter: Number of times the RXDP timed out while head of line
+ * blocking. Will be zero unless RXDP_HLB_IDLE capability is set.
+ */
+#define MC_CMD_MAC_RXDP_HLB_TIMEOUT 0x7b
+/* enum: This includes the space at offset 124 which is the final
+ * GENERATION_END in a MAC_STATS_V4 response and otherwise unused.
+ */
+#define MC_CMD_MAC_NSTATS_V4 0x7d
+/* Other enum values, see field(s): */
+/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA/STATISTICS */
/***********************************/
@@ -5821,21 +4414,28 @@
/* MC_CMD_SRIOV_IN msgrequest */
#define MC_CMD_SRIOV_IN_LEN 12
#define MC_CMD_SRIOV_IN_ENABLE_OFST 0
+#define MC_CMD_SRIOV_IN_ENABLE_LEN 4
#define MC_CMD_SRIOV_IN_VI_BASE_OFST 4
+#define MC_CMD_SRIOV_IN_VI_BASE_LEN 4
#define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8
+#define MC_CMD_SRIOV_IN_VF_COUNT_LEN 4
/* MC_CMD_SRIOV_OUT msgresponse */
#define MC_CMD_SRIOV_OUT_LEN 8
#define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0
+#define MC_CMD_SRIOV_OUT_VI_SCALE_LEN 4
#define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4
+#define MC_CMD_SRIOV_OUT_VF_TOTAL_LEN 4
/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
/* this is only used for the first record */
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LEN 4
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LEN 4
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
@@ -5845,6 +4445,7 @@
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LEN 4
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32
@@ -5855,6 +4456,7 @@
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LEN 4
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32
@@ -5907,24 +4509,26 @@
/* MC_CMD_WOL_FILTER_SET_IN msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_LEN 192
#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
-#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */
+#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4
+#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */
#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */
/* A type value of 1 is unused. */
#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
+#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4
/* enum: Magic */
-#define MC_CMD_WOL_TYPE_MAGIC 0x0
+#define MC_CMD_WOL_TYPE_MAGIC 0x0
/* enum: MS Windows Magic */
#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2
/* enum: IPv4 Syn */
-#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3
+#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3
/* enum: IPv6 Syn */
-#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4
+#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4
/* enum: Bitmap */
-#define MC_CMD_WOL_TYPE_BITMAP 0x5
+#define MC_CMD_WOL_TYPE_BITMAP 0x5
/* enum: Link */
-#define MC_CMD_WOL_TYPE_LINK 0x6
+#define MC_CMD_WOL_TYPE_LINK 0x6
/* enum: (Above this for future use) */
-#define MC_CMD_WOL_TYPE_MAX 0x7
+#define MC_CMD_WOL_TYPE_MAX 0x7
#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8
#define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4
#define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46
@@ -5932,7 +4536,9 @@
/* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16
/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8
@@ -5941,9 +4547,13 @@
/* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20
/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_LEN 4
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_LEN 4
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18
@@ -5952,7 +4562,9 @@
/* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44
/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8
#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 16
#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24
@@ -5965,7 +4577,9 @@
/* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187
/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8
#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48
#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56
@@ -5980,8 +4594,11 @@
/* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12
/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_LEN 4
#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
@@ -5990,6 +4607,7 @@
/* MC_CMD_WOL_FILTER_SET_OUT msgresponse */
#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4
#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
+#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_LEN 4
/***********************************/
@@ -6004,6 +4622,7 @@
/* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */
#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
+#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_LEN 4
/* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */
#define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0
@@ -6022,6 +4641,7 @@
/* MC_CMD_WOL_FILTER_RESET_IN msgrequest */
#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4
#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
+#define MC_CMD_WOL_FILTER_RESET_IN_MASK_LEN 4
#define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */
#define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */
@@ -6063,6 +4683,7 @@
#define MC_CMD_NVRAM_TYPES_OUT_LEN 4
/* Bit mask of supported types. */
#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
+#define MC_CMD_NVRAM_TYPES_OUT_TYPES_LEN 4
/* enum: Disabled callisto. */
#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
/* enum: MC firmware. */
@@ -6120,21 +4741,28 @@
/* MC_CMD_NVRAM_INFO_IN msgrequest */
#define MC_CMD_NVRAM_INFO_IN_LEN 4
#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_INFO_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
/* MC_CMD_NVRAM_INFO_OUT msgresponse */
#define MC_CMD_NVRAM_INFO_OUT_LEN 24
#define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_INFO_OUT_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_OUT_SIZE_LEN 4
#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_LEN 4
#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_OUT_FLAGS_LEN 4
#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0
#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1
#define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_LBN 5
#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6
@@ -6142,36 +4770,51 @@
#define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7
#define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_LEN 4
#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
+#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_LEN 4
/* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */
#define MC_CMD_NVRAM_INFO_V2_OUT_LEN 28
#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_LEN 4
#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_LEN 4
#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_LEN 4
#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0
#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1
#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1
#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1
#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_LBN 5
#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_WIDTH 1
#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7
#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1
#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_LEN 4
#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_LEN 4
/* Writes must be multiples of this size. Added to support the MUM on Sorrento.
*/
#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_OFST 24
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_LEN 4
/***********************************/
/* MC_CMD_NVRAM_UPDATE_START
* Start a group of update operations on a virtual NVRAM partition. Locks
* required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type), EACCES (if
- * PHY_LOCK required and not held).
+ * PHY_LOCK required and not held). In an adapter bound to a TSA controller,
+ * MC_CMD_NVRAM_UPDATE_START can only be used on a subset of partition types
+ * i.e. static config, dynamic config and expansion ROM config. Attempting to
+ * perform this operation on a restricted partition will return the error
+ * EPERM.
*/
#define MC_CMD_NVRAM_UPDATE_START 0x38
#undef MC_CMD_0x38_PRIVILEGE_CTG
@@ -6183,6 +4826,7 @@
*/
#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
@@ -6193,9 +4837,11 @@
*/
#define MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN 8
#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_OFST 4
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_LEN 4
#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
@@ -6217,20 +4863,26 @@
/* MC_CMD_NVRAM_READ_IN msgrequest */
#define MC_CMD_NVRAM_READ_IN_LEN 12
#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_READ_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_READ_IN_OFFSET_LEN 4
/* amount to read in bytes */
#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_READ_IN_LENGTH_LEN 4
/* MC_CMD_NVRAM_READ_IN_V2 msgrequest */
#define MC_CMD_NVRAM_READ_IN_V2_LEN 16
#define MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0
+#define MC_CMD_NVRAM_READ_IN_V2_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4
+#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_LEN 4
/* amount to read in bytes */
#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8
+#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_LEN 4
/* Optional control info. If a partition is stored with an A/B versioning
* scheme (i.e. in more than one physical partition in NVRAM) the host can set
* this to control which underlying physical partition is used to read data
@@ -6240,6 +4892,7 @@
* verifying by reading with MODE=TARGET_BACKUP.
*/
#define MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12
+#define MC_CMD_NVRAM_READ_IN_V2_MODE_LEN 4
/* enum: Same as omitting MODE: caller sees data in current partition unless it
* holds the write lock in which case it sees data in the partition it is
* updating.
@@ -6280,10 +4933,13 @@
#define MC_CMD_NVRAM_WRITE_IN_LENMAX 252
#define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num))
#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_WRITE_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_WRITE_IN_OFFSET_LEN 4
#define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_WRITE_IN_LENGTH_LEN 4
#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1
#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1
@@ -6307,10 +4963,13 @@
/* MC_CMD_NVRAM_ERASE_IN msgrequest */
#define MC_CMD_NVRAM_ERASE_IN_LEN 12
#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_ERASE_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_ERASE_IN_OFFSET_LEN 4
#define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_ERASE_IN_LENGTH_LEN 4
/* MC_CMD_NVRAM_ERASE_OUT msgresponse */
#define MC_CMD_NVRAM_ERASE_OUT_LEN 0
@@ -6319,8 +4978,12 @@
/***********************************/
/* MC_CMD_NVRAM_UPDATE_FINISH
* Finish a group of update operations on a virtual NVRAM partition. Locks
- * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad
- * type/offset/length), EACCES (if PHY_LOCK required and not held)
+ * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type/offset/
+ * length), EACCES (if PHY_LOCK required and not held). In an adapter bound to
+ * a TSA controller, MC_CMD_NVRAM_UPDATE_FINISH can only be used on a subset of
+ * partition types i.e. static config, dynamic config and expansion ROM config.
+ * Attempting to perform this operation on a restricted partition will return
+ * the error EPERM.
*/
#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
#undef MC_CMD_0x3c_PRIVILEGE_CTG
@@ -6332,9 +4995,11 @@
*/
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_LEN 4
/* MC_CMD_NVRAM_UPDATE_FINISH_V2_IN msgrequest: Extended NVRAM_UPDATE_FINISH
* request with additional flags indicating version of NVRAM_UPDATE commands in
@@ -6343,10 +5008,13 @@
*/
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN 12
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_OFST 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_LEN 4
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_LEN 4
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
@@ -6373,6 +5041,7 @@
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN 4
/* Result of nvram update completion processing */
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_LEN 4
/* enum: Invalid return code; only non-zero values are defined. Defined as
* unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT.
*/
@@ -6407,6 +5076,8 @@
* only production signed images.
*/
#define MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc
+/* enum: The image has a lower security level than the current firmware. */
+#define MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE 0xd
/***********************************/
@@ -6430,11 +5101,12 @@
#define MC_CMD_REBOOT 0x3d
#undef MC_CMD_0x3d_PRIVILEGE_CTG
-#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_REBOOT_IN msgrequest */
#define MC_CMD_REBOOT_IN_LEN 4
#define MC_CMD_REBOOT_IN_FLAGS_OFST 0
+#define MC_CMD_REBOOT_IN_FLAGS_LEN 4
#define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */
/* MC_CMD_REBOOT_OUT msgresponse */
@@ -6473,11 +5145,12 @@
#define MC_CMD_REBOOT_MODE 0x3f
#undef MC_CMD_0x3f_PRIVILEGE_CTG
-#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_REBOOT_MODE_IN msgrequest */
#define MC_CMD_REBOOT_MODE_IN_LEN 4
#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
+#define MC_CMD_REBOOT_MODE_IN_VALUE_LEN 4
/* enum: Normal. */
#define MC_CMD_REBOOT_MODE_NORMAL 0x0
/* enum: Power-on Reset. */
@@ -6492,6 +5165,7 @@
/* MC_CMD_REBOOT_MODE_OUT msgresponse */
#define MC_CMD_REBOOT_MODE_OUT_LEN 4
#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
+#define MC_CMD_REBOOT_MODE_OUT_VALUE_LEN 4
/***********************************/
@@ -6528,7 +5202,7 @@
#define MC_CMD_SENSOR_INFO 0x41
#undef MC_CMD_0x41_PRIVILEGE_CTG
-#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_SENSOR_INFO_IN msgrequest */
#define MC_CMD_SENSOR_INFO_IN_LEN 0
@@ -6542,174 +5216,190 @@
* Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit). etc.
*/
#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
+#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_LEN 4
/* MC_CMD_SENSOR_INFO_OUT msgresponse */
#define MC_CMD_SENSOR_INFO_OUT_LENMIN 4
#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252
#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
+#define MC_CMD_SENSOR_INFO_OUT_MASK_LEN 4
/* enum: Controller temperature: degC */
-#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0
+#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0
/* enum: Phy common temperature: degC */
-#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1
+#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1
/* enum: Controller cooling: bool */
-#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2
+#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2
/* enum: Phy 0 temperature: degC */
-#define MC_CMD_SENSOR_PHY0_TEMP 0x3
+#define MC_CMD_SENSOR_PHY0_TEMP 0x3
/* enum: Phy 0 cooling: bool */
-#define MC_CMD_SENSOR_PHY0_COOLING 0x4
+#define MC_CMD_SENSOR_PHY0_COOLING 0x4
/* enum: Phy 1 temperature: degC */
-#define MC_CMD_SENSOR_PHY1_TEMP 0x5
+#define MC_CMD_SENSOR_PHY1_TEMP 0x5
/* enum: Phy 1 cooling: bool */
-#define MC_CMD_SENSOR_PHY1_COOLING 0x6
+#define MC_CMD_SENSOR_PHY1_COOLING 0x6
/* enum: 1.0v power: mV */
-#define MC_CMD_SENSOR_IN_1V0 0x7
+#define MC_CMD_SENSOR_IN_1V0 0x7
/* enum: 1.2v power: mV */
-#define MC_CMD_SENSOR_IN_1V2 0x8
+#define MC_CMD_SENSOR_IN_1V2 0x8
/* enum: 1.8v power: mV */
-#define MC_CMD_SENSOR_IN_1V8 0x9
+#define MC_CMD_SENSOR_IN_1V8 0x9
/* enum: 2.5v power: mV */
-#define MC_CMD_SENSOR_IN_2V5 0xa
+#define MC_CMD_SENSOR_IN_2V5 0xa
/* enum: 3.3v power: mV */
-#define MC_CMD_SENSOR_IN_3V3 0xb
+#define MC_CMD_SENSOR_IN_3V3 0xb
/* enum: 12v power: mV */
-#define MC_CMD_SENSOR_IN_12V0 0xc
+#define MC_CMD_SENSOR_IN_12V0 0xc
/* enum: 1.2v analogue power: mV */
-#define MC_CMD_SENSOR_IN_1V2A 0xd
+#define MC_CMD_SENSOR_IN_1V2A 0xd
/* enum: reference voltage: mV */
-#define MC_CMD_SENSOR_IN_VREF 0xe
+#define MC_CMD_SENSOR_IN_VREF 0xe
/* enum: AOE FPGA power: mV */
-#define MC_CMD_SENSOR_OUT_VAOE 0xf
+#define MC_CMD_SENSOR_OUT_VAOE 0xf
/* enum: AOE FPGA temperature: degC */
-#define MC_CMD_SENSOR_AOE_TEMP 0x10
+#define MC_CMD_SENSOR_AOE_TEMP 0x10
/* enum: AOE FPGA PSU temperature: degC */
-#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11
+#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11
/* enum: AOE PSU temperature: degC */
-#define MC_CMD_SENSOR_PSU_TEMP 0x12
+#define MC_CMD_SENSOR_PSU_TEMP 0x12
/* enum: Fan 0 speed: RPM */
-#define MC_CMD_SENSOR_FAN_0 0x13
+#define MC_CMD_SENSOR_FAN_0 0x13
/* enum: Fan 1 speed: RPM */
-#define MC_CMD_SENSOR_FAN_1 0x14
+#define MC_CMD_SENSOR_FAN_1 0x14
/* enum: Fan 2 speed: RPM */
-#define MC_CMD_SENSOR_FAN_2 0x15
+#define MC_CMD_SENSOR_FAN_2 0x15
/* enum: Fan 3 speed: RPM */
-#define MC_CMD_SENSOR_FAN_3 0x16
+#define MC_CMD_SENSOR_FAN_3 0x16
/* enum: Fan 4 speed: RPM */
-#define MC_CMD_SENSOR_FAN_4 0x17
+#define MC_CMD_SENSOR_FAN_4 0x17
/* enum: AOE FPGA input power: mV */
-#define MC_CMD_SENSOR_IN_VAOE 0x18
+#define MC_CMD_SENSOR_IN_VAOE 0x18
/* enum: AOE FPGA current: mA */
-#define MC_CMD_SENSOR_OUT_IAOE 0x19
+#define MC_CMD_SENSOR_OUT_IAOE 0x19
/* enum: AOE FPGA input current: mA */
-#define MC_CMD_SENSOR_IN_IAOE 0x1a
+#define MC_CMD_SENSOR_IN_IAOE 0x1a
/* enum: NIC power consumption: W */
-#define MC_CMD_SENSOR_NIC_POWER 0x1b
+#define MC_CMD_SENSOR_NIC_POWER 0x1b
/* enum: 0.9v power voltage: mV */
-#define MC_CMD_SENSOR_IN_0V9 0x1c
+#define MC_CMD_SENSOR_IN_0V9 0x1c
/* enum: 0.9v power current: mA */
-#define MC_CMD_SENSOR_IN_I0V9 0x1d
+#define MC_CMD_SENSOR_IN_I0V9 0x1d
/* enum: 1.2v power current: mA */
-#define MC_CMD_SENSOR_IN_I1V2 0x1e
+#define MC_CMD_SENSOR_IN_I1V2 0x1e
/* enum: Not a sensor: reserved for the next page flag */
-#define MC_CMD_SENSOR_PAGE0_NEXT 0x1f
+#define MC_CMD_SENSOR_PAGE0_NEXT 0x1f
/* enum: 0.9v power voltage (at ADC): mV */
-#define MC_CMD_SENSOR_IN_0V9_ADC 0x20
+#define MC_CMD_SENSOR_IN_0V9_ADC 0x20
/* enum: Controller temperature 2: degC */
-#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21
+#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21
/* enum: Voltage regulator internal temperature: degC */
-#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22
+#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22
/* enum: 0.9V voltage regulator temperature: degC */
-#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23
+#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23
/* enum: 1.2V voltage regulator temperature: degC */
-#define MC_CMD_SENSOR_VREG_1V2_TEMP 0x24
+#define MC_CMD_SENSOR_VREG_1V2_TEMP 0x24
/* enum: controller internal temperature sensor voltage (internal ADC): mV */
-#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25
/* enum: controller internal temperature (internal ADC): degC */
-#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26
/* enum: controller internal temperature sensor voltage (external ADC): mV */
-#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27
/* enum: controller internal temperature (external ADC): degC */
-#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28
/* enum: ambient temperature: degC */
-#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29
+#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29
/* enum: air flow: bool */
-#define MC_CMD_SENSOR_AIRFLOW 0x2a
+#define MC_CMD_SENSOR_AIRFLOW 0x2a
/* enum: voltage between VSS08D and VSS08D at CSR: mV */
-#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b
/* enum: voltage between VSS08D and VSS08D at CSR (external ADC): mV */
-#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
/* enum: Hotpoint temperature: degC */
-#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d
+#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d
/* enum: Port 0 PHY power switch over-current: bool */
-#define MC_CMD_SENSOR_PHY_POWER_PORT0 0x2e
+#define MC_CMD_SENSOR_PHY_POWER_PORT0 0x2e
/* enum: Port 1 PHY power switch over-current: bool */
-#define MC_CMD_SENSOR_PHY_POWER_PORT1 0x2f
-/* enum: Mop-up microcontroller reference voltage (millivolts) */
-#define MC_CMD_SENSOR_MUM_VCC 0x30
+#define MC_CMD_SENSOR_PHY_POWER_PORT1 0x2f
+/* enum: Mop-up microcontroller reference voltage: mV */
+#define MC_CMD_SENSOR_MUM_VCC 0x30
/* enum: 0.9v power phase A voltage: mV */
-#define MC_CMD_SENSOR_IN_0V9_A 0x31
+#define MC_CMD_SENSOR_IN_0V9_A 0x31
/* enum: 0.9v power phase A current: mA */
-#define MC_CMD_SENSOR_IN_I0V9_A 0x32
+#define MC_CMD_SENSOR_IN_I0V9_A 0x32
/* enum: 0.9V voltage regulator phase A temperature: degC */
-#define MC_CMD_SENSOR_VREG_0V9_A_TEMP 0x33
+#define MC_CMD_SENSOR_VREG_0V9_A_TEMP 0x33
/* enum: 0.9v power phase B voltage: mV */
-#define MC_CMD_SENSOR_IN_0V9_B 0x34
+#define MC_CMD_SENSOR_IN_0V9_B 0x34
/* enum: 0.9v power phase B current: mA */
-#define MC_CMD_SENSOR_IN_I0V9_B 0x35
+#define MC_CMD_SENSOR_IN_I0V9_B 0x35
/* enum: 0.9V voltage regulator phase B temperature: degC */
-#define MC_CMD_SENSOR_VREG_0V9_B_TEMP 0x36
+#define MC_CMD_SENSOR_VREG_0V9_B_TEMP 0x36
/* enum: CCOM AVREG 1v2 supply (interval ADC): mV */
-#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY 0x37
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY 0x37
/* enum: CCOM AVREG 1v2 supply (external ADC): mV */
-#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC 0x38
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC 0x38
/* enum: CCOM AVREG 1v8 supply (interval ADC): mV */
-#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39
/* enum: CCOM AVREG 1v8 supply (external ADC): mV */
-#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a
/* enum: CCOM RTS temperature: degC */
-#define MC_CMD_SENSOR_CONTROLLER_RTS 0x3b
+#define MC_CMD_SENSOR_CONTROLLER_RTS 0x3b
/* enum: Not a sensor: reserved for the next page flag */
-#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f
+#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f
/* enum: controller internal temperature sensor voltage on master core
* (internal ADC): mV
*/
-#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT 0x40
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT 0x40
/* enum: controller internal temperature on master core (internal ADC): degC */
-#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP 0x41
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP 0x41
/* enum: controller internal temperature sensor voltage on master core
* (external ADC): mV
*/
-#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC 0x42
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC 0x42
/* enum: controller internal temperature on master core (external ADC): degC */
-#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC 0x43
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC 0x43
/* enum: controller internal temperature on slave core sensor voltage (internal
* ADC): mV
*/
-#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT 0x44
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT 0x44
/* enum: controller internal temperature on slave core (internal ADC): degC */
-#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP 0x45
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP 0x45
/* enum: controller internal temperature on slave core sensor voltage (external
* ADC): mV
*/
-#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC 0x46
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC 0x46
/* enum: controller internal temperature on slave core (external ADC): degC */
-#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC 0x47
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC 0x47
/* enum: Voltage supplied to the SODIMMs from their power supply: mV */
-#define MC_CMD_SENSOR_SODIMM_VOUT 0x49
+#define MC_CMD_SENSOR_SODIMM_VOUT 0x49
/* enum: Temperature of SODIMM 0 (if installed): degC */
-#define MC_CMD_SENSOR_SODIMM_0_TEMP 0x4a
+#define MC_CMD_SENSOR_SODIMM_0_TEMP 0x4a
/* enum: Temperature of SODIMM 1 (if installed): degC */
-#define MC_CMD_SENSOR_SODIMM_1_TEMP 0x4b
+#define MC_CMD_SENSOR_SODIMM_1_TEMP 0x4b
/* enum: Voltage supplied to the QSFP #0 from their power supply: mV */
-#define MC_CMD_SENSOR_PHY0_VCC 0x4c
+#define MC_CMD_SENSOR_PHY0_VCC 0x4c
/* enum: Voltage supplied to the QSFP #1 from their power supply: mV */
-#define MC_CMD_SENSOR_PHY1_VCC 0x4d
+#define MC_CMD_SENSOR_PHY1_VCC 0x4d
/* enum: Controller die temperature (TDIODE): degC */
-#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP 0x4e
+#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP 0x4e
/* enum: Board temperature (front): degC */
-#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f
+#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f
/* enum: Board temperature (back): degC */
-#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50
+#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50
+/* enum: 1.8v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V8 0x51
+/* enum: 2.5v power current: mA */
+#define MC_CMD_SENSOR_IN_I2V5 0x52
+/* enum: 3.3v power current: mA */
+#define MC_CMD_SENSOR_IN_I3V3 0x53
+/* enum: 12v power current: mA */
+#define MC_CMD_SENSOR_IN_I12V0 0x54
+/* enum: 1.3v power: mV */
+#define MC_CMD_SENSOR_IN_1V3 0x55
+/* enum: 1.3v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V3 0x56
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE2_NEXT 0x5f
/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
#define MC_CMD_SENSOR_ENTRY_OFST 4
#define MC_CMD_SENSOR_ENTRY_LEN 8
@@ -6723,6 +5413,7 @@
#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
+#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_SENSOR_INFO_OUT */
#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31
@@ -6775,7 +5466,7 @@
#define MC_CMD_READ_SENSORS 0x42
#undef MC_CMD_0x42_PRIVILEGE_CTG
-#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_READ_SENSORS_IN msgrequest */
#define MC_CMD_READ_SENSORS_IN_LEN 8
@@ -6794,6 +5485,7 @@
#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4
/* Size in bytes of host buffer. */
#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8
+#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_LEN 4
/* MC_CMD_READ_SENSORS_OUT msgresponse */
#define MC_CMD_READ_SENSORS_OUT_LEN 0
@@ -6810,17 +5502,17 @@
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1
/* enum: Ok. */
-#define MC_CMD_SENSOR_STATE_OK 0x0
+#define MC_CMD_SENSOR_STATE_OK 0x0
/* enum: Breached warning threshold. */
-#define MC_CMD_SENSOR_STATE_WARNING 0x1
+#define MC_CMD_SENSOR_STATE_WARNING 0x1
/* enum: Breached fatal threshold. */
-#define MC_CMD_SENSOR_STATE_FATAL 0x2
+#define MC_CMD_SENSOR_STATE_FATAL 0x2
/* enum: Fault with sensor. */
-#define MC_CMD_SENSOR_STATE_BROKEN 0x3
+#define MC_CMD_SENSOR_STATE_BROKEN 0x3
/* enum: Sensor is working but does not currently have a reading. */
-#define MC_CMD_SENSOR_STATE_NO_READING 0x4
+#define MC_CMD_SENSOR_STATE_NO_READING 0x4
/* enum: Sensor initialisation failed. */
-#define MC_CMD_SENSOR_STATE_INIT_FAILED 0x5
+#define MC_CMD_SENSOR_STATE_INIT_FAILED 0x5
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3
@@ -6848,6 +5540,7 @@
/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_PHY_STATE_OUT_STATE_LEN 4
/* enum: Ok. */
#define MC_CMD_PHY_STATE_OK 0x1
/* enum: Faulty. */
@@ -6885,6 +5578,7 @@
/* MC_CMD_WOL_FILTER_GET_OUT msgresponse */
#define MC_CMD_WOL_FILTER_GET_OUT_LEN 4
#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
+#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_LEN 4
/***********************************/
@@ -6902,8 +5596,9 @@
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
-#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */
+#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1
@@ -6912,13 +5607,16 @@
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_LEN 4
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10
@@ -6929,6 +5627,7 @@
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_LEN 4
/***********************************/
@@ -6944,7 +5643,9 @@
/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_LEN 4
/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
@@ -6972,7 +5673,7 @@
#define MC_CMD_TESTASSERT 0x49
#undef MC_CMD_0x49_PRIVILEGE_CTG
-#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_TESTASSERT_IN msgrequest */
#define MC_CMD_TESTASSERT_IN_LEN 0
@@ -6984,20 +5685,21 @@
#define MC_CMD_TESTASSERT_V2_IN_LEN 4
/* How to provoke the assertion */
#define MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0
+#define MC_CMD_TESTASSERT_V2_IN_TYPE_LEN 4
/* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless
* you're testing firmware, this is what you want.
*/
-#define MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES 0x0
+#define MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES 0x0
/* enum: Assert using assert(0); */
-#define MC_CMD_TESTASSERT_V2_IN_ASSERT_FALSE 0x1
+#define MC_CMD_TESTASSERT_V2_IN_ASSERT_FALSE 0x1
/* enum: Deliberately trigger a watchdog */
-#define MC_CMD_TESTASSERT_V2_IN_WATCHDOG 0x2
+#define MC_CMD_TESTASSERT_V2_IN_WATCHDOG 0x2
/* enum: Deliberately trigger a trap by loading from an invalid address */
-#define MC_CMD_TESTASSERT_V2_IN_LOAD_TRAP 0x3
+#define MC_CMD_TESTASSERT_V2_IN_LOAD_TRAP 0x3
/* enum: Deliberately trigger a trap by storing to an invalid address */
-#define MC_CMD_TESTASSERT_V2_IN_STORE_TRAP 0x4
+#define MC_CMD_TESTASSERT_V2_IN_STORE_TRAP 0x4
/* enum: Jump to an invalid address */
-#define MC_CMD_TESTASSERT_V2_IN_JUMP_TRAP 0x5
+#define MC_CMD_TESTASSERT_V2_IN_JUMP_TRAP 0x5
/* MC_CMD_TESTASSERT_V2_OUT msgresponse */
#define MC_CMD_TESTASSERT_V2_OUT_LEN 0
@@ -7020,6 +5722,7 @@
#define MC_CMD_WORKAROUND_IN_LEN 8
/* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */
#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
+#define MC_CMD_WORKAROUND_IN_TYPE_LEN 4
/* enum: Bug 17230 work around. */
#define MC_CMD_WORKAROUND_BUG17230 0x1
/* enum: Bug 35388 work around (unsafe EVQ writes). */
@@ -7048,6 +5751,7 @@
* the workaround
*/
#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
+#define MC_CMD_WORKAROUND_IN_ENABLED_LEN 4
/* MC_CMD_WORKAROUND_OUT msgresponse */
#define MC_CMD_WORKAROUND_OUT_LEN 0
@@ -7057,6 +5761,7 @@
*/
#define MC_CMD_WORKAROUND_EXT_OUT_LEN 4
#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_LEN 4
#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0
#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1
@@ -7078,6 +5783,7 @@
/* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_LEN 4
/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
@@ -7085,6 +5791,7 @@
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
/* in bytes */
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_LEN 4
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1
@@ -7099,17 +5806,19 @@
#define MC_CMD_NVRAM_TEST 0x4c
#undef MC_CMD_0x4c_PRIVILEGE_CTG
-#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_NVRAM_TEST_IN msgrequest */
#define MC_CMD_NVRAM_TEST_IN_LEN 4
#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_TEST_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
/* MC_CMD_NVRAM_TEST_OUT msgresponse */
#define MC_CMD_NVRAM_TEST_OUT_LEN 4
#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
+#define MC_CMD_NVRAM_TEST_OUT_RESULT_LEN 4
/* enum: Passed. */
#define MC_CMD_NVRAM_TEST_PASS 0x0
/* enum: Failed. */
@@ -7130,12 +5839,16 @@
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
/* 0-6 low->high de-emph. */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_LEN 4
/* 0-8 low->high ref.V */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_LEN 4
/* 0-8 0-8 low->high boost */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_LEN 4
/* 0-8 low->high ref.V */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_LEN 4
/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
#define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
@@ -7144,10 +5857,13 @@
#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
/* input bits */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_LEN 4
/* output bits */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_LEN 4
/* direction */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_LEN 4
/* enum: Out. */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
/* enum: In. */
@@ -7163,21 +5879,26 @@
#define MC_CMD_SENSOR_SET_LIMS 0x4e
#undef MC_CMD_0x4e_PRIVILEGE_CTG
-#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
+#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4
/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4
/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4
/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4
/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
#define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
@@ -7194,9 +5915,13 @@
/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_LEN 4
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_LEN 4
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_LEN 4
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_LEN 4
/***********************************/
@@ -7218,6 +5943,7 @@
#define MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num))
/* total number of partitions */
#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0
+#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_LEN 4
/* type ID code for each of NUM_PARTITIONS partitions */
#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4
#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4
@@ -7239,6 +5965,7 @@
#define MC_CMD_NVRAM_METADATA_IN_LEN 4
/* Partition type ID code */
#define MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_METADATA_IN_TYPE_LEN 4
/* MC_CMD_NVRAM_METADATA_OUT msgresponse */
#define MC_CMD_NVRAM_METADATA_OUT_LENMIN 20
@@ -7246,7 +5973,9 @@
#define MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num))
/* Partition type ID code */
#define MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_METADATA_OUT_TYPE_LEN 4
#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4
+#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_LEN 4
#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0
#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1
#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1
@@ -7255,6 +5984,7 @@
#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1
/* Subtype ID code for content of this partition */
#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_LEN 4
/* 1st component of W.X.Y.Z version number for content of this partition */
#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12
#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2
@@ -7296,8 +6026,10 @@
#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2
/* Number of allocated MAC addresses */
#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_LEN 4
/* Spacing of allocated MAC addresses */
#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_LEN 4
/***********************************/
@@ -7307,12 +6039,13 @@
#define MC_CMD_CLP 0x56
#undef MC_CMD_0x56_PRIVILEGE_CTG
-#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_CLP_IN msgrequest */
#define MC_CMD_CLP_IN_LEN 4
/* Sub operation */
#define MC_CMD_CLP_IN_OP_OFST 0
+#define MC_CMD_CLP_IN_OP_LEN 4
/* enum: Return to factory default settings */
#define MC_CMD_CLP_OP_DEFAULT 0x1
/* enum: Set MAC address */
@@ -7330,6 +6063,7 @@
/* MC_CMD_CLP_IN_DEFAULT msgrequest */
#define MC_CMD_CLP_IN_DEFAULT_LEN 4
/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
/* MC_CMD_CLP_OUT_DEFAULT msgresponse */
#define MC_CMD_CLP_OUT_DEFAULT_LEN 0
@@ -7337,6 +6071,7 @@
/* MC_CMD_CLP_IN_SET_MAC msgrequest */
#define MC_CMD_CLP_IN_SET_MAC_LEN 12
/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
/* MAC address assigned to port */
#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
@@ -7350,6 +6085,7 @@
/* MC_CMD_CLP_IN_GET_MAC msgrequest */
#define MC_CMD_CLP_IN_GET_MAC_LEN 4
/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
/* MC_CMD_CLP_OUT_GET_MAC msgresponse */
#define MC_CMD_CLP_OUT_GET_MAC_LEN 8
@@ -7363,6 +6099,7 @@
/* MC_CMD_CLP_IN_SET_BOOT msgrequest */
#define MC_CMD_CLP_IN_SET_BOOT_LEN 5
/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
/* Boot flag */
#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
@@ -7373,6 +6110,7 @@
/* MC_CMD_CLP_IN_GET_BOOT msgrequest */
#define MC_CMD_CLP_IN_GET_BOOT_LEN 4
/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4
@@ -7391,11 +6129,12 @@
#define MC_CMD_MUM 0x57
#undef MC_CMD_0x57_PRIVILEGE_CTG
-#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_MUM_IN msgrequest */
#define MC_CMD_MUM_IN_LEN 4
#define MC_CMD_MUM_IN_OP_HDR_OFST 0
+#define MC_CMD_MUM_IN_OP_HDR_LEN 4
#define MC_CMD_MUM_IN_OP_LBN 0
#define MC_CMD_MUM_IN_OP_WIDTH 8
/* enum: NULL MCDI command to MUM */
@@ -7435,26 +6174,32 @@
#define MC_CMD_MUM_IN_NULL_LEN 4
/* MUM cmd header */
#define MC_CMD_MUM_IN_CMD_OFST 0
+#define MC_CMD_MUM_IN_CMD_LEN 4
/* MC_CMD_MUM_IN_GET_VERSION msgrequest */
#define MC_CMD_MUM_IN_GET_VERSION_LEN 4
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* MC_CMD_MUM_IN_READ msgrequest */
#define MC_CMD_MUM_IN_READ_LEN 16
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* ID of (device connected to MUM) to read from registers of */
#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4
+#define MC_CMD_MUM_IN_READ_DEVICE_LEN 4
/* enum: Hittite HMC1035 clock generator on Sorrento board */
#define MC_CMD_MUM_DEV_HITTITE 0x1
/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2
/* 32-bit address to read from */
#define MC_CMD_MUM_IN_READ_ADDR_OFST 8
+#define MC_CMD_MUM_IN_READ_ADDR_LEN 4
/* Number of words to read. */
#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
+#define MC_CMD_MUM_IN_READ_NUMWORDS_LEN 4
/* MC_CMD_MUM_IN_WRITE msgrequest */
#define MC_CMD_MUM_IN_WRITE_LENMIN 16
@@ -7462,12 +6207,15 @@
#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* ID of (device connected to MUM) to write to registers of */
#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
+#define MC_CMD_MUM_IN_WRITE_DEVICE_LEN 4
/* enum: Hittite HMC1035 clock generator on Sorrento board */
/* MC_CMD_MUM_DEV_HITTITE 0x1 */
/* 32-bit address to write to */
#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
+#define MC_CMD_MUM_IN_WRITE_ADDR_LEN 4
/* Words to write */
#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
@@ -7480,12 +6228,16 @@
#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* MUM I2C cmd code */
#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
+#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_LEN 4
/* Number of bytes to write */
#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_LEN 4
/* Number of bytes to read */
#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_LEN 4
/* Bytes to write */
#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
@@ -7496,21 +6248,28 @@
#define MC_CMD_MUM_IN_LOG_LEN 8
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_LOG_OP_OFST 4
-#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */
+#define MC_CMD_MUM_IN_LOG_OP_LEN 4
+#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */
/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */
+/* MC_CMD_MUM_IN_LOG_OP_LEN 4 */
/* Enable/disable debug output to UART */
#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
+#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_LEN 4
/* MC_CMD_MUM_IN_GPIO msgrequest */
#define MC_CMD_MUM_IN_GPIO_LEN 8
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_HDR_LEN 4
#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
@@ -7523,40 +6282,56 @@
/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_LEN 4
/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_LEN 4
/* The first 32-bit word to be written to the GPIO OUT register. */
#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_LEN 4
/* The second 32-bit word to be written to the GPIO OUT register. */
#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_LEN 4
/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_LEN 4
/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_LEN 4
/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_LEN 4
/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_LEN 4
/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_LEN 4
/* MC_CMD_MUM_IN_GPIO_OP msgrequest */
#define MC_CMD_MUM_IN_GPIO_OP_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_HDR_LEN 4
#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
@@ -7569,26 +6344,34 @@
/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_LEN 4
/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_LEN 4
#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_LEN 4
#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_LEN 4
#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
@@ -7596,7 +6379,9 @@
#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
+#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_LEN 4
#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
@@ -7606,13 +6391,16 @@
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* Bit-mask of clocks to be programmed */
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_LEN 4
#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
/* Control flags for clock programming */
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_LEN 4
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1
@@ -7624,19 +6412,24 @@
#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* Enable/Disable FPGA config from flash */
#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
+#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_LEN 4
/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* MC_CMD_MUM_IN_QSFP msgrequest */
#define MC_CMD_MUM_IN_QSFP_LEN 12
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
@@ -7646,52 +6439,77 @@
#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_IDX_LEN 4
/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_INIT_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_INIT_IDX_LEN 4
#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_LEN 4
/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_LEN 4
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_LEN 4
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_LEN 4
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_LEN 4
/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_LEN 4
/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_LEN 4
#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_LEN 4
/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_LEN 4
/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_LEN 4
/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */
#define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* MC_CMD_MUM_OUT msgresponse */
#define MC_CMD_MUM_OUT_LEN 0
@@ -7702,6 +6520,7 @@
/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12
#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_LEN 4
#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
@@ -7739,8 +6558,10 @@
#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
/* The first 32-bit word read from the GPIO IN register. */
#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_LEN 4
/* The second 32-bit word read from the GPIO IN register. */
#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_LEN 4
/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
@@ -7749,8 +6570,10 @@
#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
/* The first 32-bit word read from the GPIO OUT register. */
#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_LEN 4
/* The second 32-bit word read from the GPIO OUT register. */
#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_LEN 4
/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
@@ -7758,11 +6581,14 @@
/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_LEN 4
#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_LEN 4
/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_LEN 4
/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
@@ -7791,6 +6617,7 @@
/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
+#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_LEN 4
/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
@@ -7798,6 +6625,7 @@
/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
+#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_LEN 4
/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
@@ -7805,7 +6633,9 @@
/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_LEN 4
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_LEN 4
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
@@ -7814,6 +6644,7 @@
/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_LEN 4
/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
@@ -7821,6 +6652,7 @@
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
/* in bytes */
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_LEN 4
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
@@ -7829,11 +6661,14 @@
/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_LEN 4
#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_LEN 4
/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_LEN 4
/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */
#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24
@@ -7841,12 +6676,14 @@
#define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num))
/* Discrete (soldered) DDR resistor strap info */
#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_LEN 4
#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0
#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16
#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16
#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16
/* Number of SODIMM info records */
#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_LEN 4
/* Array of SODIMM info records */
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
@@ -7907,18 +6744,19 @@
/* EVB_PORT_ID structuredef */
#define EVB_PORT_ID_LEN 4
#define EVB_PORT_ID_PORT_ID_OFST 0
+#define EVB_PORT_ID_PORT_ID_LEN 4
/* enum: An invalid port handle. */
-#define EVB_PORT_ID_NULL 0x0
+#define EVB_PORT_ID_NULL 0x0
/* enum: The port assigned to this function. */
-#define EVB_PORT_ID_ASSIGNED 0x1000000
+#define EVB_PORT_ID_ASSIGNED 0x1000000
/* enum: External network port 0 */
-#define EVB_PORT_ID_MAC0 0x2000000
+#define EVB_PORT_ID_MAC0 0x2000000
/* enum: External network port 1 */
-#define EVB_PORT_ID_MAC1 0x2000001
+#define EVB_PORT_ID_MAC1 0x2000001
/* enum: External network port 2 */
-#define EVB_PORT_ID_MAC2 0x2000002
+#define EVB_PORT_ID_MAC2 0x2000002
/* enum: External network port 3 */
-#define EVB_PORT_ID_MAC3 0x2000003
+#define EVB_PORT_ID_MAC3 0x2000003
#define EVB_PORT_ID_PORT_ID_LBN 0
#define EVB_PORT_ID_PORT_ID_WIDTH 32
@@ -7930,7 +6768,7 @@
#define EVB_VLAN_TAG_MODE_LBN 12
#define EVB_VLAN_TAG_MODE_WIDTH 4
/* enum: Insert the VLAN. */
-#define EVB_VLAN_TAG_INSERT 0x0
+#define EVB_VLAN_TAG_INSERT 0x0
/* enum: Replace the VLAN if already present. */
#define EVB_VLAN_TAG_REPLACE 0x1
@@ -7959,121 +6797,149 @@
#define NVRAM_PARTITION_TYPE_ID_OFST 0
#define NVRAM_PARTITION_TYPE_ID_LEN 2
/* enum: Primary MC firmware partition */
-#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100
/* enum: Secondary MC firmware partition */
-#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200
/* enum: Expansion ROM partition */
-#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300
+#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300
/* enum: Static configuration TLV partition */
-#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400
+#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400
/* enum: Dynamic configuration TLV partition */
-#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500
+#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500
/* enum: Expansion ROM configuration data for port 0 */
-#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600
/* enum: Synonym for EXPROM_CONFIG_PORT0 as used in pmap files */
-#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG 0x600
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG 0x600
/* enum: Expansion ROM configuration data for port 1 */
-#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601
/* enum: Expansion ROM configuration data for port 2 */
-#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602
/* enum: Expansion ROM configuration data for port 3 */
-#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603
/* enum: Non-volatile log output partition */
-#define NVRAM_PARTITION_TYPE_LOG 0x700
+#define NVRAM_PARTITION_TYPE_LOG 0x700
/* enum: Non-volatile log output of second core on dual-core device */
-#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701
+#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701
/* enum: Device state dump output partition */
-#define NVRAM_PARTITION_TYPE_DUMP 0x800
+#define NVRAM_PARTITION_TYPE_DUMP 0x800
/* enum: Application license key storage partition */
-#define NVRAM_PARTITION_TYPE_LICENSE 0x900
+#define NVRAM_PARTITION_TYPE_LICENSE 0x900
/* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */
-#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00
+#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00
/* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */
-#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff
+#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff
/* enum: Primary FPGA partition */
-#define NVRAM_PARTITION_TYPE_FPGA 0xb00
+#define NVRAM_PARTITION_TYPE_FPGA 0xb00
/* enum: Secondary FPGA partition */
-#define NVRAM_PARTITION_TYPE_FPGA_BACKUP 0xb01
+#define NVRAM_PARTITION_TYPE_FPGA_BACKUP 0xb01
/* enum: FC firmware partition */
-#define NVRAM_PARTITION_TYPE_FC_FIRMWARE 0xb02
+#define NVRAM_PARTITION_TYPE_FC_FIRMWARE 0xb02
/* enum: FC License partition */
-#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03
+#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03
/* enum: Non-volatile log output partition for FC */
-#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04
+#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04
/* enum: MUM firmware partition */
-#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00
+#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00
+/* enum: SUC firmware partition (this is intentionally an alias of
+ * MUM_FIRMWARE)
+ */
+#define NVRAM_PARTITION_TYPE_SUC_FIRMWARE 0xc00
/* enum: MUM Non-volatile log output partition. */
-#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01
+#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01
/* enum: MUM Application table partition. */
-#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02
+#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02
/* enum: MUM boot rom partition. */
-#define NVRAM_PARTITION_TYPE_MUM_BOOT_ROM 0xc03
+#define NVRAM_PARTITION_TYPE_MUM_BOOT_ROM 0xc03
/* enum: MUM production signatures & calibration rom partition. */
-#define NVRAM_PARTITION_TYPE_MUM_PROD_ROM 0xc04
+#define NVRAM_PARTITION_TYPE_MUM_PROD_ROM 0xc04
/* enum: MUM user signatures & calibration rom partition. */
-#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05
+#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05
/* enum: MUM fuses and lockbits partition. */
-#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06
+#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06
/* enum: UEFI expansion ROM if separate from PXE */
-#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00
-/* enum: Spare partition 0 */
-#define NVRAM_PARTITION_TYPE_SPARE_0 0x1000
+#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00
+/* enum: Used by the expansion ROM for logging */
+#define NVRAM_PARTITION_TYPE_PXE_LOG 0x1000
/* enum: Used for XIP code of shmbooted images */
-#define NVRAM_PARTITION_TYPE_XIP_SCRATCH 0x1100
+#define NVRAM_PARTITION_TYPE_XIP_SCRATCH 0x1100
/* enum: Spare partition 2 */
-#define NVRAM_PARTITION_TYPE_SPARE_2 0x1200
+#define NVRAM_PARTITION_TYPE_SPARE_2 0x1200
/* enum: Manufacturing partition. Used during manufacture to pass information
* between XJTAG and Manftest.
*/
-#define NVRAM_PARTITION_TYPE_MANUFACTURING 0x1300
+#define NVRAM_PARTITION_TYPE_MANUFACTURING 0x1300
/* enum: Spare partition 4 */
-#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400
+#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400
/* enum: Spare partition 5 */
-#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500
+#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500
/* enum: Partition for reporting MC status. See mc_flash_layout.h
* medford_mc_status_hdr_t for layout on Medford.
*/
-#define NVRAM_PARTITION_TYPE_STATUS 0x1600
+#define NVRAM_PARTITION_TYPE_STATUS 0x1600
+/* enum: Spare partition 13 */
+#define NVRAM_PARTITION_TYPE_SPARE_13 0x1700
+/* enum: Spare partition 14 */
+#define NVRAM_PARTITION_TYPE_SPARE_14 0x1800
+/* enum: Spare partition 15 */
+#define NVRAM_PARTITION_TYPE_SPARE_15 0x1900
+/* enum: Spare partition 16 */
+#define NVRAM_PARTITION_TYPE_SPARE_16 0x1a00
+/* enum: Factory defaults for dynamic configuration */
+#define NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS 0x1b00
+/* enum: Factory defaults for expansion ROM configuration */
+#define NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS 0x1c00
+/* enum: Field Replaceable Unit inventory information for use on IPMI
+ * platforms. See SF-119124-PS. The STATIC_CONFIG partition may contain a
+ * subset of the information stored in this partition.
+ */
+#define NVRAM_PARTITION_TYPE_FRU_INFORMATION 0x1d00
/* enum: Start of reserved value range (firmware may use for any purpose) */
-#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
/* enum: End of reserved value range (firmware may use for any purpose) */
-#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd
/* enum: Recovery partition map (provided if real map is missing or corrupt) */
-#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe
+#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe
/* enum: Partition map (real map as stored in flash) */
-#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff
+#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff
#define NVRAM_PARTITION_TYPE_ID_LBN 0
#define NVRAM_PARTITION_TYPE_ID_WIDTH 16
/* LICENSED_APP_ID structuredef */
#define LICENSED_APP_ID_LEN 4
#define LICENSED_APP_ID_ID_OFST 0
+#define LICENSED_APP_ID_ID_LEN 4
/* enum: OpenOnload */
-#define LICENSED_APP_ID_ONLOAD 0x1
+#define LICENSED_APP_ID_ONLOAD 0x1
/* enum: PTP timestamping */
-#define LICENSED_APP_ID_PTP 0x2
+#define LICENSED_APP_ID_PTP 0x2
/* enum: SolarCapture Pro */
-#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4
+#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4
/* enum: SolarSecure filter engine */
-#define LICENSED_APP_ID_SOLARSECURE 0x8
+#define LICENSED_APP_ID_SOLARSECURE 0x8
/* enum: Performance monitor */
-#define LICENSED_APP_ID_PERF_MONITOR 0x10
+#define LICENSED_APP_ID_PERF_MONITOR 0x10
/* enum: SolarCapture Live */
-#define LICENSED_APP_ID_SOLARCAPTURE_LIVE 0x20
+#define LICENSED_APP_ID_SOLARCAPTURE_LIVE 0x20
/* enum: Capture SolarSystem */
-#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40
/* enum: Network Access Control */
-#define LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80
+#define LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80
/* enum: TCP Direct */
-#define LICENSED_APP_ID_TCP_DIRECT 0x100
+#define LICENSED_APP_ID_TCP_DIRECT 0x100
/* enum: Low Latency */
-#define LICENSED_APP_ID_LOW_LATENCY 0x200
+#define LICENSED_APP_ID_LOW_LATENCY 0x200
/* enum: SolarCapture Tap */
-#define LICENSED_APP_ID_SOLARCAPTURE_TAP 0x400
+#define LICENSED_APP_ID_SOLARCAPTURE_TAP 0x400
/* enum: Capture SolarSystem 40G */
#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_40G 0x800
/* enum: Capture SolarSystem 1G */
-#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_1G 0x1000
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_1G 0x1000
+/* enum: ScaleOut Onload */
+#define LICENSED_APP_ID_SCALEOUT_ONLOAD 0x2000
+/* enum: SCS Network Analytics Dashboard */
+#define LICENSED_APP_ID_DSHBRD 0x4000
+/* enum: SolarCapture Trading Analytics */
+#define LICENSED_APP_ID_SCATRD 0x8000
#define LICENSED_APP_ID_ID_LBN 0
#define LICENSED_APP_ID_ID_WIDTH 32
@@ -8140,6 +7006,12 @@
#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1
#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12
#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1
+#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN 13
+#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH 1
+#define LICENSED_V3_APPS_DSHBRD_LBN 14
+#define LICENSED_V3_APPS_DSHBRD_WIDTH 1
+#define LICENSED_V3_APPS_SCATRD_LBN 15
+#define LICENSED_V3_APPS_SCATRD_WIDTH 1
#define LICENSED_V3_APPS_MASK_LBN 0
#define LICENSED_V3_APPS_MASK_WIDTH 64
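
For illustration, a hedged sketch of how the new LICENSED_V3_APPS_* bit positions could be tested against the 64-bit licensed-applications mask (LBN 0, width 64). The helper names are hypothetical and the mask value is assumed to come from the relevant licensing MCDI response.

#include <stdbool.h>
#include <stdint.h>

/* Test a single LICENSED_V3_APPS_*_LBN bit in the 64-bit apps mask. */
static bool v3_app_licensed(uint64_t apps_mask, unsigned int app_lbn)
{
	return (apps_mask >> app_lbn) & 1u;
}

/* Example: is ScaleOut Onload licensed on this adapter? */
static bool scaleout_onload_licensed(uint64_t apps_mask)
{
	return v3_app_licensed(apps_mask, LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN);
}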
@@ -8185,11 +7057,23 @@
#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST 3
#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1
/* enum: This is a TX completion event, not a timestamp */
-#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0
+#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0
+/* enum: This is a TX completion event for a CTPIO transmit. The event format
+ * is the same as for TX_EV_COMPLETION.
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_COMPLETION 0x11
+/* enum: This is the low part of a TX timestamp for a CTPIO transmission. The
+ * event format is the same as for TX_EV_TSTAMP_LO
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_LO 0x12
+/* enum: This is the high part of a TX timestamp for a CTPIO transmission. The
+ * event format is the same as for TX_EV_TSTAMP_HI
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_HI 0x13
/* enum: This is the low part of a TX timestamp event */
-#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51
+#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51
/* enum: This is the high part of a TX timestamp event */
-#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI 0x52
+#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI 0x52
#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LBN 24
#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_WIDTH 8
/* upper 16 bits of timestamp data */
@@ -8232,6 +7116,42 @@
#define CTPIO_STATS_MAP_BUCKET_LBN 16
#define CTPIO_STATS_MAP_BUCKET_WIDTH 16
+/* MESSAGE_TYPE structuredef: When present this defines the meaning of a
+ * message, and is used to protect against chosen message attacks in signed
+ * messages, regardless of their origin. The message type also defines the
+ * signature cryptographic algorithm, encoding, and message fields included in
+ * the signature. The values are used in different commands but must be unique
+ * across all commands, e.g. MC_CMD_TSA_BIND_IN_SECURE_UNBIND uses a
+ * different message type than MC_CMD_SECURE_NIC_INFO_IN_STATUS.
+ */
+#define MESSAGE_TYPE_LEN 4
+#define MESSAGE_TYPE_MESSAGE_TYPE_OFST 0
+#define MESSAGE_TYPE_MESSAGE_TYPE_LEN 4
+#define MESSAGE_TYPE_UNUSED 0x0 /* enum */
+/* enum: Message type value for the response to a
+ * MC_CMD_TSA_BIND_IN_SECURE_UNBIND message. TSA_SECURE_UNBIND messages are
+ * ECDSA SECP384R1 signed using SHA384 message digest algorithm over fields
+ * MESSAGE_TYPE, TSANID, TSAID, and UNBINDTOKEN, and encoded as suggested by
+ * RFC6979 (section 2.4).
+ */
+#define MESSAGE_TYPE_TSA_SECURE_UNBIND 0x1
+/* enum: Message type value for the response to a
+ * MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION message. TSA_SECURE_DECOMMISSION
+ * messages are ECDSA SECP384R1 signed using SHA384 message digest algorithm
+ * over fields MESSAGE_TYPE, TSAID, USER, and REASON, and encoded as suggested
+ * by RFC6979 (section 2.4).
+ */
+#define MESSAGE_TYPE_TSA_SECURE_DECOMMISSION 0x2
+/* enum: Message type value for the response to a
+ * MC_CMD_SECURE_NIC_INFO_IN_STATUS message. This enum value is not sequential
+ * to other message types for backwards compatibility as the message type for
+ * MC_CMD_SECURE_NIC_INFO_IN_STATUS was defined before the existence of this
+ * global enum.
+ */
+#define MESSAGE_TYPE_SECURE_NIC_INFO_STATUS 0xdb4
+#define MESSAGE_TYPE_MESSAGE_TYPE_LBN 0
+#define MESSAGE_TYPE_MESSAGE_TYPE_WIDTH 32
+
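
For illustration, a hedged sketch of how a driver might use the _OFST/_LEN pairs that this change adds to place a 32-bit field in an MCDI payload. The mcdi_set_dword() helper is hypothetical, and MCDI payload dwords are assumed to be little-endian.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical helper: write a 32-bit little-endian MCDI field at the
 * given byte offset inside a request/response payload buffer.
 */
static void mcdi_set_dword(uint8_t *buf, size_t ofst, uint32_t value)
{
	uint8_t le[4] = {
		(uint8_t)(value & 0xff),
		(uint8_t)((value >> 8) & 0xff),
		(uint8_t)((value >> 16) & 0xff),
		(uint8_t)((value >> 24) & 0xff),
	};

	memcpy(buf + ofst, le, sizeof(le));
}

/* Example: stamp a MESSAGE_TYPE structure with TSA_SECURE_UNBIND. */
static void message_type_example(uint8_t *payload)
{
	mcdi_set_dword(payload, MESSAGE_TYPE_MESSAGE_TYPE_OFST,
		       MESSAGE_TYPE_TSA_SECURE_UNBIND);
	(void)MESSAGE_TYPE_MESSAGE_TYPE_LEN;	/* always 4 bytes here */
}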
/***********************************/
/* MC_CMD_READ_REGS
@@ -8240,7 +7160,7 @@
#define MC_CMD_READ_REGS 0x50
#undef MC_CMD_0x50_PRIVILEGE_CTG
-#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_READ_REGS_IN msgrequest */
#define MC_CMD_READ_REGS_IN_LEN 0
@@ -8274,17 +7194,22 @@
#define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num))
/* Size, in entries */
#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0
+#define MC_CMD_INIT_EVQ_IN_SIZE_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4
+#define MC_CMD_INIT_EVQ_IN_INSTANCE_LEN 4
/* The initial timer value. The load value is ignored if the timer mode is DIS.
*/
#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8
+#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_LEN 4
/* The reload value is ignored in one-shot modes */
#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12
+#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_LEN 4
/* tbd */
#define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAGS_LEN 4
#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0
#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1
#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1
@@ -8300,6 +7225,7 @@
#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6
#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1
#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_LEN 4
/* enum: Disabled */
#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
/* enum: Immediate */
@@ -8310,13 +7236,16 @@
#define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3
/* Target EVQ for wakeups if in wakeup mode. */
#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24
+#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_LEN 4
/* Target interrupt if in interrupting mode (note union with target EVQ). Use
 * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
* purposes.
*/
#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24
+#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_LEN 4
/* Event Counter Mode. */
#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_LEN 4
/* enum: Disabled */
#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0
/* enum: Disabled */
@@ -8327,6 +7256,7 @@
#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3
/* Event queue packet count threshold. */
#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32
+#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_LEN 4
/* 64-bit address of 4k of 4k-aligned host memory buffer */
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8
@@ -8339,6 +7269,7 @@
#define MC_CMD_INIT_EVQ_OUT_LEN 4
/* Only valid if INTRFLAG was true */
#define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0
+#define MC_CMD_INIT_EVQ_OUT_IRQ_LEN 4
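
For illustration, a hedged sketch of laying out a minimal MC_CMD_INIT_EVQ request with the _OFST/_LEN definitions above. A little-endian host and a pre-zeroed request buffer are assumed so plain memcpy() yields little-endian MCDI dwords; the wrapper itself is hypothetical.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Build a minimal INIT_EVQ request: a 512-entry queue, timers and event
 * counting disabled, backed by one 4 KiB host buffer. 'req' is assumed
 * to be zeroed beforehand so unset fields (e.g. FLAGS) stay 0.
 */
static size_t init_evq_request_example(uint8_t *req, uint64_t dma_addr)
{
	uint32_t v;

	v = 512;			/* queue size, in entries */
	memcpy(req + MC_CMD_INIT_EVQ_IN_SIZE_OFST, &v,
	       MC_CMD_INIT_EVQ_IN_SIZE_LEN);
	v = 0;				/* function-local queue index */
	memcpy(req + MC_CMD_INIT_EVQ_IN_INSTANCE_OFST, &v,
	       MC_CMD_INIT_EVQ_IN_INSTANCE_LEN);
	v = MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS;
	memcpy(req + MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST, &v,
	       MC_CMD_INIT_EVQ_IN_TMR_MODE_LEN);
	v = MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS;
	memcpy(req + MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST, &v,
	       MC_CMD_INIT_EVQ_IN_COUNT_MODE_LEN);
	memcpy(req + MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST, &dma_addr,
	       MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN);

	/* Total request length for one DMA address entry. */
	return MC_CMD_INIT_EVQ_IN_LEN(1);
}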
/* MC_CMD_INIT_EVQ_V2_IN msgrequest */
#define MC_CMD_INIT_EVQ_V2_IN_LENMIN 44
@@ -8346,17 +7277,22 @@
#define MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num))
/* Size, in entries */
#define MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0
+#define MC_CMD_INIT_EVQ_V2_IN_SIZE_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4
+#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_LEN 4
/* The initial timer value. The load value is ignored if the timer mode is DIS.
*/
#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_OFST 8
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_LEN 4
/* The reload value is ignored in one-shot modes */
#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_OFST 12
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_LEN 4
/* tbd */
#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_LEN 4
#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0
#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1
#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1
@@ -8393,6 +7329,7 @@
*/
#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3
#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_LEN 4
/* enum: Disabled */
#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS 0x0
/* enum: Immediate */
@@ -8403,13 +7340,16 @@
#define MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF 0x3
/* Target EVQ for wakeups if in wakeup mode. */
#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_OFST 24
+#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_LEN 4
/* Target interrupt if in interrupting mode (note union with target EVQ). Use
 * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
* purposes.
*/
#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_OFST 24
+#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_LEN 4
/* Event Counter Mode. */
#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_OFST 28
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_LEN 4
/* enum: Disabled */
#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS 0x0
/* enum: Disabled */
@@ -8420,6 +7360,7 @@
#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RXTX 0x3
/* Event queue packet count threshold. */
#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_OFST 32
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_LEN 4
/* 64-bit address of 4k of 4k-aligned host memory buffer */
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8
@@ -8432,8 +7373,10 @@
#define MC_CMD_INIT_EVQ_V2_OUT_LEN 8
/* Only valid if INTRFLAG was true */
#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_OFST 0
+#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_LEN 4
/* Actual configuration applied on the card */
#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_LEN 4
#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0
#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1
#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1
@@ -8448,17 +7391,17 @@
#define QUEUE_CRC_MODE_MODE_LBN 0
#define QUEUE_CRC_MODE_MODE_WIDTH 4
/* enum: No CRC. */
-#define QUEUE_CRC_MODE_NONE 0x0
+#define QUEUE_CRC_MODE_NONE 0x0
/* enum: CRC Fiber channel over ethernet. */
-#define QUEUE_CRC_MODE_FCOE 0x1
+#define QUEUE_CRC_MODE_FCOE 0x1
/* enum: CRC (digest) iSCSI header only. */
-#define QUEUE_CRC_MODE_ISCSI_HDR 0x2
+#define QUEUE_CRC_MODE_ISCSI_HDR 0x2
/* enum: CRC (digest) iSCSI header and payload. */
-#define QUEUE_CRC_MODE_ISCSI 0x3
+#define QUEUE_CRC_MODE_ISCSI 0x3
/* enum: CRC Fiber channel over IP over ethernet. */
-#define QUEUE_CRC_MODE_FCOIPOE 0x4
+#define QUEUE_CRC_MODE_FCOIPOE 0x4
/* enum: CRC MPA. */
-#define QUEUE_CRC_MODE_MPA 0x5
+#define QUEUE_CRC_MODE_MPA 0x5
#define QUEUE_CRC_MODE_SPARE_LBN 4
#define QUEUE_CRC_MODE_SPARE_WIDTH 4
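
For illustration, a hedged sketch of the _LBN/_WIDTH convention used throughout this header: LBN is the low bit number and WIDTH the bit count of a field within its containing dword. The mcdi_insert_field() helper below is hypothetical, shown here packing a QUEUE_CRC_MODE value.

#include <stdint.h>

/* Hypothetical helper: place 'value' into the bitfield described by an
 * _LBN/_WIDTH pair inside a 32-bit word, preserving the other bits.
 */
static uint32_t mcdi_insert_field(uint32_t word, unsigned int lbn,
				  unsigned int width, uint32_t value)
{
	uint32_t mask = (width == 32) ? 0xffffffffu : ((1u << width) - 1u);

	return (word & ~(mask << lbn)) | ((value & mask) << lbn);
}

/* Example: request iSCSI header+payload CRC in a queue's CRC mode field. */
static uint32_t crc_mode_example(uint32_t word)
{
	return mcdi_insert_field(word, QUEUE_CRC_MODE_MODE_LBN,
				 QUEUE_CRC_MODE_MODE_WIDTH,
				 QUEUE_CRC_MODE_ISCSI);
}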
@@ -8482,17 +7425,22 @@
#define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
/* Size, in entries */
#define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0
+#define MC_CMD_INIT_RXQ_IN_SIZE_LEN 4
/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
*/
#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_LEN 4
/* The value to put in the event data. Check hardware spec. for valid range. */
#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
+#define MC_CMD_INIT_RXQ_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_RXQ_IN_INSTANCE_LEN 4
/* There will be more flags here. */
#define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAGS_LEN 4
#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0
#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1
#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1
@@ -8511,8 +7459,10 @@
#define MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_RXQ_IN_OWNER_ID_LEN 4
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
#define MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_RXQ_IN_PORT_ID_LEN 4
/* 64-bit address of 4k of 4k-aligned host memory buffer */
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8
@@ -8527,17 +7477,26 @@
#define MC_CMD_INIT_RXQ_EXT_IN_LEN 544
/* Size, in entries */
#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0
-/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
+#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE.
*/
#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4
-/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range.
+ * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE
+ * == PACKED_STREAM.
+ */
#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8
+#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_LEN 4
/* There will be more flags here. */
#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_LEN 4
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1
@@ -8555,26 +7514,37 @@
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4
/* enum: One packet per descriptor (for normal networking) */
-#define MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0
+#define MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0
/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
-#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1
+/* enum: Pack multiple packets into large descriptors using the format designed
+ * to maximise packet rate. This mode uses 1 "bucket" per descriptor with
+ * multiple fixed-size packet buffers within each bucket. For a full
+ * description see SF-119419-TC. This mode is only supported by "dpdk" datapath
+ * firmware.
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2
+/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */
+#define MC_CMD_INIT_RXQ_EXT_IN_EQUAL_STRIDE_PACKED_STREAM 0x2
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
-#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */
-#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */
-#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */
-#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */
-#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_LBN 19
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_LEN 4
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_LEN 4
/* 64-bit address of 4k of 4k-aligned host memory buffer */
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8
@@ -8583,6 +7553,116 @@
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64
/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540
+#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_LEN 4
+
+/* MC_CMD_INIT_RXQ_V3_IN msgrequest */
+#define MC_CMD_INIT_RXQ_V3_IN_LEN 560
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_V3_IN_SIZE_OFST 0
+#define MC_CMD_INIT_RXQ_V3_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range.
+ * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE
+ * == PACKED_STREAM.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_LABEL_OFST 8
+#define MC_CMD_INIT_RXQ_V3_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_V3_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_LBN 10
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define MC_CMD_INIT_RXQ_V3_IN_SINGLE_PACKET 0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM 0x1
+/* enum: Pack multiple packets into large descriptors using the format designed
+ * to maximise packet rate. This mode uses 1 "bucket" per descriptor with
+ * multiple fixed-size packet buffers within each bucket. For a full
+ * description see SF-119419-TC. This mode is only supported by "dpdk" datapath
+ * firmware.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2
+/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */
+#define MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_PACKED_STREAM 0x2
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_1M 0x0 /* enum */
+#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_512K 0x1 /* enum */
+#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_256K 0x2 /* enum */
+#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_128K 0x3 /* enum */
+#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_64K 0x4 /* enum */
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_LBN 19
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_V3_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_RXQ_V3_IN_PORT_ID_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_OFST 540
+#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_LEN 4
+/* The number of packet buffers that will be contained within each
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field
+ * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544
+#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4
+/* The length in bytes of the area in each packet buffer that can be written to
+ * by the adapter. This is used to store the packet prefix and the packet
+ * payload. This length does not include any end padding added by the driver.
+ * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_OFST 548
+#define MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_LEN 4
+/* The length in bytes of a single packet buffer within a
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_OFST 552
+#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_LEN 4
+/* The maximum time in nanoseconds that the datapath will be backpressured if
+ * there are no RX descriptors available. If the timeout is reached and there
+ * are still no descriptors then the packet will be dropped. A timeout of 0
+ * means the datapath will never be blocked. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556
+#define MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4
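
For illustration, a hedged sketch of filling only the equal-stride super-buffer fields that the V3 request adds at offsets 544..556, plus the DMA_MODE bits in the FLAGS dword. A little-endian host is assumed, the buffer geometry values are examples only, and the wrapper function is hypothetical.

#include <stdint.h>
#include <string.h>

/* Fill the EQUAL_STRIDE_SUPER_BUFFER specific part of an INIT_RXQ V3
 * request. 'flags' already holds the caller's other flag bits for the
 * dword at offset 16.
 */
static void init_rxq_v3_es_example(uint8_t *req, uint32_t flags)
{
	uint32_t v;

	/* DMA_MODE is a 4-bit field at bits 10..13 of the FLAGS dword. */
	flags &= ~(0xfu << MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_LBN);
	flags |= MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER
		 << MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_LBN;
	memcpy(req + MC_CMD_INIT_RXQ_V3_IN_FLAGS_OFST, &flags, 4);

	v = 64;		/* example: packet buffers per bucket */
	memcpy(req + MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST,
	       &v, 4);
	v = 2048;	/* example: adapter-writable bytes per packet buffer */
	memcpy(req + MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_OFST, &v, 4);
	v = 2048;	/* example: stride between packet buffers in a bucket */
	memcpy(req + MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_OFST, &v, 4);
	v = 0;		/* 0 = never back-pressure the datapath */
	memcpy(req + MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST,
	       &v, 4);
}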
/* MC_CMD_INIT_RXQ_OUT msgresponse */
#define MC_CMD_INIT_RXQ_OUT_LEN 0
@@ -8590,6 +7670,9 @@
/* MC_CMD_INIT_RXQ_EXT_OUT msgresponse */
#define MC_CMD_INIT_RXQ_EXT_OUT_LEN 0
+/* MC_CMD_INIT_RXQ_V3_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_V3_OUT_LEN 0
+
/***********************************/
/* MC_CMD_INIT_TXQ
@@ -8607,18 +7690,23 @@
#define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
/* Size, in entries */
#define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0
+#define MC_CMD_INIT_TXQ_IN_SIZE_LEN 4
/* The EVQ to send events to. This is an index originally specified to
* INIT_EVQ.
*/
#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_LEN 4
/* The value to put in the event data. Check hardware spec. for valid range. */
#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8
+#define MC_CMD_INIT_TXQ_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_TXQ_IN_INSTANCE_LEN 4
/* There will be more flags here. */
#define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAGS_LEN 4
#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0
#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1
#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1
@@ -8639,8 +7727,10 @@
#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_TXQ_IN_OWNER_ID_LEN 4
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
#define MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_TXQ_IN_PORT_ID_LEN 4
/* 64-bit address of 4k of 4k-aligned host memory buffer */
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8
@@ -8655,18 +7745,23 @@
#define MC_CMD_INIT_TXQ_EXT_IN_LEN 544
/* Size, in entries */
#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0
+#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_LEN 4
/* The EVQ to send events to. This is an index originally specified to
* INIT_EVQ.
*/
#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_LEN 4
/* The value to put in the event data. Check hardware spec. for valid range. */
#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8
+#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_LEN 4
/* There will be more flags here. */
#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_LEN 4
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1
@@ -8689,10 +7784,14 @@
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_LBN 13
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_LBN 14
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_LEN 4
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_LEN 4
/* 64-bit address of 4k of 4k-aligned host memory buffer */
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8
@@ -8702,6 +7801,7 @@
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
/* Flags related to Qbb flow control mode. */
#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_LEN 4
#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0
#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1
#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1
@@ -8729,6 +7829,7 @@
* passed to INIT_EVQ
*/
#define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_EVQ_IN_INSTANCE_LEN 4
/* MC_CMD_FINI_EVQ_OUT msgresponse */
#define MC_CMD_FINI_EVQ_OUT_LEN 0
@@ -8747,6 +7848,7 @@
#define MC_CMD_FINI_RXQ_IN_LEN 4
/* Instance of RXQ to destroy */
#define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_RXQ_IN_INSTANCE_LEN 4
/* MC_CMD_FINI_RXQ_OUT msgresponse */
#define MC_CMD_FINI_RXQ_OUT_LEN 0
@@ -8765,6 +7867,7 @@
#define MC_CMD_FINI_TXQ_IN_LEN 4
/* Instance of TXQ to destroy */
#define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_TXQ_IN_INSTANCE_LEN 4
/* MC_CMD_FINI_TXQ_OUT msgresponse */
#define MC_CMD_FINI_TXQ_OUT_LEN 0
@@ -8783,6 +7886,7 @@
#define MC_CMD_DRIVER_EVENT_IN_LEN 12
/* Handle of target EVQ */
#define MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0
+#define MC_CMD_DRIVER_EVENT_IN_EVQ_LEN 4
/* Bits 0 - 63 of event */
#define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
#define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
@@ -8809,11 +7913,12 @@
#define MC_CMD_PROXY_CMD_IN_LEN 4
/* The handle of the target function. */
#define MC_CMD_PROXY_CMD_IN_TARGET_OFST 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_LEN 4
#define MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0
#define MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16
#define MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16
#define MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16
-#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */
+#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */
/* MC_CMD_PROXY_CMD_OUT msgresponse */
#define MC_CMD_PROXY_CMD_OUT_LEN 0
@@ -8824,8 +7929,9 @@
#define MC_PROXY_STATUS_BUFFER_LEN 16
/* Handle allocated by the firmware for this proxy transaction */
#define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0
+#define MC_PROXY_STATUS_BUFFER_HANDLE_LEN 4
/* enum: An invalid handle. */
-#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0
+#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0
#define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0
#define MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32
/* The requesting physical function number */
@@ -8854,6 +7960,7 @@
* elevated privilege mask granted to the requesting function.
*/
#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LEN 4
#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
@@ -8871,6 +7978,7 @@
/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
#define MC_CMD_PROXY_CONFIGURE_IN_LEN 108
#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_LEN 4
#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
@@ -8882,6 +7990,7 @@
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_LEN 4
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
* of blocks, each of the size REPLY_BLOCK_SIZE.
*/
@@ -8891,6 +8000,7 @@
#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_LEN 4
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
* of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
* host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
@@ -8901,8 +8011,10 @@
#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
/* Must be a power of 2, or zero if this buffer is not provided */
#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_LEN 4
/* Applies to all three buffers */
#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
+#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_LEN 4
/* A bit mask defining which MCDI operations may be proxied */
#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
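
For illustration, a hedged sketch of sizing the host buffers described above: each of the status, request and reply areas holds NUM_BLOCKS blocks of a power-of-two block size, with a reply block size of zero meaning that buffer is not provided. The helper below is hypothetical.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool is_pow2_or_zero(uint32_t x)
{
	return (x & (x - 1)) == 0;	/* also true for 0 */
}

/* Size the host buffers for MC_CMD_PROXY_CONFIGURE. */
static bool proxy_buffer_sizes(uint32_t num_blocks,
			       uint32_t status_block_size,
			       uint32_t request_block_size,
			       uint32_t reply_block_size,
			       size_t *status_bytes,
			       size_t *request_bytes,
			       size_t *reply_bytes)
{
	if (status_block_size == 0 || !is_pow2_or_zero(status_block_size) ||
	    request_block_size == 0 || !is_pow2_or_zero(request_block_size) ||
	    !is_pow2_or_zero(reply_block_size))
		return false;

	*status_bytes = (size_t)num_blocks * status_block_size;
	*request_bytes = (size_t)num_blocks * request_block_size;
	*reply_bytes = (size_t)num_blocks * reply_block_size;
	return true;
}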
@@ -8910,6 +8022,7 @@
/* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_LEN 4
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
@@ -8921,6 +8034,7 @@
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_LEN 4
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
* of blocks, each of the size REPLY_BLOCK_SIZE.
*/
@@ -8930,6 +8044,7 @@
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_LEN 4
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
* of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
* host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
@@ -8940,12 +8055,15 @@
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32
/* Must be a power of 2, or zero if this buffer is not provided */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_LEN 4
/* Applies to all three buffers */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_LEN 4
/* A bit mask defining which MCDI operations may be proxied */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_LEN 4
/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
@@ -8966,7 +8084,9 @@
/* MC_CMD_PROXY_COMPLETE_IN msgrequest */
#define MC_CMD_PROXY_COMPLETE_IN_LEN 12
#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0
+#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_LEN 4
#define MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4
+#define MC_CMD_PROXY_COMPLETE_IN_STATUS_LEN 4
/* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply
* is stored in the REPLY_BUFF.
*/
@@ -8982,6 +8102,7 @@
*/
#define MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3
#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8
+#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_LEN 4
/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */
#define MC_CMD_PROXY_COMPLETE_OUT_LEN 0
@@ -9002,17 +8123,22 @@
#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
/* Owner ID to use */
#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_LEN 4
/* Size of buffer table pages to use, in bytes (note that only a few values are
* legal on any specific hardware).
*/
#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_LEN 4
/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_LEN 4
#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_LEN 4
/* Buffer table IDs for use in DMA descriptors. */
#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_LEN 4
/***********************************/
@@ -9029,10 +8155,13 @@
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_LEN 4
/* ID */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
/* Num entries */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
/* Buffer table entry address */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
@@ -9056,48 +8185,11 @@
/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_LEN 4
/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
-/* PORT_CONFIG_ENTRY structuredef */
-#define PORT_CONFIG_ENTRY_LEN 16
-/* External port number (label) */
-#define PORT_CONFIG_ENTRY_EXT_NUMBER_OFST 0
-#define PORT_CONFIG_ENTRY_EXT_NUMBER_LEN 1
-#define PORT_CONFIG_ENTRY_EXT_NUMBER_LBN 0
-#define PORT_CONFIG_ENTRY_EXT_NUMBER_WIDTH 8
-/* Port core location */
-#define PORT_CONFIG_ENTRY_CORE_OFST 1
-#define PORT_CONFIG_ENTRY_CORE_LEN 1
-#define PORT_CONFIG_ENTRY_STANDALONE 0x0 /* enum */
-#define PORT_CONFIG_ENTRY_MASTER 0x1 /* enum */
-#define PORT_CONFIG_ENTRY_SLAVE 0x2 /* enum */
-#define PORT_CONFIG_ENTRY_CORE_LBN 8
-#define PORT_CONFIG_ENTRY_CORE_WIDTH 8
-/* Internal number (HW resource) relative to the core */
-#define PORT_CONFIG_ENTRY_INT_NUMBER_OFST 2
-#define PORT_CONFIG_ENTRY_INT_NUMBER_LEN 1
-#define PORT_CONFIG_ENTRY_INT_NUMBER_LBN 16
-#define PORT_CONFIG_ENTRY_INT_NUMBER_WIDTH 8
-/* Reserved */
-#define PORT_CONFIG_ENTRY_RSVD_OFST 3
-#define PORT_CONFIG_ENTRY_RSVD_LEN 1
-#define PORT_CONFIG_ENTRY_RSVD_LBN 24
-#define PORT_CONFIG_ENTRY_RSVD_WIDTH 8
-/* Bitmask of KR lanes used by the port */
-#define PORT_CONFIG_ENTRY_LANES_OFST 4
-#define PORT_CONFIG_ENTRY_LANES_LBN 32
-#define PORT_CONFIG_ENTRY_LANES_WIDTH 32
-/* Port capabilities (MC_CMD_PHY_CAP_*) */
-#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_OFST 8
-#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_LBN 64
-#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_WIDTH 32
-/* Reserved (align to 16 bytes) */
-#define PORT_CONFIG_ENTRY_RSVD2_OFST 12
-#define PORT_CONFIG_ENTRY_RSVD2_LBN 96
-#define PORT_CONFIG_ENTRY_RSVD2_WIDTH 32
-
/***********************************/
/* MC_CMD_FILTER_OP
@@ -9112,18 +8204,19 @@
#define MC_CMD_FILTER_OP_IN_LEN 108
/* identifies the type of operation requested */
#define MC_CMD_FILTER_OP_IN_OP_OFST 0
+#define MC_CMD_FILTER_OP_IN_OP_LEN 4
/* enum: single-recipient filter insert */
-#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0
+#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0
/* enum: single-recipient filter remove */
-#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1
+#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1
/* enum: multi-recipient filter subscribe */
-#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2
+#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2
/* enum: multi-recipient filter unsubscribe */
-#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3
+#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3
/* enum: replace one recipient with another (warning - the filter handle may
* change)
*/
-#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4
+#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4
/* filter handle (for remove / unsubscribe operations) */
#define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4
#define MC_CMD_FILTER_OP_IN_HANDLE_LEN 8
@@ -9132,8 +8225,10 @@
/* The port ID associated with the v-adaptor which should contain this filter.
*/
#define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12
+#define MC_CMD_FILTER_OP_IN_PORT_ID_LEN 4
/* fields to include in match criteria */
#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_LEN 4
#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0
#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1
#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1
@@ -9164,43 +8259,49 @@
#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
/* receive destination */
#define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20
+#define MC_CMD_FILTER_OP_IN_RX_DEST_LEN 4
/* enum: drop packets */
-#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0
+#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0
/* enum: receive to host */
-#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1
+#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1
/* enum: receive to MC */
-#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2
+#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2
/* enum: loop back to TXDP 0 */
-#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3
/* enum: loop back to TXDP 1 */
-#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4
/* receive queue handle (for multiple queue modes, this is the base queue) */
#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
+#define MC_CMD_FILTER_OP_IN_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28
+#define MC_CMD_FILTER_OP_IN_RX_MODE_LEN 4
/* enum: receive to just the specified queue */
-#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0
+#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0
/* enum: receive to multiple queues using RSS context */
-#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1
+#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1
/* enum: receive to multiple queues using .1p mapping */
-#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2
+#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2
/* enum: install a filter entry that will never match; for test purposes only
*/
-#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
* RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
* MC_CMD_DOT1P_MAPPING_ALLOC.
*/
#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
+#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_LEN 4
/* transmit domain (reserved; set to 0) */
#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36
+#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_LEN 4
/* transmit destination (either set the MAC and/or PM bits for explicit
* control, or set this field to TX_DEST_DEFAULT for sensible default
* behaviour)
*/
#define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40
+#define MC_CMD_FILTER_OP_IN_TX_DEST_LEN 4
/* enum: request default behaviour (based on filter type) */
-#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff
#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0
#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1
#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1
@@ -9231,8 +8332,10 @@
#define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2
/* Firmware defined register 0 to match (reserved; set to 0) */
#define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68
+#define MC_CMD_FILTER_OP_IN_FWDEF0_LEN 4
/* Firmware defined register 1 to match (reserved; set to 0) */
#define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72
+#define MC_CMD_FILTER_OP_IN_FWDEF1_LEN 4
/* source IP address to match (as bytes in network order; set last 12 bytes to
* 0 for IPv4 address)
*/
@@ -9251,6 +8354,7 @@
#define MC_CMD_FILTER_OP_EXT_IN_LEN 172
/* identifies the type of operation requested */
#define MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0
+#define MC_CMD_FILTER_OP_EXT_IN_OP_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_FILTER_OP_IN/OP */
/* filter handle (for remove / unsubscribe operations) */
@@ -9261,8 +8365,10 @@
/* The port ID associated with the v-adaptor which should contain this filter.
*/
#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12
+#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_LEN 4
/* fields to include in match criteria */
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_LEN 4
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1
@@ -9321,43 +8427,49 @@
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
/* receive destination */
#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_LEN 4
/* enum: drop packets */
-#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0
/* enum: receive to host */
-#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1
/* enum: receive to MC */
-#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2
/* enum: loop back to TXDP 0 */
-#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3
/* enum: loop back to TXDP 1 */
-#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4
/* receive queue handle (for multiple queue modes, this is the base queue) */
#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24
+#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_LEN 4
/* enum: receive to just the specified queue */
-#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0
/* enum: receive to multiple queues using RSS context */
-#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1
/* enum: receive to multiple queues using .1p mapping */
-#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2
/* enum: install a filter entry that will never match; for test purposes only
*/
-#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
* RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
* MC_CMD_DOT1P_MAPPING_ALLOC.
*/
#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32
+#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_LEN 4
/* transmit domain (reserved; set to 0) */
#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_LEN 4
/* transmit destination (either set the MAC and/or PM bits for explicit
* control, or set this field to TX_DEST_DEFAULT for sensible default
* behaviour)
*/
#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_LEN 4
/* enum: request default behaviour (based on filter type) */
-#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff
#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0
#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1
#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1
@@ -9388,27 +8500,29 @@
#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2
/* Firmware defined register 0 to match (reserved; set to 0) */
#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68
+#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_LEN 4
/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
* protocol is GRE) to match (as bytes in network order; set last byte to 0 for
* VXLAN/NVGRE, or 1 for Geneve)
*/
#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_LEN 4
#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0
#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24
#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24
#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8
/* enum: Match VXLAN traffic with this VNI */
-#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0
/* enum: Match Geneve traffic with this VNI */
-#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1
/* enum: Reserved for experimental development use */
-#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe
#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0
#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24
#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24
#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8
/* enum: Match NVGRE traffic with this VSID */
-#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0
/* source IP address to match (as bytes in network order; set last 12 bytes to
* 0 for IPv4 address)
*/
@@ -9458,10 +8572,12 @@
* to 0)
*/
#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_LEN 4
/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
* to 0)
*/
#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_LEN 4
/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
* order; set last 12 bytes to 0 for IPv4 address)
*/
@@ -9473,10 +8589,281 @@
#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_OFST 156
#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16
+/* MC_CMD_FILTER_OP_V3_IN msgrequest: FILTER_OP extension to support additional
+ * filter actions for Intel's DPDK (Data Plane Development Kit, dpdk.org) via
+ * its rte_flow API. This extension is only useful with the sfc_efx driver
+ * included as part of DPDK, used in conjunction with the dpdk datapath
+ * firmware variant.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_LEN 180
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_V3_IN_OP_OFST 0
+#define MC_CMD_FILTER_OP_V3_IN_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_PORT_ID_OFST 12
+#define MC_CMD_FILTER_OP_V3_IN_PORT_ID_LEN 4
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_LEN 4
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_LBN 11
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_LBN 12
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_LBN 13
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_LBN 14
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_LBN 15
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_LBN 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_LBN 17
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_LBN 18
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_LBN 19
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_LBN 20
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_LBN 21
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_LBN 22
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_LBN 23
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_OFST 20
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_LEN 4
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_MC 0x2
+/* enum: loop back to TXDP 0 */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_TX0 0x3
+/* enum: loop back to TXDP 1 */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_V3_IN_RX_QUEUE_OFST 24
+#define MC_CMD_FILTER_OP_V3_IN_RX_QUEUE_LEN 4
+/* receive mode */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_OFST 28
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_LEN 4
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_RX_CONTEXT_OFST 32
+#define MC_CMD_FILTER_OP_V3_IN_RX_CONTEXT_LEN 4
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_V3_IN_TX_DOMAIN_OFST 36
+#define MC_CMD_FILTER_OP_V3_IN_TX_DOMAIN_LEN 4
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_OFST 40
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_LEN 4
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_V3_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_V3_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_V3_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_V3_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_V3_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_V3_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_V3_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_V3_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_V3_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_V3_IN_FWDEF0_OFST 68
+#define MC_CMD_FILTER_OP_V3_IN_FWDEF0_LEN 4
+/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
+ * protocol is GRE) to match (as bytes in network order; set last byte to 0 for
+ * VXLAN/NVGRE, or 1 for Geneve)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_OFST 72
+#define MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_LEN 4
+#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_WIDTH 8
+/* enum: Match VXLAN traffic with this VNI */
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_VXLAN 0x0
+/* enum: Match Geneve traffic with this VNI */
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_GENEVE 0x1
+/* enum: Reserved for experimental development use */
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_EXPERIMENTAL 0xfe
+#define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_WIDTH 8
+/* enum: Match NVGRE traffic with this VSID */
+#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_NVGRE 0x0
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_V3_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_V3_IN_DST_IP_LEN 16
+/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_MAC_OFST 108
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_MAC_LEN 6
+/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_PORT_OFST 114
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_PORT_LEN 2
+/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in
+ * network order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_MAC_OFST 116
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_MAC_LEN 6
+/* VXLAN/NVGRE inner frame destination port to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_PORT_OFST 122
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_PORT_LEN 2
+/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_ETHER_TYPE_OFST 124
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_ETHER_TYPE_LEN 2
+/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_INNER_VLAN_OFST 126
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_INNER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_OUTER_VLAN_OFST 128
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_OUTER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to
+ * 0)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_IP_PROTO_OFST 130
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_IP_PROTO_LEN 2
+/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF0_OFST 132
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF0_LEN 4
+/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF1_OFST 136
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF1_LEN 4
+/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_IP_OFST 140
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_IP_LEN 16
+/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_OFST 156
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_LEN 16
+/* Set an action for all packets matching this filter. The DPDK driver and dpdk
+ * f/w variant use their own specific delivery structures, which are documented
+ * in the DPDK Firmware Driver Interface (SF-119419-TC). Requesting anything
+ * other than MATCH_ACTION_NONE when the NIC is running another f/w variant
+ * will cause the filter insertion to fail with ENOTSUP.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_LEN 4
+/* enum: do nothing extra */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_NONE 0x0
+/* enum: Set the match flag in the packet prefix for packets matching the
+ * filter (only with dpdk firmware, otherwise fails with ENOTSUP). Used to
+ * support the DPDK rte_flow "FLAG" action.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_FLAG 0x1
+/* enum: Insert MATCH_MARK_VALUE into the packet prefix for packets matching
+ * the filter (only with dpdk firmware, otherwise fails with ENOTSUP). Used to
+ * support the DPDK rte_flow "MARK" action.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_MARK 0x2
+/* the mark value for MATCH_ACTION_MARK. Requesting a value larger than the
+ * maximum (obtained from MC_CMD_GET_CAPABILITIES_V5/FILTER_ACTION_MARK_MAX)
+ * will cause the filter insertion to fail with EINVAL.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_OFST 176
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_LEN 4
+
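The MATCH_ACTION and MATCH_MARK_VALUE fields above are how the sfc_efx driver maps the rte_flow FLAG/MARK actions onto this request. Below is a minimal illustrative sketch (not part of the patch) of filling those two fields, assuming the usual little-endian MCDI field encoding; mcdi_write_dword(), filter_op_v3_request_mark() and the mark_max parameter (taken from FILTER_ACTION_MARK_MAX) are hypothetical names.

#include <errno.h>
#include <stdint.h>

/* Hypothetical helper: write a little-endian 32-bit MCDI field. */
static void mcdi_write_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	buf[ofst + 0] = (uint8_t)(val);
	buf[ofst + 1] = (uint8_t)(val >> 8);
	buf[ofst + 2] = (uint8_t)(val >> 16);
	buf[ofst + 3] = (uint8_t)(val >> 24);
}

/* Request the rte_flow MARK action in a FILTER_OP_V3 insert request. */
static int filter_op_v3_request_mark(uint8_t *req, uint32_t mark, uint32_t mark_max)
{
	if (mark > mark_max)
		return -EINVAL;	/* firmware rejects values above FILTER_ACTION_MARK_MAX */

	mcdi_write_dword(req, MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_OFST,
			 MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_MARK);
	mcdi_write_dword(req, MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_OFST, mark);
	return 0;
}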
/* MC_CMD_FILTER_OP_OUT msgresponse */
#define MC_CMD_FILTER_OP_OUT_LEN 12
/* identifies the type of operation requested */
#define MC_CMD_FILTER_OP_OUT_OP_OFST 0
+#define MC_CMD_FILTER_OP_OUT_OP_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_FILTER_OP_IN/OP */
/* Returned filter handle (for insert / subscribe operations). Note that these
@@ -9488,14 +8875,15 @@
#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4
#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8
/* enum: guaranteed invalid filter handle (low 32 bits) */
-#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff
/* enum: guaranteed invalid filter handle (high 32 bits) */
-#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff
/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */
#define MC_CMD_FILTER_OP_EXT_OUT_LEN 12
/* identifies the type of operation requested */
#define MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0
+#define MC_CMD_FILTER_OP_EXT_OUT_OP_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_FILTER_OP_EXT_IN/OP */
/* Returned filter handle (for insert / subscribe operations). Note that these
@@ -9523,21 +8911,22 @@
#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
/* identifies the type of operation requested */
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_LEN 4
/* enum: read the list of supported RX filter matches */
-#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1
/* enum: read flags indicating restrictions on filter insertion for the calling
* client
*/
-#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2
/* enum: read properties relating to security rules (Medford-only; for use by
* SolarSecure apps, not directly by drivers. See SF-114946-SW.)
*/
-#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SECURITY_RULE_INFO 0x3
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SECURITY_RULE_INFO 0x3
/* enum: read the list of supported RX filter matches for VXLAN/NVGRE
* encapsulated frames, which follow a different match sequence to normal
* frames (Medford only)
*/
-#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4
/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
@@ -9545,10 +8934,12 @@
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num))
/* identifies the type of operation requested */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
/* number of supported match types */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_LEN 4
/* array of supported match types (valid MATCH_FIELDS values for
* MC_CMD_FILTER_OP) sorted in decreasing priority order
*/
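To show how this variable-length response is laid out, here is a small sketch (not part of the patch) that walks the supported-match array using the LEN(num) formula defined above; read_dword() is a hypothetical little-endian field reader and outlen is the MCDI response length in bytes.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical helper: read a little-endian 32-bit MCDI field. */
static uint32_t read_dword(const uint8_t *buf, unsigned int ofst)
{
	uint32_t val;

	memcpy(&val, buf + ofst, sizeof(val));	/* assumes a little-endian host */
	return val;
}

static void dump_supported_matches(const uint8_t *out, size_t outlen)
{
	uint32_t num = read_dword(out,
			MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST);
	uint32_t i;

	for (i = 0; i < num; i++) {
		/* Entries follow the 8-byte header: LEN(num) = 8 + 4 * num. */
		if (MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(i + 1) > outlen)
			break;
		printf("match[%u] = %#x\n", (unsigned int)i,
		       (unsigned int)read_dword(out, 8 + 4 * i));
	}
}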
@@ -9561,10 +8952,12 @@
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8
/* identifies the type of operation requested */
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
/* bitfield of filter insertion restrictions */
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_LEN 4
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
@@ -9578,28 +8971,37 @@
#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_LEN 36
/* identifies the type of operation requested */
#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
/* a version number representing the set of rule lookups that are implemented
* by the currently running firmware
*/
#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_LEN 4
/* enum: implements lookup sequences described in SF-114946-SW draft C */
-#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_SF_114946_SW_C 0x0
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_SF_114946_SW_C 0x0
/* the number of nodes in the subnet map */
#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_OFST 8
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_LEN 4
/* the number of entries in one subnet map node */
#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_OFST 12
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_LEN 4
/* minimum valid value for a subnet ID in a subnet map leaf */
#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_OFST 16
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_LEN 4
/* maximum valid value for a subnet ID in a subnet map leaf */
#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_OFST 20
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_LEN 4
/* the number of entries in the local and remote port range maps */
#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_OFST 24
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_LEN 4
/* minimum valid value for a portrange ID in a port range map leaf */
#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_OFST 28
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_LEN 4
/* maximum valid value for a portrange ID in a port range map leaf */
#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_OFST 32
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_LEN 4
/***********************************/
@@ -9607,7 +9009,9 @@
* Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging.
* Please note that this interface is only of use to debug tools which have
* knowledge of firmware and hardware data structures; nothing here is intended
- * for use by normal driver code.
+ * for use by normal driver code. Note that although this command is in the
+ * Admin privilege group, in tamperproof adapters, only read operations are
+ * permitted.
*/
#define MC_CMD_PARSER_DISP_RW 0xe5
#undef MC_CMD_0xe5_PRIVILEGE_CTG
@@ -9618,42 +9022,58 @@
#define MC_CMD_PARSER_DISP_RW_IN_LEN 32
/* identifies the target of the operation */
#define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0
+#define MC_CMD_PARSER_DISP_RW_IN_TARGET_LEN 4
/* enum: RX dispatcher CPU */
-#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
+#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
/* enum: TX dispatcher CPU */
-#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
-/* enum: Lookup engine (with original metadata format) */
-#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
+#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
+/* enum: Lookup engine (with original metadata format). Deprecated; used only
+ * by cmdclient as a fallback for very old Huntington firmware, and not
+ * supported in firmware beyond v6.4.0.1005. Use LUE_VERSIONED_METADATA
+ * instead.
+ */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
/* enum: Lookup engine (with requested metadata format) */
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3
/* enum: RX0 dispatcher CPU (alias for RX_DICPU; Medford has 2 RX DICPUs) */
-#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0
+#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0
/* enum: RX1 dispatcher CPU (only valid for Medford) */
-#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4
+#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4
/* enum: Miscellaneous other state (only valid for Medford) */
-#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5
+#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5
/* identifies the type of operation requested */
#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
-/* enum: read a word of DICPU DMEM or a LUE entry */
-#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0
-/* enum: write a word of DICPU DMEM or a LUE entry */
-#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
-/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */
-#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
+#define MC_CMD_PARSER_DISP_RW_IN_OP_LEN 4
+/* enum: Read a word of DICPU DMEM or a LUE entry */
+#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0
+/* enum: Write a word of DICPU DMEM or a LUE entry. Not permitted on
+ * tamperproof adapters.
+ */
+#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
+/* enum: Read-modify-write a word of DICPU DMEM (not valid for LUE). Not
+ * permitted on tamperproof adapters.
+ */
+#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
/* data memory address (DICPU targets) or LUE index (LUE targets) */
#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
+#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_LEN 4
/* selector (for MISC_STATE target) */
#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8
+#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_LEN 4
/* enum: Port to datapath mapping */
-#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1
+#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1
/* value to write (for DMEM writes) */
#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_LEN 4
/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_LEN 4
/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_LEN 4
/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */
#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_LEN 4
/* value to write (for LUE writes) */
#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
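For clarity, the read-modify-write rule documented above (new = (old & mask) ^ value) amounts to the following pure function; a sketch, not part of the patch.

#include <stdint.h>

/* DMEM read-modify-write as documented above: new = (old & mask) ^ value. */
static uint32_t dmem_rmw(uint32_t old, uint32_t and_mask, uint32_t xor_value)
{
	/* e.g. old = 0xffff0000, and_mask = 0x00ff00ff, xor_value = 0x0000000f
	 * gives (0x00ff0000) ^ 0x0000000f = 0x00ff000f.
	 */
	return (old & and_mask) ^ xor_value;
}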
@@ -9662,6 +9082,7 @@
#define MC_CMD_PARSER_DISP_RW_OUT_LEN 52
/* value read (for DMEM reads) */
#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0
+#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_LEN 4
/* value read (for LUE reads) */
#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0
#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20
@@ -9674,8 +9095,8 @@
#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_OFST 0
#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_LEN 4
#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_NUM 4
-#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */
-#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */
+#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */
+#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */
/***********************************/
@@ -9707,6 +9128,7 @@
#define MC_CMD_SET_PF_COUNT_IN_LEN 4
/* New number of PFs on the device. */
#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0
+#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_LEN 4
/* MC_CMD_SET_PF_COUNT_OUT msgresponse */
#define MC_CMD_SET_PF_COUNT_OUT_LEN 0
@@ -9728,6 +9150,7 @@
#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4
/* Identifies the port assignment for this function. */
#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_LEN 4
/***********************************/
@@ -9743,6 +9166,7 @@
#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
/* Identifies the port assignment for this function. */
#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_LEN 4
/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
@@ -9761,8 +9185,10 @@
#define MC_CMD_ALLOC_VIS_IN_LEN 8
/* The minimum number of VIs that is acceptable */
#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0
+#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_LEN 4
/* The maximum number of VIs that would be useful */
#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
+#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_LEN 4
/* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request.
* Use extended version in new code.
@@ -9770,21 +9196,26 @@
#define MC_CMD_ALLOC_VIS_OUT_LEN 8
/* The number of VIs allocated on this function */
#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
+#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_LEN 4
/* The base absolute VI number allocated to this function. Required to
* correctly interpret wakeup events.
*/
#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
+#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_LEN 4
/* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */
#define MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12
/* The number of VIs allocated on this function */
#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_LEN 4
/* The base absolute VI number allocated to this function. Required to
* correctly interpret wakeup events.
*/
#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_LEN 4
/* Function's port vi_shift value (always 0 on Huntington) */
#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_LEN 4
/***********************************/
@@ -9820,15 +9251,20 @@
#define MC_CMD_GET_SRIOV_CFG_OUT_LEN 20
/* Number of VFs currently enabled. */
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_LEN 4
/* Max number of VFs before sriov stride and offset may need to be changed. */
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_LEN 4
#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8
+#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_LEN 4
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1
/* RID offset of first VF from PF. */
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_LEN 4
/* RID offset of each subsequent VF from the previous. */
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_LEN 4
/***********************************/
@@ -9844,19 +9280,24 @@
#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
/* Number of VFs currently enabled. */
#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_LEN 4
/* Max number of VFs before sriov stride and offset may need to be changed. */
#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_LEN 4
#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
+#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_LEN 4
#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
/* RID offset of first VF from PF, or 0 for no change, or
* MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
*/
#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_LEN 4
/* RID offset of each subsequent VF from the previous, 0 for no change, or
* MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
*/
#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_LEN 4
/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
@@ -9879,12 +9320,15 @@
#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
/* The number of VIs allocated on this function */
#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_LEN 4
/* The base absolute VI number allocated to this function. Required to
* correctly interpret wakeup events.
*/
#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_LEN 4
/* Function's port vi_shift value (always 0 on Huntington) */
#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_LEN 4
/***********************************/
@@ -9900,6 +9344,7 @@
#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
/* The VI number to query. */
#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
+#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4
/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
#define MC_CMD_DUMP_VI_STATE_OUT_LEN 96
@@ -9933,6 +9378,7 @@
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
/* Combined metadata field. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
@@ -10015,6 +9461,7 @@
#define MC_CMD_ALLOC_PIOBUF_OUT_LEN 4
/* Handle for allocated push I/O buffer. */
#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0
+#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_LEN 4
/***********************************/
@@ -10030,6 +9477,7 @@
#define MC_CMD_FREE_PIOBUF_IN_LEN 4
/* Handle for allocated push I/O buffer. */
#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
/* MC_CMD_FREE_PIOBUF_OUT msgresponse */
#define MC_CMD_FREE_PIOBUF_OUT_LEN 0
@@ -10048,6 +9496,7 @@
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
/* VI number to get information for. */
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
/* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4
@@ -10070,6 +9519,7 @@
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_LEN 4
/***********************************/
@@ -10085,6 +9535,7 @@
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
/* VI number to set information for. */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
/* Transaction processing steering hint 1 for use with the Rx Queue. */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1
@@ -10104,6 +9555,7 @@
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_LEN 4
/* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */
#define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0
@@ -10121,22 +9573,25 @@
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
/* enum: MISC. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0
/* enum: IDO. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1
/* enum: RO. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2
/* enum: TPH Type. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
/* Amalgamated TLP info word. */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_LEN 4
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
@@ -10185,10 +9640,12 @@
/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
/* Amalgamated TLP info word. */
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_LEN 4
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
@@ -10229,7 +9686,7 @@
#define MC_CMD_SATELLITE_DOWNLOAD 0x91
#undef MC_CMD_0x91_PRIVILEGE_CTG
-#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs
* are subtle, and so downloads must proceed in a number of phases.
@@ -10256,57 +9713,61 @@
* in a command from the host.)
*/
#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_LEN 4
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */
/* Target for download. (These match the blob numbers defined in
* mc_flash_layout.h.)
*/
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_LEN 4
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb
/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc
/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd
/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe
/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf
/* enum: Valid in phases 1 (PHASE_RESET) and 4 (PHASE_READY) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff
/* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LEN 4
/* enum: Last chunk, containing checksum rather than data */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff
/* enum: Abort download of this item */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe
/* Length of this chunk in bytes */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_LEN 4
/* Data for this chunk */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
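The enum comments above imply a per-target chunked transfer within phase 2 (PHASE_IMEMS), terminated by a checksum chunk. A sketch of that inner loop follows (not part of the patch); send_satellite_download() is a hypothetical wrapper that builds MC_CMD_SATELLITE_DOWNLOAD_IN and returns the INFO code from the response.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical wrapper: fills MC_CMD_SATELLITE_DOWNLOAD_IN and returns INFO. */
int send_satellite_download(uint32_t phase, uint32_t target, uint32_t chunk_id,
			    const void *data, size_t len);

/* Download one blob during PHASE_IMEMS; chunks are 4-byte words. */
static int download_imem_blob(uint32_t target, const uint32_t *chunks,
			      size_t nchunks, uint32_t checksum)
{
	uint32_t id;
	int info;

	for (id = 0; id < nchunks; id++) {
		info = send_satellite_download(MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS,
					       target, id,
					       &chunks[id], sizeof(chunks[id]));
		if (info != MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK)
			return info;
	}

	/* The final chunk carries the checksum rather than data. */
	return send_satellite_download(MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS,
				       target,
				       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST,
				       &checksum, sizeof(checksum));
}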
@@ -10317,24 +9778,26 @@
#define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
/* Same as MC_CMD_ERR field, but included as 0 in success cases */
#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_LEN 4
/* Extra status information */
#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_LEN 4
/* enum: Code download OK, completed. */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0
/* enum: Code download aborted as requested. */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1
/* enum: Code download OK so far, send next chunk. */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2
/* enum: Download phases out of sequence */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100
/* enum: Bad target for this phase */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101
/* enum: Chunk ID out of sequence */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200
/* enum: Chunk length zero or too large */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201
/* enum: Checksum was incorrect */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300
/***********************************/
@@ -10356,6 +9819,7 @@
#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20
/* First word of flags. */
#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_LEN 4
#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3
#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4
@@ -10418,48 +9882,58 @@
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
/* enum: Standard RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0
/* enum: Low latency RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1
/* enum: Packed stream RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_DPDK 0x6
/* enum: BIST RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a
/* enum: RXDP Test firmware image 1 */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
/* enum: RXDP Test firmware image 2 */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
/* enum: RXDP Test firmware image 3 */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
/* enum: RXDP Test firmware image 4 */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
/* enum: RXDP Test firmware image 5 */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105
/* enum: RXDP Test firmware image 6 */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
/* enum: RXDP Test firmware image 7 */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
/* enum: RXDP Test firmware image 8 */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
/* enum: RXDP Test firmware image 9 */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_SLOW 0x10c
/* TxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6
#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2
/* enum: Standard TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0
/* enum: Low latency TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1
/* enum: High packet rate TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_DPDK 0x6
/* enum: BIST TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d
/* enum: TXDP Test firmware image 1 */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
/* enum: TXDP Test firmware image 2 */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
/* enum: TXDP CSR bus test firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0
@@ -10469,39 +9943,43 @@
/* enum: reserved value - do not use (may indicate alternative interpretation
* of REV field in future)
*/
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED 0x0
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED 0x0
/* enum: Trivial RX PD firmware for early Huntington development (Huntington
* development only)
*/
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
/* enum: RX PD firmware with approximately Siena-compatible behaviour
* (Huntington development only)
*/
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
/* enum: Full featured RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
/* enum: (deprecated original name for the FULL_FEATURED variant) */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant RX PD firmware using PM rather than MAC
* (Huntington development only)
*/
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
/* enum: Low latency RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
/* enum: Packed stream RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
/* enum: RX PD firmware handling layer 2 only for high packet rate performance
* tests (Medford development only)
*/
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_DPDK 0xa
/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* enum: RX PD firmware parsing but not filtering network overlay tunnel
* encapsulations (Medford development only)
*/
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0
@@ -10511,36 +9989,42 @@
/* enum: reserved value - do not use (may indicate alternative interpretation
* of REV field in future)
*/
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED 0x0
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED 0x0
/* enum: Trivial TX PD firmware for early Huntington development (Huntington
* development only)
*/
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
/* enum: TX PD firmware with approximately Siena-compatible behaviour
* (Huntington development only)
*/
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
/* enum: Full featured TX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
/* enum: (deprecated original name for the FULL_FEATURED variant) */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant TX PD firmware using PM rather than MAC
* (Huntington development only)
*/
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
/* enum: TX PD firmware handling layer 2 only for high packet rate performance
* tests (Medford development only)
*/
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine TX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_DPDK 0xa
/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* Hardware capabilities of NIC */
#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_LEN 4
/* Licensed capabilities */
#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_LEN 4
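The new DPDK datapath enums above let a driver confirm it is talking to the dpdk firmware variant before requesting FILTER_OP_V3 actions (other variants fail them with ENOTSUP). A sketch, not part of the patch, assuming the TYPE subfield has already been extracted from RXPD_FW_VERSION by the caller.

#include <stdbool.h>
#include <stdint.h>

/* rxpd_fw_type is the TYPE subfield of RXPD_FW_VERSION, extracted by the caller. */
static bool rx_datapath_supports_filter_op_v3_actions(uint16_t rxpd_fw_type)
{
	/* Non-dpdk firmware variants reject FILTER_OP_V3 actions with ENOTSUP. */
	return rxpd_fw_type == MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_DPDK;
}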
/* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */
#define MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0
@@ -10549,6 +10033,7 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72
/* First word of flags. */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_LEN 4
#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3
#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4
@@ -10611,48 +10096,58 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_OFST 4
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_LEN 2
/* enum: Standard RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0
/* enum: Low latency RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1
/* enum: Packed stream RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_DPDK 0x6
/* enum: BIST RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a
/* enum: RXDP Test firmware image 1 */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
/* enum: RXDP Test firmware image 2 */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
/* enum: RXDP Test firmware image 3 */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
/* enum: RXDP Test firmware image 4 */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
/* enum: RXDP Test firmware image 5 */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105
/* enum: RXDP Test firmware image 6 */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
/* enum: RXDP Test firmware image 7 */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
/* enum: RXDP Test firmware image 8 */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
/* enum: RXDP Test firmware image 9 */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_SLOW 0x10c
/* TxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2
/* enum: Standard TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0
/* enum: Low latency TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1
/* enum: High packet rate TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_DPDK 0x6
/* enum: BIST TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d
/* enum: TXDP Test firmware image 1 */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
/* enum: TXDP Test firmware image 2 */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
/* enum: TXDP CSR bus test firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_OFST 8
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_LEN 2
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_LBN 0
@@ -10662,39 +10157,43 @@
/* enum: reserved value - do not use (may indicate alternative interpretation
* of REV field in future)
*/
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0
/* enum: Trivial RX PD firmware for early Huntington development (Huntington
* development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
/* enum: RX PD firmware with approximately Siena-compatible behaviour
* (Huntington development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
/* enum: Full featured RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
/* enum: (deprecated original name for the FULL_FEATURED variant) */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant RX PD firmware using PM rather than MAC
* (Huntington development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
/* enum: Low latency RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
/* enum: Packed stream RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
/* enum: RX PD firmware handling layer 2 only for high packet rate performance
* tests (Medford development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_DPDK 0xa
/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* enum: RX PD firmware parsing but not filtering network overlay tunnel
* encapsulations (Medford development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_OFST 10
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_LEN 2
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_LBN 0
@@ -10704,38 +10203,45 @@
/* enum: reserved value - do not use (may indicate alternative interpretation
* of REV field in future)
*/
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0
/* enum: Trivial TX PD firmware for early Huntington development (Huntington
* development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
/* enum: TX PD firmware with approximately Siena-compatible behaviour
* (Huntington development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
/* enum: Full featured TX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
/* enum: (deprecated original name for the FULL_FEATURED variant) */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant TX PD firmware using PM rather than MAC
* (Huntington development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
/* enum: TX PD firmware handling layer 2 only for high packet rate performance
* tests (Medford development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine TX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_DPDK 0xa
/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* Hardware capabilities of NIC */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_LEN 4
/* Licensed capabilities */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_LEN 4
/* Second word of flags. Not present on older firmware (check the length). */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN 4
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1
@@ -10766,6 +10272,30 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_LBN 14
#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_WIDTH 1
/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
* on older firmware (check the length).
*/
@@ -10779,18 +10309,18 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
/* enum: The caller is not permitted to access information on this PF. */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff
/* enum: PF does not exist. */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe
/* enum: PF does exist but is not assigned to any external port. */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd
/* enum: This value indicates that PF is assigned, but it cannot be expressed
* in this field. It is intended for a possible future situation where a more
* complex scheme of PFs to ports mapping is being used. The future driver
* should look for a new field supporting the new scheme. The current/old
* driver should treat this value as PF_NOT_ASSIGNED.
*/
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
/* One byte per PF containing the number of its VFs, indexed by PF number. A
* special value indicates that a PF is not present.
*/
@@ -10798,9 +10328,9 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_LEN 1
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_NUM 16
/* enum: The caller is not permitted to access information on this PF. */
-/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
-/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */
/* Number of VIs available for each external port */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2
@@ -10826,6 +10356,7 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 76
/* First word of flags. */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_LEN 4
#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3
#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4
@@ -10888,48 +10419,58 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_OFST 4
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_LEN 2
/* enum: Standard RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP 0x0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP 0x0
/* enum: Low latency RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1
/* enum: Packed stream RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_DPDK 0x6
/* enum: BIST RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a
/* enum: RXDP Test firmware image 1 */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
/* enum: RXDP Test firmware image 2 */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
/* enum: RXDP Test firmware image 3 */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
/* enum: RXDP Test firmware image 4 */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
/* enum: RXDP Test firmware image 5 */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_BACKPRESSURE 0x105
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_BACKPRESSURE 0x105
/* enum: RXDP Test firmware image 6 */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
/* enum: RXDP Test firmware image 7 */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
/* enum: RXDP Test firmware image 8 */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
/* enum: RXDP Test firmware image 9 */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_SLOW 0x10c
/* TxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2
/* enum: Standard TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP 0x0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP 0x0
/* enum: Low latency TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1
/* enum: High packet rate TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_DPDK 0x6
/* enum: BIST TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d
/* enum: TXDP Test firmware image 1 */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
/* enum: TXDP Test firmware image 2 */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
/* enum: TXDP CSR bus test firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_CSR 0x103
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_OFST 8
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_LEN 2
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_LBN 0
@@ -10939,39 +10480,43 @@
/* enum: reserved value - do not use (may indicate alternative interpretation
* of REV field in future)
*/
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED 0x0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED 0x0
/* enum: Trivial RX PD firmware for early Huntington development (Huntington
* development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
/* enum: RX PD firmware with approximately Siena-compatible behaviour
* (Huntington development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
/* enum: Full featured RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
/* enum: (deprecated original name for the FULL_FEATURED variant) */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant RX PD firmware using PM rather than MAC
* (Huntington development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
/* enum: Low latency RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
/* enum: Packed stream RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
/* enum: RX PD firmware handling layer 2 only for high packet rate performance
* tests (Medford development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_DPDK 0xa
/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* enum: RX PD firmware parsing but not filtering network overlay tunnel
* encapsulations (Medford development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_OFST 10
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_LEN 2
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_LBN 0
@@ -10981,38 +10526,45 @@
/* enum: reserved value - do not use (may indicate alternative interpretation
* of REV field in future)
*/
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED 0x0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED 0x0
/* enum: Trivial TX PD firmware for early Huntington development (Huntington
* development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
/* enum: TX PD firmware with approximately Siena-compatible behaviour
* (Huntington development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
/* enum: Full featured TX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
/* enum: (deprecated original name for the FULL_FEATURED variant) */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant TX PD firmware using PM rather than MAC
* (Huntington development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
/* enum: TX PD firmware handling layer 2 only for high packet rate performance
* tests (Medford development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine TX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_DPDK 0xa
/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* Hardware capabilities of NIC */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_LEN 4
/* Licensed capabilities */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_LEN 4
/* Second word of flags. Not present on older firmware (check the length). */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_LEN 4
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1
@@ -11043,6 +10595,30 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_LBN 14
#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_WIDTH 1
/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
* on older firmware (check the length).
*/
@@ -11056,18 +10632,18 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
/* enum: The caller is not permitted to access information on this PF. */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff
/* enum: PF does not exist. */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe
/* enum: PF does exist but is not assigned to any external port. */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_ASSIGNED 0xfd
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_ASSIGNED 0xfd
/* enum: This value indicates that PF is assigned, but it cannot be expressed
* in this field. It is intended for a possible future situation where a more
* complex scheme of PFs to ports mapping is being used. The future driver
* should look for a new field supporting the new scheme. The current/old
* driver should treat this value as PF_NOT_ASSIGNED.
*/
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
/* One byte per PF containing the number of its VFs, indexed by PF number. A
* special value indicates that a PF is not present.
*/
@@ -11075,9 +10651,9 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_LEN 1
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_NUM 16
/* enum: The caller is not permitted to access information on this PF. */
-/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
-/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */
+/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */
/* Number of VIs available for each external port */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_LEN 2
@@ -11108,11 +10684,11 @@
/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
* CTPIO is not mapped.
*/
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K 0x0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K 0x0
/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1
/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2
/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
* (SF-115995-SW) in the present configuration of firmware and port mode.
*/
@@ -11124,6 +10700,723 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* MC_CMD_GET_CAPABILITIES_V4_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_LEN 78
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
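
The _LBN/_WIDTH pairs above (REV at bit 0, width 12; TYPE at bit 12, width 4) describe sub-fields of the 16-bit RXPD_FW_VERSION word. A minimal decoding sketch, with a hypothetical helper name that is not part of this patch:

#include <stdint.h>

/* Illustrative only: extract an MCDI bit-field given its _LBN (least
 * significant bit number) and _WIDTH values. */
static inline uint32_t mcdi_field(uint32_t word, unsigned int lbn, unsigned int width)
{
	return (word >> lbn) & ((1u << width) - 1u);
}

/* e.g. rev  = mcdi_field(rxpd_fw_version, 0, 12);
 *      type = mcdi_field(rxpd_fw_version, 12, 4);
 * a TYPE of 0xa would map to the DPDK RX PD firmware enum listed above. */
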
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_WIDTH 1
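
FLAGS2 above, like the fields that follow it, is only present when the firmware's response is long enough, as the "check the length" comments note. A hedged sketch of that length-gated read, with hypothetical names for the response buffer and helper; real drivers would use their MCDI accessors and handle endianness:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Illustrative only: read FLAGS2 (offset 20, length 4) from a raw
 * GET_CAPABILITIES response iff the response actually covers it.
 * Assumes a little-endian host; byte-swapping is omitted. */
static bool read_flags2(const uint8_t *resp, size_t resp_len, uint32_t *flags2_out)
{
	const size_t ofst = 20, len = 4;   /* FLAGS2_OFST / FLAGS2_LEN above */

	if (resp_len < ofst + len)
		return false;              /* older firmware: field not present */
	memcpy(flags2_out, resp + ofst, len);
	return true;
}
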
+/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
+ * on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
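
As the enum comments above spell out, values 0xfc..0xff of each PFS_TO_PORTS_ASSIGNMENT byte are special, and a current/old driver should fold INCOMPATIBLE_ASSIGNMENT into "not assigned". A small sketch of that interpretation (function name hypothetical, not from this patch):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: returns true and the external port number for a normal
 * assignment, false for any of the special values described above. */
static bool pf_port_assignment(uint8_t raw, unsigned int *port_out)
{
	switch (raw) {
	case 0xff: /* ACCESS_NOT_PERMITTED */
	case 0xfe: /* PF_NOT_PRESENT */
	case 0xfd: /* PF_NOT_ASSIGNED */
	case 0xfc: /* INCOMPATIBLE_ASSIGNMENT: treat as not assigned */
		return false;
	default:
		*port_out = raw;
		return true;
	}
}
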
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_LEN 1
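
The two cache-size fields above are stored as binary logarithms, so the usable entry count is 2 raised to the stored value; a one-line sketch (helper name is illustrative):

#include <stdint.h>

/* Illustrative: a stored value of 6 means a 2^6 = 64-entry descriptor cache.
 * Stored values are small in practice, so the shift does not overflow. */
static inline uint32_t desc_cache_entries(uint8_t log2_size)
{
	return UINT32_C(1) << log2_size;
}
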
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_64K 0x2
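
Per the VI_WINDOW_MODE description above, a driver queries this byte once and derives the per-VI BAR stride plus the PIO/CTPIO offsets from it. A hedged sketch of that mapping, with hypothetical struct and function names that are not part of this patch:

#include <stddef.h>
#include <stdint.h>

struct vi_window_layout {
	size_t stride;        /* bytes of BAR space per VI */
	size_t pio_offset;    /* PIO region offset inside the window */
	long   ctpio_offset;  /* CTPIO offset, or -1 when not mapped */
};

/* Illustrative only: translate the VI_WINDOW_MODE enum values above into the
 * window sizes and offsets their comments describe. */
static int vi_window_layout_from_mode(uint8_t mode, struct vi_window_layout *out)
{
	switch (mode) {
	case 0x0:  /* VI_WINDOW_MODE_8K: CTPIO is not mapped */
		*out = (struct vi_window_layout){ 8192, 4096, -1 };
		return 0;
	case 0x1:  /* VI_WINDOW_MODE_16K */
		*out = (struct vi_window_layout){ 16384, 4096, 12288 };
		return 0;
	case 0x2:  /* VI_WINDOW_MODE_64K */
		*out = (struct vi_window_layout){ 65536, 4096, 12288 };
		return 0;
	default:
		return -1; /* unknown mode from newer firmware */
	}
}
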
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_LEN 2
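
The MAC_STATS_NUM_STATS field above tells a driver how many 64-bit counters the MAC stats DMA can return; a buffer shorter than that many 8-byte entries gets a truncated array. A minimal allocation sketch (plain malloc here, DMA mapping omitted, names hypothetical):

#include <stdint.h>
#include <stdlib.h>

/* Illustrative only: size a MAC-stats buffer from MAC_STATS_NUM_STATS so the
 * full stats array, including GENERATION_END, fits. Caller frees. */
static uint64_t *alloc_mac_stats_buffer(uint16_t mac_stats_num_stats, size_t *len_out)
{
	size_t len = (size_t)mac_stats_num_stats * sizeof(uint64_t);

	*len_out = len;
	return len != 0 ? malloc(len) : NULL;
}
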
+
+/* MC_CMD_GET_CAPABILITIES_V5_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_LEN 84
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
+ * on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_LEN 2
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+
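For illustration, the MAC_STATS_NUM_STATS field above is what drives MAC stats DMA buffer sizing: each entry is a 64-bit value, so a buffer of at least MAC_STATS_NUM_STATS * 8 bytes is needed to avoid truncation. A minimal sketch, assuming the 16-bit field is read little-endian at offset 76 of the response and that the helper name is purely illustrative:

#include <stdint.h>
#include <stdlib.h>

/* Illustrative only: size a MAC stats DMA buffer from a raw
 * GET_CAPABILITIES_V5 response payload held in 'outbuf'.
 */
static void *mac_stats_buf_alloc(const uint8_t *outbuf, size_t *lenp)
{
        /* MAC_STATS_NUM_STATS: 16-bit little-endian field at offset 76 */
        uint16_t nstats = (uint16_t)(outbuf[76] | (outbuf[77] << 8));

        /* One 64-bit counter per entry, including GENERATION_END */
        *lenp = (size_t)nstats * 8;
        return calloc(nstats, sizeof(uint64_t));
}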
/***********************************/
/* MC_CMD_V2_EXTN
@@ -11144,7 +11437,16 @@
#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16
#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10
#define MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26
-#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 6
+#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 2
+/* Type of command/response */
+#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_LBN 28
+#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_WIDTH 4
+/* enum: MCDI command directed to or response originating from the MC. */
+#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_MC 0x0
+/* enum: MCDI command directed to a TSA controller. MCDI responses of this type
+ * are not defined.
+ */
+#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_TSA 0x1
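The new MESSAGE_TYPE field takes the top four bits of the MCDI v2 extension header (LBN 28, width 4), carved out of what was previously unused space. A minimal sketch of encoding it together with ACTUAL_LEN, using only the LBN/WIDTH values shown here; the helper itself is an assumption, not an existing API:

#include <stdint.h>

/* Illustrative only: merge ACTUAL_LEN and MESSAGE_TYPE into an
 * MCDI v2 extension header dword built elsewhere.
 */
static uint32_t mcdi_v2_hdr_set(uint32_t hdr, uint32_t actual_len,
                                uint32_t msg_type)
{
        hdr &= ~(0x3ffu << 16);          /* ACTUAL_LEN: LBN 16, width 10 */
        hdr |= (actual_len & 0x3ffu) << 16;
        hdr &= ~(0xfu << 28);            /* MESSAGE_TYPE: LBN 28, width 4 */
        hdr |= (msg_type & 0xfu) << 28;  /* 0x0 = MC, 0x1 = TSA */
        return hdr;
}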
/***********************************/
@@ -11163,6 +11465,7 @@
#define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4
/* the bucket id */
#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0
+#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_LEN 4
/***********************************/
@@ -11178,6 +11481,7 @@
#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
/* the bucket id */
#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0
+#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_LEN 4
/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */
#define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0
@@ -11196,17 +11500,22 @@
#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
/* the bucket id */
#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0
+#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_LEN 4
/* the rate in mbps */
#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
+#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_LEN 4
/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
/* the bucket id */
#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_LEN 4
/* the rate in mbps */
#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_LEN 4
/* the desired maximum fill level */
#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_LEN 4
/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
@@ -11225,10 +11534,13 @@
#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28
/* the txq id */
#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
+#define MC_CMD_TCM_TXQ_INIT_IN_QID_LEN 4
/* the static priority associated with the txq */
#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
+#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_LEN 4
/* bitmask of the priority queues this txq is inserted into when inserted. */
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_LEN 4
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
@@ -11237,25 +11549,32 @@
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
/* the reaction point (RP) bucket */
#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
+#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_LEN 4
/* an already reserved bucket (typically set to bucket associated with outer
* vswitch)
*/
#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_LEN 4
/* an already reserved bucket (typically set to bucket associated with inner
* vswitch)
*/
#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_LEN 4
/* the min bucket (typically for ETS/minimum bandwidth) */
#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
+#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_LEN 4
/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
/* the txq id */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_LEN 4
/* the static priority associated with the txq */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_LEN 4
/* bitmask of the priority queues this txq is inserted into when inserted. */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_LEN 4
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
@@ -11264,18 +11583,23 @@
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
/* the reaction point (RP) bucket */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_LEN 4
/* an already reserved bucket (typically set to bucket associated with outer
* vswitch)
*/
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_LEN 4
/* an already reserved bucket (typically set to bucket associated with inner
* vswitch)
*/
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_LEN 4
/* the min bucket (typically for ETS/minimum bandwidth) */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_LEN 4
/* the static priority associated with the txq */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_LEN 4
/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
@@ -11294,8 +11618,10 @@
#define MC_CMD_LINK_PIOBUF_IN_LEN 8
/* Handle for allocated push I/O buffer. */
#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
/* Function Local Instance (VI) number. */
#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4
+#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4
/* MC_CMD_LINK_PIOBUF_OUT msgresponse */
#define MC_CMD_LINK_PIOBUF_OUT_LEN 0
@@ -11314,6 +11640,7 @@
#define MC_CMD_UNLINK_PIOBUF_IN_LEN 4
/* Function Local Instance (VI) number. */
#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0
+#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4
/* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */
#define MC_CMD_UNLINK_PIOBUF_OUT_LEN 0
@@ -11332,20 +11659,23 @@
#define MC_CMD_VSWITCH_ALLOC_IN_LEN 16
/* The port to connect to the v-switch's upstream port. */
#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* The type of v-switch to create. */
#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_LEN 4
/* enum: VLAN */
-#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
/* enum: VEB */
-#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2
/* enum: VEPA (obsolete) */
-#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3
/* enum: MUX */
-#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX 0x4
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX 0x4
/* enum: Snapper specific; semantics TBD */
-#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5
/* Flags controlling v-port creation */
#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_LEN 4
#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
/* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators,
@@ -11356,6 +11686,7 @@
* v-ports with this number of tags.
*/
#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
#define MC_CMD_VSWITCH_ALLOC_OUT_LEN 0
@@ -11374,6 +11705,7 @@
#define MC_CMD_VSWITCH_FREE_IN_LEN 4
/* The port to which the v-switch is connected. */
#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_VSWITCH_FREE_OUT msgresponse */
#define MC_CMD_VSWITCH_FREE_OUT_LEN 0
@@ -11394,6 +11726,7 @@
#define MC_CMD_VSWITCH_QUERY_IN_LEN 4
/* The port to which the v-switch is connected. */
#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
#define MC_CMD_VSWITCH_QUERY_OUT_LEN 0
@@ -11412,28 +11745,31 @@
#define MC_CMD_VPORT_ALLOC_IN_LEN 20
/* The port to which the v-switch is connected. */
#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* The type of the new v-port. */
#define MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_VPORT_ALLOC_IN_TYPE_LEN 4
/* enum: VLAN (obsolete) */
-#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1
/* enum: VEB (obsolete) */
-#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2
/* enum: VEPA (obsolete) */
-#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3
/* enum: A normal v-port receives packets which match a specified MAC and/or
* VLAN.
*/
-#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4
/* enum: An expansion v-port receives traffic which doesn't match any other
* v-port.
*/
-#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5
/* enum: A test v-port receives packets which match any filters installed by
* its downstream components.
*/
-#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6
/* Flags controlling v-port creation */
#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VPORT_ALLOC_IN_FLAGS_LEN 4
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1
@@ -11443,8 +11779,10 @@
* v-switch.
*/
#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
/* The actual VLAN tags to insert/remove */
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_LEN 4
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16
@@ -11454,6 +11792,7 @@
#define MC_CMD_VPORT_ALLOC_OUT_LEN 4
/* The handle of the new v-port */
#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_LEN 4
/***********************************/
@@ -11469,6 +11808,7 @@
#define MC_CMD_VPORT_FREE_IN_LEN 4
/* The handle of the v-port */
#define MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_FREE_IN_VPORT_ID_LEN 4
/* MC_CMD_VPORT_FREE_OUT msgresponse */
#define MC_CMD_VPORT_FREE_OUT_LEN 0
@@ -11487,18 +11827,23 @@
#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 30
/* The port to connect to the v-adaptor's port. */
#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* Flags controlling v-adaptor creation */
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_LEN 4
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
/* The number of VLAN tags to strip on receive */
#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_LEN 4
/* The number of VLAN tags to transparently insert/remove. */
#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
/* The actual VLAN tags to insert/remove */
#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_LEN 4
#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0
#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16
#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16
@@ -11507,7 +11852,7 @@
#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_OFST 24
#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_LEN 6
/* enum: Derive the MAC address from the upstream port */
-#define MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC 0x0
+#define MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC 0x0
/* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */
#define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0
@@ -11526,6 +11871,7 @@
#define MC_CMD_VADAPTOR_FREE_IN_LEN 4
/* The port to which the v-adaptor is connected. */
#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_VADAPTOR_FREE_OUT msgresponse */
#define MC_CMD_VADAPTOR_FREE_OUT_LEN 0
@@ -11544,6 +11890,7 @@
#define MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10
/* The port to which the v-adaptor is connected. */
#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
/* The new MAC address to assign to this v-adaptor */
#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4
#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6
@@ -11565,6 +11912,7 @@
#define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4
/* The port to which the v-adaptor is connected. */
#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
#define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
@@ -11586,15 +11934,19 @@
#define MC_CMD_VADAPTOR_QUERY_IN_LEN 4
/* The port to which the v-adaptor is connected. */
#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */
#define MC_CMD_VADAPTOR_QUERY_OUT_LEN 12
/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0
+#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_LEN 4
/* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */
#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4
+#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_LEN 4
/* The number of VLAN tags that may still be added */
#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8
+#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
/***********************************/
@@ -11610,8 +11962,10 @@
#define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
/* The port to assign. */
#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_LEN 4
/* The target function to modify. */
#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4
+#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_LEN 4
#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0
#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16
#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16
@@ -11633,9 +11987,13 @@
/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_LEN 4
/* Write enable bits 0-3, set to write, clear to read. */
#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
@@ -11647,9 +12005,13 @@
*/
#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_LEN 4
/***********************************/
@@ -11665,11 +12027,13 @@
#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
/* The handle of the owning upstream port */
#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
/* The handle of the new Onload stack */
#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_LEN 4
/***********************************/
@@ -11685,6 +12049,7 @@
#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
/* The handle of the Onload stack */
#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_LEN 4
/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
@@ -11703,21 +12068,24 @@
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
/* The handle of the owning upstream port */
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* The type of context to allocate */
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_LEN 4
/* enum: Allocate a context for exclusive use. The key and indirection table
* must be explicitly configured.
*/
-#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0
/* enum: Allocate a context for shared use; this will spread across a range of
* queues, but the key and indirection table are pre-configured and may not be
 * changed. For this mode, NUM_QUEUES must be 2, 4, 8, 16, 32 or 64.
*/
-#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1
/* Number of queues spanned by this context, in the range 1-64; valid offsets
* in the indirection table will be in the range 0 to NUM_QUEUES-1.
*/
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_LEN 4
/* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
@@ -11726,8 +12094,9 @@
* handle.
*/
#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_LEN 4
/* enum: guaranteed invalid RSS context handle value */
-#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff
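Two details above are worth spelling out: a shared-type context requires NUM_QUEUES to be 2, 4, 8, 16, 32 or 64, and the returned handle should be checked against the guaranteed-invalid value 0xffffffff. A small sketch of those checks; the function name is illustrative, not part of any driver API:

#include <stdbool.h>
#include <stdint.h>

#define RSS_CONTEXT_ID_INVALID 0xffffffffu

/* Illustrative only: validate shared-mode RSS_CONTEXT_ALLOC input/output. */
static bool rss_alloc_shared_ok(unsigned int num_queues, uint32_t handle)
{
        /* Shared mode: a power of two between 2 and 64 inclusive */
        bool nq_ok = num_queues >= 2 && num_queues <= 64 &&
                     (num_queues & (num_queues - 1)) == 0;

        return nq_ok && handle != RSS_CONTEXT_ID_INVALID;
}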
/***********************************/
@@ -11743,6 +12112,7 @@
#define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_LEN 4
/* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0
@@ -11761,6 +12131,7 @@
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_LEN 4
/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40
@@ -11782,6 +12153,7 @@
#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_LEN 4
/* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44
@@ -11803,6 +12175,7 @@
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_LEN 4
/* The 128-byte indirection table (1 byte per entry) */
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128
@@ -11824,6 +12197,7 @@
#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_LEN 4
/* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132
@@ -11845,6 +12219,7 @@
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4
/* Hash control flags. The _EN bits are always supported, but new modes are
* available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES:
* in this case, the MODE fields may be set to non-zero values, and will take
@@ -11858,6 +12233,7 @@
* particular packet type.)
*/
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_LEN 4
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1
@@ -11898,6 +12274,7 @@
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4
/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
@@ -11915,6 +12292,7 @@
* always be used for a SET regardless of old/new driver vs. old/new firmware.
*/
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_LEN 4
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1
@@ -11952,11 +12330,13 @@
#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
/* The handle of the owning upstream port */
#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* Number of queues spanned by this mapping, in the range 1-64; valid fixed
* offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and
* referenced RSS contexts must span no more than this number.
*/
#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_LEN 4
/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
@@ -11965,8 +12345,9 @@
* handle.
*/
#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_LEN 4
/* enum: guaranteed invalid .1p mapping handle value */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff
/***********************************/
@@ -11982,6 +12363,7 @@
#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
/* The handle of the .1p mapping */
#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_LEN 4
/* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */
#define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0
@@ -12000,6 +12382,7 @@
#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
/* The handle of the .1p mapping */
#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
* handle)
*/
@@ -12023,6 +12406,7 @@
#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
/* The handle of the .1p mapping */
#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
/* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */
#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36
@@ -12049,10 +12433,13 @@
#define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
/* Base absolute interrupt vector number. */
#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
+#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_LEN 4
/* Number of interrupt vectors allocated to this PF. */
#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_LEN 4
/* Number of interrupt vectors to allocate per VF. */
#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_LEN 4
/***********************************/
@@ -12070,10 +12457,13 @@
* let the system find a suitable base.
*/
#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
+#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_LEN 4
/* Number of interrupt vectors allocated to this PF. */
#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_LEN 4
/* Number of interrupt vectors to allocate per VF. */
#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_LEN 4
/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
#define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
@@ -12092,6 +12482,7 @@
#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
/* The handle of the v-port */
#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_LEN 4
/* MAC address to add */
#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
@@ -12113,6 +12504,7 @@
#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
/* The handle of the v-port */
#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_LEN 4
/* MAC address to delete */
#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4
#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6
@@ -12134,6 +12526,7 @@
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
/* The handle of the v-port */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_LEN 4
/* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
@@ -12141,6 +12534,7 @@
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
/* The number of MAC addresses returned */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_LEN 4
/* Array of MAC addresses */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
@@ -12163,8 +12557,10 @@
#define MC_CMD_VPORT_RECONFIGURE_IN_LEN 44
/* The handle of the v-port */
#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_LEN 4
/* Flags requesting what should be changed. */
#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_LEN 4
#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0
#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1
#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1
@@ -12174,14 +12570,17 @@
* v-switch.
*/
#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_LEN 4
/* The actual VLAN tags to insert/remove */
#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_LEN 4
#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0
#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16
#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16
#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16
/* The number of MAC addresses to add */
#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_LEN 4
/* MAC addresses to add */
#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20
#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6
@@ -12190,6 +12589,7 @@
/* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */
#define MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4
#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_LEN 4
#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0
#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1
@@ -12207,15 +12607,18 @@
#define MC_CMD_EVB_PORT_QUERY_IN_LEN 4
/* The handle of the v-port */
#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
+#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_LEN 4
/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
#define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
+#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_LEN 4
/* The number of VLAN tags that may be used on a v-adaptor connected to this
* EVB port.
*/
#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
+#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
/***********************************/
@@ -12228,14 +12631,16 @@
#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
#undef MC_CMD_0xab_PRIVILEGE_CTG
-#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
/* Index of the first buffer table entry. */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
/* Number of buffer table entries to dump. */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
/* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
@@ -12260,16 +12665,17 @@
/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_LEN 4
#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1
#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_WIDTH 2
/* enum: pad to 64 bytes */
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0
/* enum: pad to 128 bytes (Medford only) */
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1
/* enum: pad to 256 bytes (Medford only) */
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2
/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */
#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0
@@ -12290,6 +12696,7 @@
/* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
#define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_LEN 4
#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1
@@ -12314,8 +12721,10 @@
#define MC_CMD_GET_CLOCK_OUT_LEN 8
/* System frequency, MHz */
#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0
+#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_LEN 4
/* DPCPU frequency, MHz */
#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4
+#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_LEN 4
/***********************************/
@@ -12325,69 +12734,83 @@
#define MC_CMD_SET_CLOCK 0xad
#undef MC_CMD_0xad_PRIVILEGE_CTG
-#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_SET_CLOCK_IN msgrequest */
#define MC_CMD_SET_CLOCK_IN_LEN 28
/* Requested frequency in MHz for system clock domain */
#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
+#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_LEN 4
/* enum: Leave the system clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0
+#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for inter-core clock domain */
#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
+#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_LEN 4
/* enum: Leave the inter-core clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0
+#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for DPCPU clock domain */
#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
+#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_LEN 4
/* enum: Leave the DPCPU clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0
+#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for PCS clock domain */
#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12
+#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_LEN 4
/* enum: Leave the PCS clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0
+#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for MC clock domain */
#define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16
+#define MC_CMD_SET_CLOCK_IN_MC_FREQ_LEN 4
/* enum: Leave the MC clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0
+#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for rmon clock domain */
#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20
+#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_LEN 4
/* enum: Leave the rmon clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0
+#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for vswitch clock domain */
#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24
+#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_LEN 4
/* enum: Leave the vswitch clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0
+#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0
/* MC_CMD_SET_CLOCK_OUT msgresponse */
#define MC_CMD_SET_CLOCK_OUT_LEN 28
/* Resulting system frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
+#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_LEN 4
/* enum: The system clock domain doesn't exist */
-#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0
+#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0
/* Resulting inter-core frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
+#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_LEN 4
/* enum: The inter-core clock domain doesn't exist / isn't used */
-#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0
+#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0
/* Resulting DPCPU frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
+#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_LEN 4
/* enum: The dpcpu clock domain doesn't exist */
-#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0
+#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0
/* Resulting PCS frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12
+#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_LEN 4
/* enum: The PCS clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0
+#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0
/* Resulting MC frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16
+#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_LEN 4
/* enum: The MC clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0
+#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0
/* Resulting rmon frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20
+#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_LEN 4
/* enum: The rmon clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0
+#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0
/* Resulting vswitch frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24
+#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_LEN 4
/* enum: The vswitch clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0
+#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0
/***********************************/
@@ -12397,27 +12820,28 @@
#define MC_CMD_DPCPU_RPC 0xae
#undef MC_CMD_0xae_PRIVILEGE_CTG
-#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DPCPU_RPC_IN msgrequest */
#define MC_CMD_DPCPU_RPC_IN_LEN 36
#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
+#define MC_CMD_DPCPU_RPC_IN_CPU_LEN 4
/* enum: RxDPCPU0 */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0
/* enum: TxDPCPU0 */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1
/* enum: TxDPCPU1 */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2
/* enum: RxDPCPU1 (Medford only) */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3
/* enum: RxDPCPU (will be for the calling function; for now, just an alias of
* DPCPU_RX0)
*/
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80
/* enum: TxDPCPU (will be for the calling function; for now, just an alias of
* DPCPU_TX0)
*/
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81
/* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be
* initialised to zero
*/
@@ -12425,15 +12849,15 @@
#define MC_CMD_DPCPU_RPC_IN_DATA_LEN 32
#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8
#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */
#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16
#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16
#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16
@@ -12444,11 +12868,11 @@
#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240
#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16
#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */
#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48
#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16
#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64
@@ -12457,21 +12881,24 @@
#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16
#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16
#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */
#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64
#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16
#define MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12
#define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24
/* Register data to write. Only valid in write/write-read. */
#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_LEN 4
/* Register address. */
#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_LEN 4
/* MC_CMD_DPCPU_RPC_OUT msgresponse */
#define MC_CMD_DPCPU_RPC_OUT_LEN 36
#define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0
+#define MC_CMD_DPCPU_RPC_OUT_RC_LEN 4
/* DATA */
#define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4
#define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32
@@ -12482,9 +12909,13 @@
#define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12
#define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24
#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_LEN 4
#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_LEN 4
#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_LEN 4
#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_LEN 4
/***********************************/
@@ -12500,6 +12931,7 @@
#define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
/* Interrupt level relative to base for function. */
#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0
+#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_LEN 4
/* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */
#define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0
@@ -12512,14 +12944,15 @@
#define MC_CMD_SHMBOOT_OP 0xe6
#undef MC_CMD_0xe6_PRIVILEGE_CTG
-#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_SHMBOOT_OP_IN msgrequest */
#define MC_CMD_SHMBOOT_OP_IN_LEN 4
/* Identifies the operation to perform */
#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
+#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_LEN 4
/* enum: Copy slave_data section to the slave core. (Greenport only) */
-#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0
+#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0
/* MC_CMD_SHMBOOT_OP_OUT msgresponse */
#define MC_CMD_SHMBOOT_OP_OUT_LEN 0
@@ -12532,13 +12965,16 @@
#define MC_CMD_CAP_BLK_READ 0xe7
#undef MC_CMD_0xe7_PRIVILEGE_CTG
-#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_CAP_BLK_READ_IN msgrequest */
#define MC_CMD_CAP_BLK_READ_IN_LEN 12
#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
+#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_LEN 4
#define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4
+#define MC_CMD_CAP_BLK_READ_IN_ADDR_LEN 4
#define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8
+#define MC_CMD_CAP_BLK_READ_IN_COUNT_LEN 4
/* MC_CMD_CAP_BLK_READ_OUT msgresponse */
#define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8
@@ -12559,53 +12995,77 @@
#define MC_CMD_DUMP_DO 0xe8
#undef MC_CMD_0xe8_PRIVILEGE_CTG
-#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DUMP_DO_IN msgrequest */
#define MC_CMD_DUMP_DO_IN_LEN 52
#define MC_CMD_DUMP_DO_IN_PADDING_OFST 0
+#define MC_CMD_DUMP_DO_IN_PADDING_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4
-#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */
-#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_LEN 4
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
-#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */
-#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */
-#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */
-#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
-#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
+#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
-#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
+#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4
/* enum: The uart port this command was received over (if using a uart
* transport)
*/
-#define MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff
+#define MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28
-#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */
-#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_LEN 4
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4
/* MC_CMD_DUMP_DO_OUT msgresponse */
#define MC_CMD_DUMP_DO_OUT_LEN 4
#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST 0
+#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_LEN 4
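The DUMPSPEC_SRC_CUSTOM_* and DUMPFILE_DST_CUSTOM_* fields above overlay one another (note the repeated offsets 12/16 and 36/40); which interpretation applies depends on the location selected in the corresponding CUSTOM_TYPE field. A hedged sketch of the simplest case, dumping the default dumpspec to the NVRAM dump partition, reusing the hypothetical helpers from the earlier MC_CMD_CAP_BLK_READ sketch:

/* Reuses the hypothetical mcdi_execute()/mcdi_set_dword()/mcdi_get_dword()
 * helpers sketched after MC_CMD_CAP_BLK_READ above.
 */
static int dump_default_to_nvram(uint32_t *dumpfile_sizep)
{
	uint8_t req[MC_CMD_DUMP_DO_IN_LEN];
	uint8_t resp[MC_CMD_DUMP_DO_OUT_LEN];
	size_t resp_len;
	int rc;

	memset(req, 0, sizeof(req));
	/* Use the built-in (default) dumpspec... */
	mcdi_set_dword(req, MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST,
		       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT);
	/* ...and write the dump file to the NVRAM dump partition. */
	mcdi_set_dword(req, MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST,
		       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION);

	rc = mcdi_execute(MC_CMD_DUMP_DO, req, sizeof(req),
			  resp, sizeof(resp), &resp_len);
	if (rc != 0 || resp_len < MC_CMD_DUMP_DO_OUT_LEN)
		return (rc != 0) ? rc : -1;

	*dumpfile_sizep = mcdi_get_dword(resp,
			MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST);
	return 0;
}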
/***********************************/
@@ -12615,41 +13075,64 @@
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9
#undef MC_CMD_0xe9_PRIVILEGE_CTG
-#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_OFST 4
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC */
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_OFST 28
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPFILE_DST */
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4
/***********************************/
@@ -12661,17 +13144,20 @@
#define MC_CMD_SET_PSU 0xea
#undef MC_CMD_0xea_PRIVILEGE_CTG
-#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_SET_PSU_IN msgrequest */
#define MC_CMD_SET_PSU_IN_LEN 12
#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
-#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_PARAM_LEN 4
+#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
#define MC_CMD_SET_PSU_IN_RAIL_OFST 4
-#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
-#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_LEN 4
+#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
/* desired value, eg voltage in mV */
#define MC_CMD_SET_PSU_IN_VALUE_OFST 8
+#define MC_CMD_SET_PSU_IN_VALUE_LEN 4
/* MC_CMD_SET_PSU_OUT msgresponse */
#define MC_CMD_SET_PSU_OUT_LEN 0
@@ -12692,7 +13178,9 @@
/* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */
#define MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8
#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0
+#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_LEN 4
#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4
+#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_LEN 4
/***********************************/
@@ -12704,7 +13192,7 @@
#define MC_CMD_ENABLE_OFFLINE_BIST 0xed
#undef MC_CMD_0xed_PRIVILEGE_CTG
-#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
@@ -12730,12 +13218,16 @@
#define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num))
/* CRC32 over OFFSET, LENGTH, RESERVED, DATA */
#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0
+#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_LEN 4
/* Offset at which to write the data */
#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4
+#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_LEN 4
/* Length of data */
#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8
+#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_LEN 4
/* Reserved for future use */
#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12
+#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_LEN 4
#define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16
#define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1
#define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0
@@ -12759,12 +13251,16 @@
#define MC_CMD_UART_RECV_DATA_OUT_LEN 16
/* CRC32 over OFFSET, LENGTH, RESERVED */
#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0
+#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_LEN 4
/* Offset from which to read the data */
#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4
+#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_LEN 4
/* Length of data */
#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8
+#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_LEN 4
/* Reserved for future use */
#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12
+#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_LEN 4
/* MC_CMD_UART_RECV_DATA_IN msgresponse */
#define MC_CMD_UART_RECV_DATA_IN_LENMIN 16
@@ -12772,12 +13268,16 @@
#define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
/* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
+#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_LEN 4
/* Offset at which to write the data */
#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_LEN 4
/* Length of data */
#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_LEN 4
/* Reserved for future use */
#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_LEN 4
#define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16
#define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
#define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
@@ -12791,14 +13291,16 @@
#define MC_CMD_READ_FUSES 0xf0
#undef MC_CMD_0xf0_PRIVILEGE_CTG
-#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_READ_FUSES_IN msgrequest */
#define MC_CMD_READ_FUSES_IN_LEN 8
/* Offset in OTP to read */
#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
+#define MC_CMD_READ_FUSES_IN_OFFSET_LEN 4
/* Length of data to read in bytes */
#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
+#define MC_CMD_READ_FUSES_IN_LENGTH_LEN 4
/* MC_CMD_READ_FUSES_OUT msgresponse */
#define MC_CMD_READ_FUSES_OUT_LENMIN 4
@@ -12806,6 +13308,7 @@
#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
/* Length of returned OTP data in bytes */
#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
+#define MC_CMD_READ_FUSES_OUT_LENGTH_LEN 4
/* Returned data */
#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4
#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1
@@ -12820,7 +13323,7 @@
#define MC_CMD_KR_TUNE 0xf1
#undef MC_CMD_0xf1_PRIVILEGE_CTG
-#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_KR_TUNE_IN msgrequest */
#define MC_CMD_KR_TUNE_IN_LENMIN 4
@@ -12830,26 +13333,30 @@
#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_OFST 0
#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_LEN 1
/* enum: Get current RXEQ settings */
-#define MC_CMD_KR_TUNE_IN_RXEQ_GET 0x0
+#define MC_CMD_KR_TUNE_IN_RXEQ_GET 0x0
/* enum: Override RXEQ settings */
-#define MC_CMD_KR_TUNE_IN_RXEQ_SET 0x1
+#define MC_CMD_KR_TUNE_IN_RXEQ_SET 0x1
/* enum: Get current TX Driver settings */
-#define MC_CMD_KR_TUNE_IN_TXEQ_GET 0x2
+#define MC_CMD_KR_TUNE_IN_TXEQ_GET 0x2
/* enum: Override TX Driver settings */
-#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3
+#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3
/* enum: Force KR Serdes reset / recalibration */
-#define MC_CMD_KR_TUNE_IN_RECAL 0x4
+#define MC_CMD_KR_TUNE_IN_RECAL 0x4
/* enum: Start KR Serdes Eye diagram plot on a given lane. Lane must have valid
* signal.
*/
-#define MC_CMD_KR_TUNE_IN_START_EYE_PLOT 0x5
+#define MC_CMD_KR_TUNE_IN_START_EYE_PLOT 0x5
/* enum: Poll KR Serdes Eye diagram plot. Returns one row of BER data. The
* caller should call this command repeatedly after starting eye plot, until no
* more data is returned.
*/
-#define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6
+#define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6
/* enum: Read Figure Of Merit (eye quality, higher is better). */
-#define MC_CMD_KR_TUNE_IN_READ_FOM 0x7
+#define MC_CMD_KR_TUNE_IN_READ_FOM 0x7
+/* enum: Start/stop link training frames */
+#define MC_CMD_KR_TUNE_IN_LINK_TRAIN_RUN 0x8
+/* enum: Issue KR link training command (control training coefficients) */
+#define MC_CMD_KR_TUNE_IN_LINK_TRAIN_CMD 0x9
/* Align the arguments to 32 bits */
#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1
#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3
@@ -12883,44 +13390,98 @@
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
/* enum: Attenuation (0-15, Huntington) */
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0
/* enum: CTLE Boost (0-15, Huntington) */
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1
/* enum: Edge DFE Tap1 (Huntington - 0 - max negative, 64 - zero, 127 - max
* positive, Medford - 0-31)
*/
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2
/* enum: Edge DFE Tap2 (Huntington - 0 - max negative, 32 - zero, 63 - max
* positive, Medford - 0-31)
*/
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3
/* enum: Edge DFE Tap3 (Huntington - 0 - max negative, 32 - zero, 63 - max
* positive, Medford - 0-16)
*/
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4
/* enum: Edge DFE Tap4 (Huntington - 0 - max negative, 32 - zero, 63 - max
* positive, Medford - 0-16)
*/
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5
/* enum: Edge DFE Tap5 (Huntington - 0 - max negative, 32 - zero, 63 - max
* positive, Medford - 0-16)
*/
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6
/* enum: Edge DFE DLEV (0-128 for Medford) */
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7
/* enum: Variable Gain Amplifier (0-15, Medford) */
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_VGA 0x8
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_VGA 0x8
/* enum: CTLE EQ Capacitor (0-15, Medford) */
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
/* enum: CTLE EQ Resistor (0-7, Medford) */
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
+/* enum: CTLE gain (0-31, Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_GAIN 0xb
+/* enum: CTLE pole (0-31, Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_POLE 0xc
+/* enum: CTLE peaking (0-31, Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_PEAK 0xd
+/* enum: DFE Tap1 - even path (Medford2 - 6 bit signed (-29 - +29)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_EVEN 0xe
+/* enum: DFE Tap1 - odd path (Medford2 - 6 bit signed (-29 - +29)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_ODD 0xf
+/* enum: DFE Tap2 (Medford2 - 6 bit signed (-20 - +20)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x10
+/* enum: DFE Tap3 (Medford2 - 6 bit signed (-20 - +20)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x11
+/* enum: DFE Tap4 (Medford2 - 6 bit signed (-20 - +20)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x12
+/* enum: DFE Tap5 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x13
+/* enum: DFE Tap6 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP6 0x14
+/* enum: DFE Tap7 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP7 0x15
+/* enum: DFE Tap8 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP8 0x16
+/* enum: DFE Tap9 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP9 0x17
+/* enum: DFE Tap10 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP10 0x18
+/* enum: DFE Tap11 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP11 0x19
+/* enum: DFE Tap12 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP12 0x1a
+/* enum: I/Q clk offset (Medford2 - 4 bit signed (-5 - +5)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_IQ_OFF 0x1b
+/* enum: Negative h1 polarity data sampler offset calibration code, even path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_EVEN 0x1c
+/* enum: Negative h1 polarity data sampler offset calibration code, odd path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_ODD 0x1d
+/* enum: Positive h1 polarity data sampler offset calibration code, even path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_EVEN 0x1e
+/* enum: Positive h1 polarity data sampler offset calibration code, odd path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_ODD 0x1f
+/* enum: CDR calibration loop code (Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_PVT 0x20
+/* enum: CDR integral loop code (Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_INTEG 0x21
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 11
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12
@@ -12985,39 +13546,39 @@
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: TX Amplitude (Huntington, Medford) */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0
+/* enum: TX Amplitude (Huntington, Medford, Medford2) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0
/* enum: De-Emphasis Tap1 Magnitude (0-7) (Huntington) */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1
/* enum: De-Emphasis Tap1 Fine */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2
/* enum: De-Emphasis Tap2 Magnitude (0-6) (Huntington) */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3
/* enum: De-Emphasis Tap2 Fine (Huntington) */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4
/* enum: Pre-Emphasis Magnitude (Huntington) */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5
/* enum: Pre-Emphasis Fine (Huntington) */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6
/* enum: TX Slew Rate Coarse control (Huntington) */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7
/* enum: TX Slew Rate Fine control (Huntington) */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8
/* enum: TX Termination Impedance control (Huntington) */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9
/* enum: TX Amplitude Fine control (Medford) */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa
-/* enum: Pre-shoot Tap (Medford) */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb
-/* enum: De-emphasis Tap (Medford) */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa
+/* enum: Pre-shoot Tap (Medford, Medford2) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb
+/* enum: De-emphasis Tap (Medford, Medford2) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_1 0x1 /* enum */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_2 0x2 /* enum */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_3 0x3 /* enum */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_LBN 11
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 5
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_LBN 16
@@ -13078,7 +13639,27 @@
/* Align the arguments to 32 bits */
#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+/* Port-relative lane to scan eye on */
#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_LEN 4
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN msgrequest */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LEN 12
+/* Requested operation */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_LEN 3
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_OFST 4
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_LEN 4
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_NUM_LBN 0
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_NUM_WIDTH 8
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_ABS_REL_LBN 31
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_ABS_REL_WIDTH 1
+/* Scan duration / cycle count */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_OFST 8
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_LEN 4
/* MC_CMD_KR_TUNE_START_EYE_PLOT_OUT msgresponse */
#define MC_CMD_KR_TUNE_START_EYE_PLOT_OUT_LEN 0
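The V2 eye-plot request above packs two sub-fields into the LANE dword: the lane number in bits 0-7 (LANE_NUM) and a selector bit 31 (ABS_REL). A hedged sketch of composing that dword from the LBN/WIDTH definitions follows, reusing the earlier hypothetical helpers; it assumes the V2 request is issued with the same MC_CMD_KR_TUNE_IN_START_EYE_PLOT operation code as the original form, and the polarity of the ABS_REL bit is not documented here. As the enum comments note, MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT is then issued repeatedly to collect rows of BER data until no more data is returned.

/* Reuses the hypothetical helpers sketched after MC_CMD_CAP_BLK_READ. */
static int kr_tune_start_eye_plot_v2(uint8_t lane_num, int lane_abs_rel,
				     uint32_t ber_cycles)
{
	uint8_t req[MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LEN];
	uint32_t lane;

	memset(req, 0, sizeof(req));
	/* One-byte operation selector; assumed to be the START_EYE_PLOT op. */
	req[MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_OFST] =
		MC_CMD_KR_TUNE_IN_START_EYE_PLOT;

	/* Compose the LANE dword from its LBN/WIDTH sub-fields. */
	lane = ((uint32_t)lane_num &
		((1u << MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_NUM_WIDTH) - 1))
	       << MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_NUM_LBN;
	if (lane_abs_rel)	/* meaning of this bit's polarity is assumed */
		lane |= 1u << MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_ABS_REL_LBN;
	mcdi_set_dword(req, MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_OFST, lane);

	/* Scan duration / cycle count. */
	mcdi_set_dword(req, MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_OFST,
		       ber_cycles);

	return mcdi_execute(MC_CMD_KR_TUNE, req, sizeof(req), NULL, 0, NULL);
}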
@@ -13110,10 +13691,91 @@
#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1
#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3
#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_LEN 4
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_NUM_LBN 0
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_NUM_WIDTH 8
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_ABS_REL_LBN 31
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_ABS_REL_WIDTH 1
/* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */
#define MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4
#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0
+#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_LEN 4
+
+/* MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN msgrequest */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_RSVD_LEN 3
+#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_RUN_OFST 4
+#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_RUN_LEN 4
+#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_STOP 0x0 /* enum */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_START 0x1 /* enum */
+
+/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN msgrequest */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LEN 28
+/* Requested operation */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_RSVD_LEN 3
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LANE_OFST 4
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LANE_LEN 4
+/* Set INITIALIZE state */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_INITIALIZE_OFST 8
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_INITIALIZE_LEN 4
+/* Set PRESET state */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_PRESET_OFST 12
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_PRESET_LEN 4
+/* C(-1) request */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CM1_OFST 16
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CM1_LEN 4
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_HOLD 0x0 /* enum */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_INCREMENT 0x1 /* enum */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_DECREMENT 0x2 /* enum */
+/* C(0) request */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_C0_OFST 20
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_C0_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */
+/* C(+1) request */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CP1_OFST 24
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CP1_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */
+
+/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT msgresponse */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_LEN 24
+/* C(-1) status */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_STATUS_OFST 0
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_STATUS_LEN 4
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_NOT_UPDATED 0x0 /* enum */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_UPDATED 0x1 /* enum */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_MINIMUM 0x2 /* enum */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_MAXIMUM 0x3 /* enum */
+/* C(0) status */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_STATUS_OFST 4
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_STATUS_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */
+/* C(+1) status */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_STATUS_OFST 8
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_STATUS_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */
+/* C(-1) value */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_VALUE_OFST 12
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_VALUE_LEN 4
+/* C(0) value */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_VALUE_OFST 16
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_VALUE_LEN 4
+/* C(+1) value */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_VALUE_OFST 20
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_VALUE_LEN 4
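The link-training command above carries one coefficient request (HOLD / INCREMENT / DECREMENT) per tap, C(-1), C(0) and C(+1), and the response reports a status and a value for each. A hedged sketch that requests an increment of C(0) while holding the other taps, again reusing the earlier hypothetical helpers (INITIALIZE and PRESET are simply left at zero):

/* Reuses the hypothetical helpers sketched after MC_CMD_CAP_BLK_READ. */
static int kr_link_train_increment_c0(uint32_t lane, uint32_t *c0_statusp,
				      uint32_t *c0_valuep)
{
	uint8_t req[MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LEN];
	uint8_t resp[MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_LEN];
	size_t resp_len;
	int rc;

	memset(req, 0, sizeof(req));
	req[MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_OP_OFST] =
		MC_CMD_KR_TUNE_IN_LINK_TRAIN_CMD;
	mcdi_set_dword(req, MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LANE_OFST, lane);
	/* Hold C(-1) and C(+1), request an increment of C(0). */
	mcdi_set_dword(req, MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CM1_OFST,
		       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_HOLD);
	mcdi_set_dword(req, MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_C0_OFST,
		       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_INCREMENT);
	mcdi_set_dword(req, MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CP1_OFST,
		       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_HOLD);

	rc = mcdi_execute(MC_CMD_KR_TUNE, req, sizeof(req),
			  resp, sizeof(resp), &resp_len);
	if (rc != 0 || resp_len < MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_LEN)
		return (rc != 0) ? rc : -1;

	*c0_statusp = mcdi_get_dword(resp,
			MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_STATUS_OFST);
	*c0_valuep = mcdi_get_dword(resp,
			MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_VALUE_OFST);
	return 0;
}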
/***********************************/
@@ -13123,7 +13785,7 @@
#define MC_CMD_PCIE_TUNE 0xf2
#undef MC_CMD_0xf2_PRIVILEGE_CTG
-#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_PCIE_TUNE_IN msgrequest */
#define MC_CMD_PCIE_TUNE_IN_LENMIN 4
@@ -13133,22 +13795,22 @@
#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0
#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1
/* enum: Get current RXEQ settings */
-#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0
+#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0
/* enum: Override RXEQ settings */
-#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1
+#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1
/* enum: Get current TX Driver settings */
-#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2
+#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2
/* enum: Override TX Driver settings */
-#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3
+#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3
/* enum: Start PCIe Serdes Eye diagram plot on a given lane. */
-#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5
+#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5
/* enum: Poll PCIe Serdes Eye diagram plot. Returns one row of BER data. The
* caller should call this command repeatedly after starting eye plot, until no
* more data is returned.
*/
-#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6
+#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6
/* enum: Enable the SERDES BIST and set it to generate a 200MHz square wave */
-#define MC_CMD_PCIE_TUNE_IN_BIST_SQUARE_WAVE 0x7
+#define MC_CMD_PCIE_TUNE_IN_BIST_SQUARE_WAVE 0x7
/* Align the arguments to 32 bits */
#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1
#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3
@@ -13182,46 +13844,46 @@
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
/* enum: Attenuation (0-15) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0
/* enum: CTLE Boost (0-15) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1
/* enum: DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2
/* enum: DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3
/* enum: DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4
/* enum: DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5
/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6
/* enum: DFE DLev */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7
/* enum: Figure of Merit */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8
/* enum: CTLE EQ Capacitor (HF Gain) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
/* enum: CTLE EQ Resistor (DC Gain) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 5
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 13
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 14
@@ -13285,15 +13947,15 @@
#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
/* enum: TxMargin (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0
/* enum: TxSwing (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1
/* enum: De-emphasis coefficient C(-1) (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2
/* enum: De-emphasis coefficient C(0) (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3
/* enum: De-emphasis coefficient C(+1) (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4
#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4
/* Enum values, see field(s): */
@@ -13312,6 +13974,7 @@
#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_LEN 4
/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */
#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0
@@ -13355,38 +14018,46 @@
#define MC_CMD_LICENSING_IN_LEN 4
/* identifies the type of operation requested */
#define MC_CMD_LICENSING_IN_OP_OFST 0
+#define MC_CMD_LICENSING_IN_OP_LEN 4
/* enum: re-read and apply licenses after a license key partition update; note
* that this operation returns a zero-length response
*/
-#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0
+#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0
/* enum: report counts of installed licenses */
-#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1
+#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1
/* MC_CMD_LICENSING_OUT msgresponse */
#define MC_CMD_LICENSING_OUT_LEN 28
/* count of application keys which are valid */
#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0
+#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_LEN 4
/* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with
* MC_CMD_FC_OP_LICENSE)
*/
#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4
+#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_LEN 4
/* count of application keys which are invalid due to being blacklisted */
#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8
+#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_LEN 4
/* count of application keys which are invalid due to being unverifiable */
#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12
+#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_LEN 4
/* count of application keys which are invalid due to being for the wrong node
*/
#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16
+#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_LEN 4
/* licensing state (for diagnostics; the exact meaning of the bits in this
* field are private to the firmware)
*/
#define MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20
+#define MC_CMD_LICENSING_OUT_LICENSING_STATE_LEN 4
/* licensing subsystem self-test report (for manftest) */
#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24
+#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_LEN 4
/* enum: licensing subsystem self-test failed */
-#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0
+#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0
/* enum: licensing subsystem self-test passed */
-#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1
+#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1
/***********************************/
@@ -13403,37 +14074,44 @@
#define MC_CMD_LICENSING_V3_IN_LEN 4
/* identifies the type of operation requested */
#define MC_CMD_LICENSING_V3_IN_OP_OFST 0
+#define MC_CMD_LICENSING_V3_IN_OP_LEN 4
/* enum: re-read and apply licenses after a license key partition update; note
* that this operation returns a zero-length response
*/
-#define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0
+#define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0
/* enum: report counts of installed licenses Returns EAGAIN if license
* processing (updating) has been started but not yet completed.
*/
-#define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1
+#define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1
/* MC_CMD_LICENSING_V3_OUT msgresponse */
#define MC_CMD_LICENSING_V3_OUT_LEN 88
/* count of keys which are valid */
#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0
+#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_LEN 4
/* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with
* MC_CMD_FC_OP_LICENSE)
*/
#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4
+#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_LEN 4
/* count of keys which are invalid due to being unverifiable */
#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8
+#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_LEN 4
/* count of keys which are invalid due to being for the wrong node */
#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12
+#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_LEN 4
/* licensing state (for diagnostics; the exact meaning of the bits in this
* field are private to the firmware)
*/
#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_LEN 4
/* licensing subsystem self-test report (for manftest) */
#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_LEN 4
/* enum: licensing subsystem self-test failed */
-#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0
/* enum: licensing subsystem self-test passed */
-#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_PASS 0x1
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_PASS 0x1
/* bitmask of licensed applications */
#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_OFST 24
#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LEN 8
@@ -13471,8 +14149,10 @@
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
/* type of license (eg 3) */
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_LEN 4
/* length of the license ID (in bytes) */
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_LEN 4
/* the unique license ID of the adapter */
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
@@ -13512,15 +14192,17 @@
#define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
/* application ID to query (LICENSED_APP_ID_xxx) */
#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_LEN 4
/* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */
#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4
/* state of this application */
#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_LEN 4
/* enum: no (or invalid) license is present for the application */
-#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0
/* enum: a valid license is present for the application */
-#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1
/***********************************/
@@ -13548,10 +14230,11 @@
#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
/* state of this application */
#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_LEN 4
/* enum: no (or invalid) license is present for the application */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
/* enum: a valid license is present for the application */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1
/***********************************/
@@ -13600,12 +14283,14 @@
#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
/* application ID */
#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_LEN 4
/* the type of operation requested */
#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_IN_OP_LEN 4
/* enum: validate application */
-#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
+#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
/* enum: mask application */
-#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1
+#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1
/* arguments specific to this particular operation */
#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
@@ -13626,8 +14311,10 @@
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
/* application ID */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_LEN 4
/* the type of operation requested */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_LEN 4
/* validation challenge */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
@@ -13636,6 +14323,7 @@
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
/* feature expiry (time_t) */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_LEN 4
/* validation response */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
@@ -13644,10 +14332,13 @@
#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
/* application ID */
#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_LEN 4
/* the type of operation requested */
#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_LEN 4
/* flag */
#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_LEN 4
/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
@@ -13686,12 +14377,14 @@
#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96
/* application expiry time */
#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_LEN 4
/* application expiry units */
#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_LEN 4
/* enum: expiry units are accounting units */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
/* enum: expiry units are calendar days */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1
/* base MAC address of the NIC stored in NVRAM (note that this is a constant
* value for a given NIC regardless which function is calling, effectively this
* is PF0 base MAC address)
@@ -13712,7 +14405,7 @@
#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
#undef MC_CMD_0xd5_PRIVILEGE_CTG
-#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12
@@ -13723,10 +14416,11 @@
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
/* whether to turn on or turn off the masked features */
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4
/* enum: turn the features off */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0
/* enum: turn the features back on */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1
/* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */
#define MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0
@@ -13743,29 +14437,31 @@
#define MC_CMD_LICENSING_V3_TEMPORARY 0xd6
#undef MC_CMD_0xd6_PRIVILEGE_CTG
-#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4
/* operation code */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_LEN 4
/* enum: install a new license, overwriting any existing temporary license.
* This is an asynchronous operation owing to the time taken to validate an
* ECDSA license
*/
-#define MC_CMD_LICENSING_V3_TEMPORARY_SET 0x0
+#define MC_CMD_LICENSING_V3_TEMPORARY_SET 0x0
/* enum: clear the license immediately rather than waiting for the next power
* cycle
*/
-#define MC_CMD_LICENSING_V3_TEMPORARY_CLEAR 0x1
+#define MC_CMD_LICENSING_V3_TEMPORARY_CLEAR 0x1
/* enum: get the status of the asynchronous MC_CMD_LICENSING_V3_TEMPORARY_SET
* operation
*/
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS 0x2
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS 0x2
/* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_LEN 4
/* ECDSA license and signature */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160
@@ -13773,23 +14469,26 @@
/* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_LEN 4
/* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_LEN 4
/* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12
/* status code */
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_LEN 4
/* enum: finished validating and installing license */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0
/* enum: license validation and installation in progress */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS 0x1
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS 0x1
/* enum: licensing error. More specific error messages are not provided to
* avoid exposing details of the licensing system to the client
*/
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_ERROR 0x2
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_ERROR 0x2
/* bitmask of licensed features */
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_OFST 4
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LEN 8
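As the comments above note, installing a temporary license is asynchronous: the SET operation starts ECDSA validation, and the caller then issues the STATUS operation until it stops reporting IN_PROGRESS. A hedged sketch of that flow, reusing the earlier hypothetical helpers; the usleep() poll interval is purely illustrative:

#include <unistd.h>	/* usleep(), illustrative poll interval only */

/* Reuses the hypothetical helpers sketched after MC_CMD_CAP_BLK_READ. */
static int licensing_v3_install_temporary(const uint8_t *license /* 160 bytes */)
{
	uint8_t req[MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN];
	uint8_t poll[MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN];
	uint8_t resp[MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN];
	size_t resp_len;
	uint32_t status;
	int rc;

	/* Kick off the asynchronous install of the ECDSA license blob. */
	memset(req, 0, sizeof(req));
	mcdi_set_dword(req, MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST,
		       MC_CMD_LICENSING_V3_TEMPORARY_SET);
	memcpy(req + MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST,
	       license, MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN);
	rc = mcdi_execute(MC_CMD_LICENSING_V3_TEMPORARY, req, sizeof(req),
			  NULL, 0, NULL);
	if (rc != 0)
		return rc;

	/* Poll the STATUS operation until validation completes. */
	do {
		usleep(10000);
		memset(poll, 0, sizeof(poll));
		mcdi_set_dword(poll,
			       MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST,
			       MC_CMD_LICENSING_V3_TEMPORARY_STATUS);
		rc = mcdi_execute(MC_CMD_LICENSING_V3_TEMPORARY,
				  poll, sizeof(poll),
				  resp, sizeof(resp), &resp_len);
		if (rc != 0)
			return rc;
		status = mcdi_get_dword(resp,
			MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST);
	} while (status == MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS);

	return (status == MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK) ? 0 : -1;
}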
@@ -13814,23 +14513,27 @@
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
/* configuration flags */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
/* receive queue handle (for RSS mode, this is the base queue) */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
/* enum: receive to just the specified queue */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
/* enum: receive to multiple queues using RSS context */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
* that these handles should be considered opaque to the host, although a value
* of 0xFFFFFFFF is guaranteed never to be a valid handle.
*/
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0
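A hedged sketch of the simplest configuration above, enabling port sniffing to a single receive queue (RX_MODE_SIMPLE) with no RSS context, reusing the earlier hypothetical helpers. MC_CMD_SET_PORT_SNIFF_CONFIG is assumed to be the command macro matching these IN/OUT definitions; 0xffffffff is used for the unused RSS context only because the header guarantees it is never a valid handle.

/* Reuses the hypothetical helpers sketched after MC_CMD_CAP_BLK_READ. */
static int set_port_sniff_to_queue(uint32_t rx_queue, int promiscuous)
{
	uint8_t req[MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN];
	uint32_t flags;

	memset(req, 0, sizeof(req));
	/* FLAGS is a bitfield: ENABLE in bit 0, PROMISCUOUS in bit 1. */
	flags = 1u << MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN;
	if (promiscuous)
		flags |= 1u << MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN;
	mcdi_set_dword(req, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST, flags);

	mcdi_set_dword(req, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST,
		       rx_queue);
	mcdi_set_dword(req, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST,
		       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE);
	/* No RSS context in SIMPLE mode; 0xffffffff is never a valid handle. */
	mcdi_set_dword(req, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST,
		       0xffffffffu);

	return mcdi_execute(MC_CMD_SET_PORT_SNIFF_CONFIG, req, sizeof(req),
			    NULL, 0, NULL);
}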
@@ -13845,7 +14548,7 @@
#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8
#undef MC_CMD_0xf8_PRIVILEGE_CTG
-#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0
@@ -13854,20 +14557,24 @@
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16
/* configuration flags */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
/* receiving queue handle (for RSS mode, this is the base queue) */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
/* enum: receiving to just the specified queue */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
/* enum: receiving to multiple queues using RSS context */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
/* RSS context (for RX_MODE_RSS) */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
/***********************************/
@@ -13885,19 +14592,21 @@
#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num))
/* the type of configuration setting to change */
#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
/* enum: Per-TXQ enable for multicast UDP destination lookup for possible
* internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.)
*/
-#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN 0x0
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN 0x0
/* enum: Per-v-adaptor enable for suppression of self-transmissions on the
* internal loopback path. (ENTITY is an EVB_PORT_ID, VALUE is a single
* boolean.)
*/
-#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX 0x1
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX 0x1
/* handle for the entity to update: queue handle, EVB port ID, etc. depending
* on the type of configuration setting being changed
*/
#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
/* new value: the details depend on the type of configuration setting being
* changed
*/
@@ -13923,12 +14632,14 @@
#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
/* the type of configuration setting to read */
#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
/* handle for the entity to query: queue handle, EVB port ID, etc. depending on
* the type of configuration setting being read
*/
#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
@@ -13962,21 +14673,25 @@
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16
/* configuration flags */
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
/* receive queue handle (for RSS mode, this is the base queue) */
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
/* enum: receive to just the specified queue */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
/* enum: receive to multiple queues using RSS context */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
* that these handles should be considered opaque to the host, although a value
* of 0xFFFFFFFF is guaranteed never to be a valid handle.
*/
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0
@@ -13991,7 +14706,7 @@
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc
#undef MC_CMD_0xfc_PRIVILEGE_CTG
-#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0
@@ -14000,18 +14715,22 @@
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
/* configuration flags */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
/* receiving queue handle (for RSS mode, this is the base queue) */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
/* enum: receiving to just the specified queue */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
/* enum: receiving to multiple queues using RSS context */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
/* RSS context (for RX_MODE_RSS) */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
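/*
 * Illustrative sketch only (not part of this patch): every field in these
 * messages now carries an _OFST/_LEN pair describing a little-endian dword in
 * the MCDI payload. Assuming this header is included, a caller could decode
 * MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT roughly as follows; rd_le32() is a
 * hypothetical local helper, not an existing driver API.
 */
#include <stddef.h>
#include <stdint.h>

static uint32_t rd_le32(const uint8_t *buf, size_t ofst)
{
	/* MCDI payloads are little-endian irrespective of host byte order */
	return (uint32_t)buf[ofst] | ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) | ((uint32_t)buf[ofst + 3] << 24);
}

static int tx_sniff_enabled(const uint8_t *resp, size_t resp_len)
{
	if (resp_len < MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN)
		return -1;		/* short response */

	uint32_t flags = rd_le32(resp,
	    MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST);

	/* ENABLE is a 1-bit field inside FLAGS (LBN 0, WIDTH 1) */
	return (flags >> MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN) & 0x1;
}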
/***********************************/
@@ -14027,16 +14746,22 @@
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
/* The rx queue to get stats for. */
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_LEN 4
/***********************************/
@@ -14044,6 +14769,9 @@
* Find out about available PCIE resources
*/
#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
+#undef MC_CMD_0xfd_PRIVILEGE_CTG
+
+#define MC_CMD_0xfd_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
@@ -14052,20 +14780,27 @@
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
/* The maximum number of PFs the device can expose */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_LEN 4
/* The maximum number of VFs the device can expose in total */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_LEN 4
/* The maximum number of MSI-X vectors the device can provide in total */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_LEN 4
/* the number of MSI-X vectors the device will allocate by default to each PF
*/
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_LEN 4
/* the number of MSI-X vectors the device will allocate by default to each VF
*/
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_LEN 4
/* the maximum number of MSI-X vectors the device can allocate to any one PF */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_LEN 4
/* the maximum number of MSI-X vectors the device can allocate to any one VF */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_LEN 4
/***********************************/
@@ -14084,10 +14819,13 @@
#define MC_CMD_GET_PORT_MODES_OUT_LEN 12
/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */
#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
+#define MC_CMD_GET_PORT_MODES_OUT_MODES_LEN 4
/* Default (canonical) board mode */
#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
+#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_LEN 4
/* Current board mode */
#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
+#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_LEN 4
/***********************************/
@@ -14097,21 +14835,26 @@
#define MC_CMD_READ_ATB 0x100
#undef MC_CMD_0x100_PRIVILEGE_CTG
-#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_READ_ATB_IN msgrequest */
#define MC_CMD_READ_ATB_IN_LEN 16
#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
-#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */
-#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */
-#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */
+#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_LEN 4
+#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */
+#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */
+#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */
#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
+#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_LEN 4
#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
+#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_LEN 4
#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
+#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_LEN 4
/* MC_CMD_READ_ATB_OUT msgresponse */
#define MC_CMD_READ_ATB_OUT_LEN 4
#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
+#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_LEN 4
/***********************************/
@@ -14129,7 +14872,9 @@
/* Each workaround is represented by a single bit according to the enums below.
*/
#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_LEN 4
#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_LEN 4
/* enum: Bug 17230 work around. */
#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
/* enum: Bug 35388 work around (unsafe EVQ writes). */
@@ -14165,50 +14910,63 @@
* 1,3 = 0x00030001
*/
#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_LEN 4
#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0
#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16
#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16
#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16
-#define MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */
/* New privilege mask to be set. The mask will only be changed if the MSB is
* set to 1.
*/
#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_LEN 4
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */
/* enum: Deprecated. Equivalent to MAC_SPOOFING_TX combined with CHANGE_MAC. */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */
/* enum: Allows to set the TX packets' source MAC address to any arbitrary MAC
 * address.
*/
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX 0x800
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX 0x800
/* enum: Privilege that allows a Function to change the MAC address configured
* in its associated vAdapter/vPort.
*/
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC 0x1000
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC 0x1000
/* enum: Privilege that allows a Function to install filters that specify VLANs
* that are not in the permit list for the associated vPort. This privilege is
* primarily to support ESX where vPorts are created that restrict traffic to
* only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT.
*/
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000
+/* enum: Privilege for insecure commands. Commands that belong to this group
+ * are not permitted on secure adapters regardless of the privilege mask.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE 0x4000
+/* enum: Trusted Server Adapter (TSA) / ServerLock. Privilege for
+ * administrator-level operations that are not allowed from the local host once
+ * an adapter has been bound to a remote ServerLock Controller (see doxbox
+ * SF-117064-DG for background).
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN_TSA_UNBOUND 0x8000
/* enum: Set this bit to indicate that a new privilege mask is to be set,
* otherwise the command will only read the existing mask.
*/
-#define MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE 0x80000000
+#define MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE 0x80000000
/* MC_CMD_PRIVILEGE_MASK_OUT msgresponse */
#define MC_CMD_PRIVILEGE_MASK_OUT_LEN 4
/* For an admin function, always all the privileges are reported. */
#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_LEN 4
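/*
 * Illustrative sketch only: composing the two dwords of
 * MC_CMD_PRIVILEGE_MASK_IN. FUNCTION packs the PF index in bits 0..15 and the
 * VF index in bits 16..31 (VF_NULL addresses the PF itself); NEW_MASK is only
 * applied when its MSB, DO_CHANGE, is set. Writing the values into the request
 * buffer at FUNCTION_OFST/NEW_MASK_OFST is left to the driver's MCDI
 * transport and is not shown here.
 */
#include <stdint.h>

static uint32_t privilege_mask_function(unsigned int pf, unsigned int vf)
{
	return ((uint32_t)pf << MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN) |
	       ((uint32_t)vf << MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN);
}

static uint32_t privilege_mask_grant_link_ptp(void)
{
	/* Request LINK and PTP privileges; DO_CHANGE asks the MC to apply
	 * the new mask rather than only reporting the existing one.
	 */
	return MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK |
	       MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP |
	       MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE;
}

/* e.g. address PF 1 directly:
 * uint32_t fn = privilege_mask_function(1, MC_CMD_PRIVILEGE_MASK_IN_VF_NULL);
 */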
/***********************************/
@@ -14226,27 +14984,30 @@
* e.g. VF 1,3 = 0x00030001
*/
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_LEN 4
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
/* New link state mode to be set */
#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
-#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
-#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
-#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_LEN 4
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
/* enum: Use this value to just read the existing setting without modifying it.
*/
-#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff
+#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff
/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4
#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_LEN 4
/***********************************/
/* MC_CMD_GET_SNAPSHOT_LENGTH
- * Obtain the curent range of allowable values for the SNAPSHOT_LENGTH
+ * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
* parameter to MC_CMD_INIT_RXQ.
*/
#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
@@ -14261,8 +15022,10 @@
#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
/* Minimum acceptable snapshot length. */
#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_LEN 4
/* Maximum acceptable snapshot length. */
#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_LEN 4
/***********************************/
@@ -14272,7 +15035,7 @@
#define MC_CMD_FUSE_DIAGS 0x102
#undef MC_CMD_0x102_PRIVILEGE_CTG
-#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_FUSE_DIAGS_IN msgrequest */
#define MC_CMD_FUSE_DIAGS_IN_LEN 0
@@ -14281,28 +15044,40 @@
#define MC_CMD_FUSE_DIAGS_OUT_LEN 48
/* Total number of mismatched bits between pairs in area 0 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_LEN 4
/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_LEN 4
/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_LEN 4
/* Checksum of data after logical OR of pairs in area 0 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_LEN 4
/* Total number of mismatched bits between pairs in area 1 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_LEN 4
/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_LEN 4
/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_LEN 4
/* Checksum of data after logical OR of pairs in area 1 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_LEN 4
/* Total number of mismatched bits between pairs in area 2 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_LEN 4
/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_LEN 4
/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_LEN 4
/* Checksum of data after logical OR of pairs in area 2 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_LEN 4
/***********************************/
@@ -14320,14 +15095,16 @@
#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
/* The groups of functions to have their privilege masks modified. */
#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
-#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_LEN 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */
/* For VFS_OF_PF specify the PF, for ONE specify the target function */
#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4
#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
@@ -14336,10 +15113,12 @@
* refer to the command MC_CMD_PRIVILEGE_MASK
*/
#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_LEN 4
/* Privileges to be removed from the target functions. For privilege
* definitions refer to the command MC_CMD_PRIVILEGE_MASK
*/
#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
+#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_LEN 4
/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
@@ -14358,8 +15137,10 @@
#define MC_CMD_XPM_READ_BYTES_IN_LEN 8
/* Start address (byte) */
#define MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0
+#define MC_CMD_XPM_READ_BYTES_IN_ADDR_LEN 4
/* Count (bytes) */
#define MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4
+#define MC_CMD_XPM_READ_BYTES_IN_COUNT_LEN 4
/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
#define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
@@ -14379,7 +15160,7 @@
#define MC_CMD_XPM_WRITE_BYTES 0x104
#undef MC_CMD_0x104_PRIVILEGE_CTG
-#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
#define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
@@ -14387,8 +15168,10 @@
#define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
/* Start address (byte) */
#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
+#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_LEN 4
/* Count (bytes) */
#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4
+#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_LEN 4
/* Data */
#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8
#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
@@ -14406,14 +15189,16 @@
#define MC_CMD_XPM_READ_SECTOR 0x105
#undef MC_CMD_0x105_PRIVILEGE_CTG
-#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
#define MC_CMD_XPM_READ_SECTOR_IN_LEN 8
/* Sector index */
#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
+#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_LEN 4
/* Sector size */
#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
+#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_LEN 4
/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
#define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
@@ -14421,10 +15206,12 @@
#define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
/* Sector type */
#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
-#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_LEN 4
+#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_DATA 0x3 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */
/* Sector data */
#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1
@@ -14439,7 +15226,7 @@
#define MC_CMD_XPM_WRITE_SECTOR 0x106
#undef MC_CMD_0x106_PRIVILEGE_CTG
-#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
@@ -14456,10 +15243,12 @@
#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3
/* Sector type */
#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
+#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
/* Sector size */
#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
+#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_LEN 4
/* Sector data */
#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12
#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
@@ -14470,6 +15259,7 @@
#define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
/* New sector index */
#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0
+#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_LEN 4
/***********************************/
@@ -14479,12 +15269,13 @@
#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
#undef MC_CMD_0x107_PRIVILEGE_CTG
-#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */
#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4
/* Sector index */
#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0
+#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_LEN 4
/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */
#define MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0
@@ -14497,14 +15288,16 @@
#define MC_CMD_XPM_BLANK_CHECK 0x108
#undef MC_CMD_0x108_PRIVILEGE_CTG
-#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */
#define MC_CMD_XPM_BLANK_CHECK_IN_LEN 8
/* Start address (byte) */
#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0
+#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_LEN 4
/* Count (bytes) */
#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4
+#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_LEN 4
/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
@@ -14512,6 +15305,7 @@
#define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
/* Total number of bad (non-blank) locations */
#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_LEN 4
/* Addresses of bad locations (may be less than BAD_COUNT, if all cannot fit
* into MCDI response)
*/
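/*
 * Illustrative sketch only: MC_CMD_XPM_BLANK_CHECK_OUT is a variable-length
 * response. Its _LEN(num) formula (4 + 2 * num) implies a 4-byte BAD_COUNT
 * dword followed by num 16-bit addresses, so the number of addresses actually
 * returned can be recovered from the response length; it may be smaller than
 * BAD_COUNT when the full list does not fit in one MCDI response.
 */
#include <stddef.h>

static size_t xpm_blank_check_num_addrs(size_t resp_len)
{
	if (resp_len < MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN)
		return 0;
	/* Invert LEN(num) = 4 + 2 * num */
	return (resp_len - MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN) / 2;
}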
@@ -14528,14 +15322,16 @@
#define MC_CMD_XPM_REPAIR 0x109
#undef MC_CMD_0x109_PRIVILEGE_CTG
-#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_REPAIR_IN msgrequest */
#define MC_CMD_XPM_REPAIR_IN_LEN 8
/* Start address (byte) */
#define MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0
+#define MC_CMD_XPM_REPAIR_IN_ADDR_LEN 4
/* Count (bytes) */
#define MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4
+#define MC_CMD_XPM_REPAIR_IN_COUNT_LEN 4
/* MC_CMD_XPM_REPAIR_OUT msgresponse */
#define MC_CMD_XPM_REPAIR_OUT_LEN 0
@@ -14549,7 +15345,7 @@
#define MC_CMD_XPM_DECODER_TEST 0x10a
#undef MC_CMD_0x10a_PRIVILEGE_CTG
-#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
#define MC_CMD_XPM_DECODER_TEST_IN_LEN 0
@@ -14569,7 +15365,7 @@
#define MC_CMD_XPM_WRITE_TEST 0x10b
#undef MC_CMD_0x10b_PRIVILEGE_CTG
-#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
#define MC_CMD_XPM_WRITE_TEST_IN_LEN 0
@@ -14590,16 +15386,19 @@
#define MC_CMD_EXEC_SIGNED 0x10c
#undef MC_CMD_0x10c_PRIVILEGE_CTG
-#define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_EXEC_SIGNED_IN msgrequest */
#define MC_CMD_EXEC_SIGNED_IN_LEN 28
/* the length of code to include in the CMAC */
#define MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0
+#define MC_CMD_EXEC_SIGNED_IN_CODELEN_LEN 4
/* the length of data to include in the CMAC */
#define MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4
+#define MC_CMD_EXEC_SIGNED_IN_DATALEN_LEN 4
/* the XPM sector containing the key to use */
#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8
+#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_LEN 4
/* the expected CMAC value */
#define MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12
#define MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16
@@ -14617,12 +15416,13 @@
#define MC_CMD_PREPARE_SIGNED 0x10d
#undef MC_CMD_0x10d_PRIVILEGE_CTG
-#define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_PREPARE_SIGNED_IN msgrequest */
#define MC_CMD_PREPARE_SIGNED_IN_LEN 4
/* the length of data area to clear */
#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0
+#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_LEN 4
/* MC_CMD_PREPARE_SIGNED_OUT msgresponse */
#define MC_CMD_PREPARE_SIGNED_OUT_LEN 0
@@ -14639,12 +15439,13 @@
#define MC_CMD_SET_SECURITY_RULE 0x10f
#undef MC_CMD_0x10f_PRIVILEGE_CTG
-#define MC_CMD_0x10f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x10f_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_SET_SECURITY_RULE_IN msgrequest */
#define MC_CMD_SET_SECURITY_RULE_IN_LEN 92
/* fields to include in match criteria */
#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_FIELDS_OFST 0
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_FIELDS_LEN 4
#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_IP_LBN 0
#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_IP_WIDTH 1
#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_IP_LBN 1
@@ -14701,8 +15502,10 @@
#define MC_CMD_SET_SECURITY_RULE_IN_IP_PROTO_LEN 2
/* Physical port to match (as little-endian 32-bit value) */
#define MC_CMD_SET_SECURITY_RULE_IN_PHYSICAL_PORT_OFST 28
+#define MC_CMD_SET_SECURITY_RULE_IN_PHYSICAL_PORT_LEN 4
/* Reserved; set to 0 */
#define MC_CMD_SET_SECURITY_RULE_IN_RESERVED_OFST 32
+#define MC_CMD_SET_SECURITY_RULE_IN_RESERVED_LEN 4
/* remote IP address to match (as bytes in network order; set last 12 bytes to
* 0 for IPv4 address)
*/
@@ -14719,58 +15522,85 @@
* MC_CMD_SUBNET_MAP_SET_NODE appropriately
*/
#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_SUBNET_ID_OFST 68
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_SUBNET_ID_LEN 4
/* remote portrange ID to match (as little-endian 32-bit value); note that
* remote port ranges are matched by mapping the remote port to a "portrange
* ID" via a data structure which must already have been configured using
* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE
*/
#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORTRANGE_ID_OFST 72
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORTRANGE_ID_LEN 4
/* local portrange ID to match (as little-endian 32-bit value); note that local
* port ranges are matched by mapping the local port to a "portrange ID" via a
* data structure which must already have been configured using
* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE
*/
#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORTRANGE_ID_OFST 76
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORTRANGE_ID_LEN 4
/* set the action for transmitted packets matching this rule */
#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_OFST 80
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_LEN 4
/* enum: make no decision */
-#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_NONE 0x0
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_NONE 0x0
/* enum: decide to accept the packet */
-#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_WHITELIST 0x1
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_WHITELIST 0x1
/* enum: decide to drop the packet */
-#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_BLACKLIST 0x2
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_BLACKLIST 0x2
+/* enum: inform the TSA controller about some sample of packets matching this
+ * rule (via MC_CMD_TSA_INFO_IN_PKT_SAMPLE messages); may be bitwise-ORed with
+ * either the WHITELIST or BLACKLIST action
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_SAMPLE 0x4
/* enum: do not change the current TX action */
-#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_UNCHANGED 0xffffffff
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_UNCHANGED 0xffffffff
/* set the action for received packets matching this rule */
#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_OFST 84
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_LEN 4
/* enum: make no decision */
-#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_NONE 0x0
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_NONE 0x0
/* enum: decide to accept the packet */
-#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_WHITELIST 0x1
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_WHITELIST 0x1
/* enum: decide to drop the packet */
-#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_BLACKLIST 0x2
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_BLACKLIST 0x2
+/* enum: inform the TSA controller about some sample of packets matching this
+ * rule (via MC_CMD_TSA_INFO_IN_PKT_SAMPLE messages); may be bitwise-ORed with
+ * either the WHITELIST or BLACKLIST action
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_SAMPLE 0x4
/* enum: do not change the current RX action */
-#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_UNCHANGED 0xffffffff
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_UNCHANGED 0xffffffff
/* counter ID to associate with this rule; IDs are allocated using
* MC_CMD_SECURITY_RULE_COUNTER_ALLOC
*/
#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_OFST 88
+#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_LEN 4
/* enum: special value for the null counter ID */
-#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_NONE 0x0
+#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_NONE 0x0
+/* enum: special value to tell the MC to allocate an available counter */
+#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_SW_AUTO 0xeeeeeeee
+/* enum: special value to request use of hardware counter (Medford2 only) */
+#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_HW 0xffffffff
/* MC_CMD_SET_SECURITY_RULE_OUT msgresponse */
-#define MC_CMD_SET_SECURITY_RULE_OUT_LEN 28
+#define MC_CMD_SET_SECURITY_RULE_OUT_LEN 32
/* new reference count for uses of counter ID */
#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_REFCNT_OFST 0
+#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_REFCNT_LEN 4
/* constructed match bits for this rule (as a tracing aid only) */
#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_MATCH_BITS_OFST 4
#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_MATCH_BITS_LEN 12
/* constructed discriminator bits for this rule (as a tracing aid only) */
#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_DISCRIMINATOR_OFST 16
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_DISCRIMINATOR_LEN 4
/* base location for probes for this rule (as a tracing aid only) */
#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_BASE_OFST 20
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_BASE_LEN 4
/* step for probes for this rule (as a tracing aid only) */
#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_STEP_OFST 24
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_STEP_LEN 4
+/* ID for reading back the counter */
+#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_ID_OFST 28
+#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_ID_LEN 4
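/*
 * Illustrative sketch only: with this patch the TX/RX actions gain a SAMPLE
 * flag that may be bitwise-ORed with WHITELIST or BLACKLIST, and COUNTER_ID
 * gains the SW_AUTO value that asks the MC to pick a free counter; the
 * allocated ID can then be read back from the (now 32-byte) response at
 * MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_ID_OFST.
 */
#include <stdint.h>

struct sec_rule_actions {
	uint32_t tx_action;
	uint32_t counter_id;
};

static struct sec_rule_actions sample_and_whitelist(void)
{
	struct sec_rule_actions a;

	/* Accept matching TX packets and also sample them to the controller */
	a.tx_action = MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_WHITELIST |
		      MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_SAMPLE;
	/* Ask the MC to allocate a counter instead of passing an existing ID */
	a.counter_id = MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_SW_AUTO;
	return a;
}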
/***********************************/
@@ -14784,14 +15614,15 @@
#define MC_CMD_RESET_SECURITY_RULES 0x110
#undef MC_CMD_0x110_PRIVILEGE_CTG
-#define MC_CMD_0x110_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x110_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_RESET_SECURITY_RULES_IN msgrequest */
#define MC_CMD_RESET_SECURITY_RULES_IN_LEN 4
/* index of physical port to reset (or ALL_PHYSICAL_PORTS to reset all) */
#define MC_CMD_RESET_SECURITY_RULES_IN_PHYSICAL_PORT_OFST 0
+#define MC_CMD_RESET_SECURITY_RULES_IN_PHYSICAL_PORT_LEN 4
/* enum: special value to reset all physical ports */
-#define MC_CMD_RESET_SECURITY_RULES_IN_ALL_PHYSICAL_PORTS 0xffffffff
+#define MC_CMD_RESET_SECURITY_RULES_IN_ALL_PHYSICAL_PORTS 0xffffffff
/* MC_CMD_RESET_SECURITY_RULES_OUT msgresponse */
#define MC_CMD_RESET_SECURITY_RULES_OUT_LEN 0
@@ -14836,12 +15667,13 @@
#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC 0x112
#undef MC_CMD_0x112_PRIVILEGE_CTG
-#define MC_CMD_0x112_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x112_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN msgrequest */
#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_LEN 4
/* the number of new counter IDs to request */
#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_NUM_COUNTERS_OFST 0
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_NUM_COUNTERS_LEN 4
/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT msgresponse */
#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LENMIN 4
@@ -14851,6 +15683,7 @@
* requested if resources are unavailable)
*/
#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_NUM_COUNTERS_OFST 0
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_NUM_COUNTERS_LEN 4
/* new counter ID(s) */
#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_OFST 4
#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_LEN 4
@@ -14869,7 +15702,7 @@
#define MC_CMD_SECURITY_RULE_COUNTER_FREE 0x113
#undef MC_CMD_0x113_PRIVILEGE_CTG
-#define MC_CMD_0x113_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x113_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_SECURITY_RULE_COUNTER_FREE_IN msgrequest */
#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LENMIN 4
@@ -14877,6 +15710,7 @@
#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LEN(num) (4+4*(num))
/* the number of counter IDs to free */
#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_NUM_COUNTERS_OFST 0
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_NUM_COUNTERS_LEN 4
/* the counter ID(s) to free */
#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_OFST 4
#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_LEN 4
@@ -14900,7 +15734,7 @@
#define MC_CMD_SUBNET_MAP_SET_NODE 0x114
#undef MC_CMD_0x114_PRIVILEGE_CTG
-#define MC_CMD_0x114_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x114_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_SUBNET_MAP_SET_NODE_IN msgrequest */
#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LENMIN 6
@@ -14908,6 +15742,7 @@
#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LEN(num) (4+2*(num))
/* node to update in the range 0 .. SUBNET_MAP_NUM_NODES-1 */
#define MC_CMD_SUBNET_MAP_SET_NODE_IN_NODE_ID_OFST 0
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_NODE_ID_LEN 4
/* SUBNET_MAP_NUM_ENTRIES_PER_NODE new entries; each entry is either a pointer
* to the next node, expressed as an offset in the trie memory (i.e. node ID
* multiplied by SUBNET_MAP_NUM_ENTRIES_PER_NODE), or a leaf value in the range
@@ -14928,7 +15763,7 @@
*/
#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_OFST 0
#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_LEN 2
-#define PORTRANGE_TREE_ENTRY_LEAF_NODE_KEY 0xffff /* enum */
+#define PORTRANGE_TREE_ENTRY_LEAF_NODE_KEY 0xffff /* enum */
#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_LBN 0
#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_WIDTH 16
/* final portrange ID for leaf nodes (don't care for branch nodes) */
@@ -14951,7 +15786,7 @@
#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE 0x115
#undef MC_CMD_0x115_PRIVILEGE_CTG
-#define MC_CMD_0x115_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x115_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN msgrequest */
#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LENMIN 4
@@ -14982,7 +15817,7 @@
#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE 0x116
#undef MC_CMD_0x116_PRIVILEGE_CTG
-#define MC_CMD_0x116_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x116_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN msgrequest */
#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LENMIN 4
@@ -15005,18 +15840,18 @@
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
/* enum: the IANA allocated UDP port for VXLAN */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5
/* enum: the IANA allocated UDP port for Geneve */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
/* tunnel encapsulation protocol (only those named below are supported) */
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
/* enum: This port will be used for VXLAN on both IPv4 and IPv6 */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0
/* enum: This port will be used for Geneve on both IPv4 and IPv6 */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
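/*
 * Illustrative sketch only: a TUNNEL_ENCAP_UDP_PORT_ENTRY is a single 32-bit
 * word with the UDP port in bits 0..15 and the protocol in bits 16..31, as
 * described by the LBN/WIDTH macros above.
 */
#include <stdint.h>

static uint32_t tunnel_encap_entry(uint16_t udp_port, uint16_t protocol)
{
	return ((uint32_t)udp_port << TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN) |
	       ((uint32_t)protocol << TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN);
}

/* e.g. the IANA VXLAN port handled as VXLAN:
 * tunnel_encap_entry(TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT,
 *                    TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN);
 */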
@@ -15073,18 +15908,22 @@
#define MC_CMD_RX_BALANCING 0x118
#undef MC_CMD_0x118_PRIVILEGE_CTG
-#define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_RX_BALANCING_IN msgrequest */
#define MC_CMD_RX_BALANCING_IN_LEN 16
/* The RX port whose upconverter table will be modified */
#define MC_CMD_RX_BALANCING_IN_PORT_OFST 0
+#define MC_CMD_RX_BALANCING_IN_PORT_LEN 4
/* The VLAN priority associated to the table index and vFIFO */
#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4
+#define MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 4
/* The resulting bit of SRC^DST for indexing the table */
#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8
+#define MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 4
/* The RX engine to which the vFIFO in the table entry will point to */
#define MC_CMD_RX_BALANCING_IN_ENG_OFST 12
+#define MC_CMD_RX_BALANCING_IN_ENG_LEN 4
/* MC_CMD_RX_BALANCING_OUT msgresponse */
#define MC_CMD_RX_BALANCING_OUT_LEN 0
@@ -15093,12 +15932,7 @@
/***********************************/
/* MC_CMD_TSA_BIND
* TSAN - TSAC binding communication protocol. Refer to SF-115479-TC for more
- * info in respect to the binding protocol. This MCDI command is only available
- * over a TLS secure connection between the TSAN and TSAC, and is not available
- * to host software. Note- The messages definitions that do comprise this MCDI
- * command deemed as provisional. This MCDI command has not yet been used in
- * any released code and may change during development. This note will be
- * removed once it is regarded as stable.
+ * info with respect to the binding protocol.
*/
#define MC_CMD_TSA_BIND 0x119
#undef MC_CMD_0x119_PRIVILEGE_CTG
@@ -15108,15 +15942,13 @@
/* MC_CMD_TSA_BIND_IN msgrequest: Protocol operation code */
#define MC_CMD_TSA_BIND_IN_LEN 4
#define MC_CMD_TSA_BIND_IN_OP_OFST 0
-/* enum: Retrieve the TSAN ID from a TSAN. TSAN ID is a unique identifier for
- * the network adapter. More specifically, TSAN ID equals the MAC address of
- * the network adapter. TSAN ID is used as part of the TSAN authentication
- * protocol. Refer to SF-114946-SW for more information.
- */
+#define MC_CMD_TSA_BIND_IN_OP_LEN 4
+/* enum: Obsolete. Use MC_CMD_SECURE_NIC_INFO_IN_STATUS. */
#define MC_CMD_TSA_BIND_OP_GET_ID 0x1
/* enum: Get a binding ticket from the TSAN. The binding ticket is used as part
* of the binding procedure to authorize the binding of an adapter to a TSAID.
- * Refer to SF-114946-SW for more information.
+ * Refer to SF-114946-SW for more information. This sub-command is only
+ * available over a TLS secure connection between the TSAN and TSAC.
*/
#define MC_CMD_TSA_BIND_OP_GET_TICKET 0x2
/* enum: Opcode associated with the propagation of a private key that TSAN uses
@@ -15124,18 +15956,47 @@
* uses this key for a signing operation. TSAC uses the counterpart public key
* to verify the signature. Note - The post-binding authentication occurs when
* the TSAN-TSAC connection terminates and TSAN tries to reconnect. Refer to
- * SF-114946-SW for more information.
+ * SF-114946-SW for more information. This sub-command is only available over a
+ * TLS secure connection between the TSAN and TSAC.
*/
#define MC_CMD_TSA_BIND_OP_SET_KEY 0x3
-/* enum: Request an unbinding operation. Note- TSAN clears the binding ticket
- * from the Nvram section.
+/* enum: Request an insecure unbinding operation. This sub-command is available
+ * for any privileged client.
*/
#define MC_CMD_TSA_BIND_OP_UNBIND 0x4
-
-/* MC_CMD_TSA_BIND_IN_GET_ID msgrequest */
+/* enum: Obsolete. Use MC_CMD_TSA_BIND_OP_SECURE_UNBIND. */
+#define MC_CMD_TSA_BIND_OP_UNBIND_EXT 0x5
+/* enum: Opcode associated with the propagation of the unbinding secret token.
+ * TSAN persists the unbinding secret token. Refer to SF-115479-TC for more
+ * information. This sub-command is only available over a TLS secure connection
+ * between the TSAN and TSAC.
+ */
+#define MC_CMD_TSA_BIND_OP_SET_UNBINDTOKEN 0x6
+/* enum: Obsolete. Use MC_CMD_TSA_BIND_OP_SECURE_DECOMMISSION. */
+#define MC_CMD_TSA_BIND_OP_DECOMMISSION 0x7
+/* enum: Obsolete. Use MC_CMD_GET_CERTIFICATE. */
+#define MC_CMD_TSA_BIND_OP_GET_CERTIFICATE 0x8
+/* enum: Request a secure unbinding operation using unbinding token. This sub-
+ * command is available for any privileged client.
+ */
+#define MC_CMD_TSA_BIND_OP_SECURE_UNBIND 0x9
+/* enum: Request a secure decommissioning operation. This sub-command is
+ * available for any privileged client.
+ */
+#define MC_CMD_TSA_BIND_OP_SECURE_DECOMMISSION 0xa
+/* enum: Test facility that allows an adapter to be configured to behave as if
+ * Bound to a TSA controller with restricted MCDI administrator operations.
+ * This operation is primarily intended to aid host driver development.
+ */
+#define MC_CMD_TSA_BIND_OP_TEST_MCDI 0xb
+
+/* MC_CMD_TSA_BIND_IN_GET_ID msgrequest: Obsolete. Use
+ * MC_CMD_SECURE_NIC_INFO_IN_STATUS.
+ */
#define MC_CMD_TSA_BIND_IN_GET_ID_LEN 20
/* The operation requested. */
#define MC_CMD_TSA_BIND_IN_GET_ID_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_GET_ID_OP_LEN 4
/* Cryptographic nonce that TSAC generates and sends to TSAN. TSAC generates
* the nonce every time as part of the TSAN post-binding authentication
* procedure when the TSAN-TSAC connection terminates and TSAN does need to re-
@@ -15148,6 +16009,7 @@
#define MC_CMD_TSA_BIND_IN_GET_TICKET_LEN 4
/* The operation requested. */
#define MC_CMD_TSA_BIND_IN_GET_TICKET_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_GET_TICKET_OP_LEN 4
/* MC_CMD_TSA_BIND_IN_SET_KEY msgrequest */
#define MC_CMD_TSA_BIND_IN_SET_KEY_LENMIN 5
@@ -15155,6 +16017,7 @@
#define MC_CMD_TSA_BIND_IN_SET_KEY_LEN(num) (4+1*(num))
/* The operation requested. */
#define MC_CMD_TSA_BIND_IN_SET_KEY_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_SET_KEY_OP_LEN 4
/* This data blob contains the private key generated by the TSAC. TSAN uses
* this key for a signing operation. Note- This private key is used in
* conjunction with the post-binding TSAN authentication procedure that occurs
@@ -15166,26 +16029,261 @@
#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_MINNUM 1
#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_MAXNUM 248
-/* MC_CMD_TSA_BIND_IN_UNBIND msgrequest: Asks for the un-binding procedure */
+/* MC_CMD_TSA_BIND_IN_UNBIND msgrequest: Request an insecure unbinding
+ * operation.
+ */
#define MC_CMD_TSA_BIND_IN_UNBIND_LEN 10
/* The operation requested. */
#define MC_CMD_TSA_BIND_IN_UNBIND_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_UNBIND_OP_LEN 4
/* TSAN unique identifier for the network adapter */
#define MC_CMD_TSA_BIND_IN_UNBIND_TSANID_OFST 4
#define MC_CMD_TSA_BIND_IN_UNBIND_TSANID_LEN 6
-/* MC_CMD_TSA_BIND_OUT_GET_ID msgresponse */
+/* MC_CMD_TSA_BIND_IN_UNBIND_EXT msgrequest: Obsolete. Use
+ * MC_CMD_TSA_BIND_IN_SECURE_UNBIND.
+ */
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_LENMIN 93
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_LENMAX 252
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_LEN(num) (92+1*(num))
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_OP_LEN 4
+/* TSAN unique identifier for the network adapter */
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSANID_OFST 4
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSANID_LEN 6
+/* Align the arguments to 32 bits */
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSANID_RSVD_OFST 10
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSANID_RSVD_LEN 2
+/* This attribute identifies the TSA infrastructure domain. The length of the
+ * TSAID attribute is limited to 64 bytes. This is how TSA SDK defines the max
+ * length. Note- The TSAID is the Organizational Unit Name field as part of the
+ * root and server certificates.
+ */
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSAID_OFST 12
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSAID_LEN 1
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSAID_NUM 64
+/* Unbinding secret token. The adapter validates this unbinding token by
+ * comparing it against the one stored on the adapter as part of the
+ * MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN msgrequest. Refer to SF-115479-TC for
+ * more information.
+ */
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_UNBINDTOKEN_OFST 76
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_UNBINDTOKEN_LEN 16
+/* This is the signature of the above mentioned fields- TSANID, TSAID and
+ * UNBINDTOKEN. As per current requirements, the SIG opaque data blob contains
+ * ECDSA ECC-384 based signature. The ECC curve is secp384r1. The signature is
+ * also ASN-1 encoded. Note- The signature is verified based on the public key
+ * stored into the root certificate that is provisioned on the adapter side.
+ * This key is known as the PUKtsaid. Refer to SF-115479-TC for more
+ * information.
+ */
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_OFST 92
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_LEN 1
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_MINNUM 1
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_MAXNUM 160
+
+/* MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN msgrequest */
+#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_LEN 20
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_OP_LEN 4
+/* Unbinding secret token. TSAN persists the unbinding secret token. Refer to
+ * SF-115479-TC for more information.
+ */
+#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_UNBINDTOKEN_OFST 4
+#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_UNBINDTOKEN_LEN 16
+/* enum: There are situations when the binding process does not complete
+ * successfully because the key or other attributes are corrupted at the
+ * database level (Controller) and the adapter can no longer connect to the
+ * controller. To recover, use the decommission command to force the adapter
+ * into the unbound state.
+ */
+#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_ADAPTER_BINDING_FAILURE 0x1
+
+/* MC_CMD_TSA_BIND_IN_DECOMMISSION msgrequest: Obsolete. Use
+ * MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION.
+ */
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_LENMIN 109
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_LENMAX 252
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_LEN(num) (108+1*(num))
+/* This is the signature of the above mentioned fields- TSAID, USER and REASON.
+ * As per current requirements, the SIG opaque data blob contains ECDSA ECC-384
+ * based signature. The ECC curve is secp384r1. The signature is also ASN-1
+ * encoded. Note- The signature is verified based on the public key stored
+ * into the root certificate that is provisioned on the adapter side. This key
+ * is known as the PUKtsaid. Refer to SF-115479-TC for more information.
+ */
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_OFST 108
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_LEN 1
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_MINNUM 1
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_MAXNUM 144
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_OP_LEN 4
+/* This attribute identifies the TSA infrastructure domain. The length of the
+ * TSAID attribute is limited to 64 bytes. This is how TSA SDK defines the max
+ * length. Note- The TSAID is the Organizational Unit Name field as part of the
+ * root and server certificates.
+ */
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_TSAID_OFST 4
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_TSAID_LEN 1
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_TSAID_NUM 64
+/* User ID supplied, for example, by the Controller. Note- The 33-byte length
+ * of this attribute is the maximum length of a Linux user name plus the null
+ * terminator.
+ */
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_OFST 68
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_LEN 1
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_NUM 33
+/* Align the arguments to 32 bits */
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_RSVD_OFST 101
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_RSVD_LEN 3
+/* Reason why decommissioning happens. Note- The list of reasons, defined as
+ * part of the enumeration below, can be extended.
+ */
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_REASON_OFST 104
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_REASON_LEN 4
+
+/* MC_CMD_TSA_BIND_IN_GET_CERTIFICATE msgrequest: Obsolete. Use
+ * MC_CMD_GET_CERTIFICATE.
+ */
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_LEN 8
+/* The operation requested, must be MC_CMD_TSA_BIND_OP_GET_CERTIFICATE. */
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_OP_LEN 4
+/* Type of the certificate to be retrieved. */
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_TYPE_OFST 4
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_TYPE_LEN 4
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_UNUSED 0x0 /* enum */
+/* enum: Adapter Authentication Certificate (AAC). The AAC is used by the
+ * controller to verify the authenticity of the adapter.
+ */
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_AAC 0x1
+/* enum: Adapter Authentication Signing Certificate (AASC). The AASC is used by
+ * the controller to verify the validity of AAC.
+ */
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_AASC 0x2
+
+/* MC_CMD_TSA_BIND_IN_SECURE_UNBIND msgrequest: Request a secure unbinding
+ * operation using unbinding token.
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_LENMIN 97
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_LENMAX 200
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_LEN(num) (96+1*(num))
+/* The operation requested, must be MC_CMD_TSA_BIND_OP_SECURE_UNBIND. */
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_OP_LEN 4
+/* Type of the message. (MESSAGE_TYPE_xxx) Must be
+ * MESSAGE_TYPE_TSA_SECURE_UNBIND.
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_MESSAGE_TYPE_OFST 4
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_MESSAGE_TYPE_LEN 4
+/* TSAN unique identifier for the network adapter */
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_OFST 8
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_LEN 6
+/* Align the arguments to 32 bits */
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_RSVD_OFST 14
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_RSVD_LEN 2
+/* A NUL padded US-ASCII string identifying the TSA infrastructure domain. This
+ * field is for information only, and not used by the firmware. Note- The TSAID
+ * is the Organizational Unit Name field as part of the root and server
+ * certificates.
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSAID_OFST 16
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSAID_LEN 1
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSAID_NUM 64
+/* Unbinding secret token. The adapter validates this unbinding token by
+ * comparing it against the one stored on the adapter as part of the
+ * MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN msgrequest. Refer to SF-115479-TC for
+ * more information.
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_UNBINDTOKEN_OFST 80
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_UNBINDTOKEN_LEN 16
+/* The signature computed and encoded as specified by MESSAGE_TYPE. */
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_OFST 96
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_LEN 1
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_MINNUM 1
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_MAXNUM 104
+
+/* MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION msgrequest: Request a secure
+ * decommissioning operation.
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_LENMIN 113
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_LENMAX 216
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_LEN(num) (112+1*(num))
+/* The operation requested, must be MC_CMD_TSA_BIND_OP_SECURE_DECOMMISSION. */
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_OP_LEN 4
+/* Type of the message. (MESSAGE_TYPE_xxx) Must be
+ * MESSAGE_TYPE_SECURE_DECOMMISSION.
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_MESSAGE_TYPE_OFST 4
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_MESSAGE_TYPE_LEN 4
+/* A NUL padded US-ASCII string identifying the TSA infrastructure domain. This
+ * field is for information only, and not used by the firmware. Note- The TSAID
+ * is the Organizational Unit Name field as part of the root and server
+ * certificates.
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_TSAID_OFST 8
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_TSAID_LEN 1
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_TSAID_NUM 64
+/* A NUL padded US-ASCII string containing user name of the creator of the
+ * decommissioning ticket. This field is for information only, and not used by
+ * the firmware.
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_USER_OFST 72
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_USER_LEN 1
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_USER_NUM 36
+/* Reason why decommissioning happens */
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_REASON_OFST 108
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_REASON_LEN 4
+/* enum: There are situations when the binding process does not complete
+ * successfully because the key or other attributes are corrupted at the
+ * database level (Controller) and the adapter can no longer connect to the
+ * controller. To recover, use the decommission command to force the adapter
+ * into the unbound state.
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_ADAPTER_BINDING_FAILURE 0x1
+/* The signature computed and encoded as specified by MESSAGE_TYPE. */
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_OFST 112
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_LEN 1
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_MINNUM 1
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_MAXNUM 104
+
+/* MC_CMD_TSA_BIND_IN_TEST_MCDI msgrequest: Test mode that emulates MCDI
+ * interface restrictions of a bound adapter. This operation is intended for
+ * test use on adapters that are not deployed and bound to a TSA Controller.
+ * Using it on a Bound adapter will succeed but will not alter the MCDI
+ * privileges as MCDI operations will already be restricted.
+ */
+#define MC_CMD_TSA_BIND_IN_TEST_MCDI_LEN 8
+/* The operation requested must be MC_CMD_TSA_BIND_OP_TEST_MCDI. */
+#define MC_CMD_TSA_BIND_IN_TEST_MCDI_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_TEST_MCDI_OP_LEN 4
+/* Enable or disable emulation of bound adapter */
+#define MC_CMD_TSA_BIND_IN_TEST_MCDI_CTRL_OFST 4
+#define MC_CMD_TSA_BIND_IN_TEST_MCDI_CTRL_LEN 4
+#define MC_CMD_TSA_BIND_IN_TEST_MCDI_DISABLE 0x0 /* enum */
+#define MC_CMD_TSA_BIND_IN_TEST_MCDI_ENABLE 0x1 /* enum */
+
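/*
 * Illustrative sketch only: building the 8-byte MC_CMD_TSA_BIND_IN_TEST_MCDI
 * request that asks the MC to emulate bound-adapter MCDI restrictions. The
 * wr_le32() helper is hypothetical; the actual send is left to the driver's
 * MCDI transport.
 */
#include <stddef.h>
#include <stdint.h>

static void wr_le32(uint8_t *buf, size_t ofst, uint32_t val)
{
	buf[ofst + 0] = (uint8_t)(val & 0xff);
	buf[ofst + 1] = (uint8_t)((val >> 8) & 0xff);
	buf[ofst + 2] = (uint8_t)((val >> 16) & 0xff);
	buf[ofst + 3] = (uint8_t)((val >> 24) & 0xff);
}

static void build_tsa_test_mcdi(uint8_t buf[MC_CMD_TSA_BIND_IN_TEST_MCDI_LEN],
				int enable)
{
	wr_le32(buf, MC_CMD_TSA_BIND_IN_TEST_MCDI_OP_OFST,
		MC_CMD_TSA_BIND_OP_TEST_MCDI);
	wr_le32(buf, MC_CMD_TSA_BIND_IN_TEST_MCDI_CTRL_OFST,
		enable ? MC_CMD_TSA_BIND_IN_TEST_MCDI_ENABLE :
			 MC_CMD_TSA_BIND_IN_TEST_MCDI_DISABLE);
}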
+/* MC_CMD_TSA_BIND_OUT_GET_ID msgresponse: Obsolete. Use
+ * MC_CMD_SECURE_NIC_INFO_OUT_STATUS.
+ */
#define MC_CMD_TSA_BIND_OUT_GET_ID_LENMIN 15
#define MC_CMD_TSA_BIND_OUT_GET_ID_LENMAX 252
#define MC_CMD_TSA_BIND_OUT_GET_ID_LEN(num) (14+1*(num))
-/* The operation completion code. */
+/* The protocol operation code MC_CMD_TSA_BIND_OP_GET_ID that is sent back to
+ * the caller.
+ */
#define MC_CMD_TSA_BIND_OUT_GET_ID_OP_OFST 0
+#define MC_CMD_TSA_BIND_OUT_GET_ID_OP_LEN 4
/* Rules engine type. Note- The rules engine type allows TSAC to further
* identify the connected endpoint (e.g. TSAN, NIC Emulator) type and take the
* proper action accordingly. As an example, TSAC uses the rules engine type to
* select the SF key that differs in the case of TSAN vs. NIC Emulator.
*/
#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_OFST 4
+#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_LEN 4
/* enum: Hardware rules engine. */
#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_TSAN 0x1
/* enum: NIC emulator rules engine. */
@@ -15209,8 +16307,11 @@
#define MC_CMD_TSA_BIND_OUT_GET_TICKET_LENMIN 5
#define MC_CMD_TSA_BIND_OUT_GET_TICKET_LENMAX 252
#define MC_CMD_TSA_BIND_OUT_GET_TICKET_LEN(num) (4+1*(num))
-/* The operation completion code. */
+/* The protocol operation code MC_CMD_TSA_BIND_OP_GET_TICKET that is sent back
+ * to the caller.
+ */
#define MC_CMD_TSA_BIND_OUT_GET_TICKET_OP_OFST 0
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_OP_LEN 4
/* The ticket represents the data blob construct that TSAN sends to TSAC as
* part of the binding protocol. From the TSAN perspective the ticket is an
* opaque construct. For more info refer to SF-115479-TC.
@@ -15222,23 +16323,142 @@
/* MC_CMD_TSA_BIND_OUT_SET_KEY msgresponse */
#define MC_CMD_TSA_BIND_OUT_SET_KEY_LEN 4
-/* The operation completion code. */
+/* The protocol operation code MC_CMD_TSA_BIND_OP_SET_KEY that is sent back to
+ * the caller.
+ */
#define MC_CMD_TSA_BIND_OUT_SET_KEY_OP_OFST 0
+#define MC_CMD_TSA_BIND_OUT_SET_KEY_OP_LEN 4
-/* MC_CMD_TSA_BIND_OUT_UNBIND msgresponse */
+/* MC_CMD_TSA_BIND_OUT_UNBIND msgresponse: Response to insecure unbind request.
+ */
#define MC_CMD_TSA_BIND_OUT_UNBIND_LEN 8
/* Same as MC_CMD_ERR field, but included as 0 in success cases */
#define MC_CMD_TSA_BIND_OUT_UNBIND_RESULT_OFST 0
+#define MC_CMD_TSA_BIND_OUT_UNBIND_RESULT_LEN 4
/* Extra status information */
#define MC_CMD_TSA_BIND_OUT_UNBIND_INFO_OFST 4
+#define MC_CMD_TSA_BIND_OUT_UNBIND_INFO_LEN 4
/* enum: Unbind successful. */
-#define MC_CMD_TSA_BIND_OUT_UNBIND_OK_UNBOUND 0x0
+#define MC_CMD_TSA_BIND_OUT_UNBIND_OK_UNBOUND 0x0
/* enum: TSANID mismatch */
-#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_BAD_TSANID 0x1
+#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_BAD_TSANID 0x1
/* enum: Unable to remove the binding ticket from persistent storage. */
-#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_REMOVE_TICKET 0x2
+#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_REMOVE_TICKET 0x2
/* enum: TSAN is not bound to a binding ticket. */
-#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_NOT_BOUND 0x3
+#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_NOT_BOUND 0x3
+
+/* MC_CMD_TSA_BIND_OUT_UNBIND_EXT msgresponse: Obsolete. Use
+ * MC_CMD_TSA_BIND_OUT_SECURE_UNBIND.
+ */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_LEN 8
+/* Same as MC_CMD_ERR field, but included as 0 in success cases */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_RESULT_OFST 0
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_RESULT_LEN 4
+/* Extra status information */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_INFO_OFST 4
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_INFO_LEN 4
+/* enum: Unbind successful. */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_OK_UNBOUND 0x0
+/* enum: TSANID mismatch */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_BAD_TSANID 0x1
+/* enum: Unable to remove the binding ticket from persistent storage. */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_REMOVE_TICKET 0x2
+/* enum: TSAN is not bound to a binding ticket. */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_NOT_BOUND 0x3
+/* enum: Invalid unbind token */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_BAD_TOKEN 0x4
+/* enum: Invalid signature */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_BAD_SIGNATURE 0x5
+
+/* MC_CMD_TSA_BIND_OUT_SET_UNBINDTOKEN msgresponse */
+#define MC_CMD_TSA_BIND_OUT_SET_UNBINDTOKEN_LEN 4
+/* The protocol operation code MC_CMD_TSA_BIND_OP_SET_UNBINDTOKEN that is sent
+ * back to the caller.
+ */
+#define MC_CMD_TSA_BIND_OUT_SET_UNBINDTOKEN_OP_OFST 0
+#define MC_CMD_TSA_BIND_OUT_SET_UNBINDTOKEN_OP_LEN 4
+
+/* MC_CMD_TSA_BIND_OUT_DECOMMISSION msgresponse: Obsolete. Use
+ * MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION.
+ */
+#define MC_CMD_TSA_BIND_OUT_DECOMMISSION_LEN 4
+/* The protocol operation code MC_CMD_TSA_BIND_OP_DECOMMISSION that is sent
+ * back to the caller.
+ */
+#define MC_CMD_TSA_BIND_OUT_DECOMMISSION_OP_OFST 0
+#define MC_CMD_TSA_BIND_OUT_DECOMMISSION_OP_LEN 4
+
+/* MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE msgresponse */
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_LENMIN 9
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_LENMAX 252
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_LEN(num) (8+1*(num))
+/* The protocol operation code MC_CMD_TSA_BIND_OP_GET_CERTIFICATE that is sent
+ * back to the caller.
+ */
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_OP_OFST 0
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_OP_LEN 4
+/* Type of the certificate. */
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_TYPE_OFST 4
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_TSA_BIND_IN_GET_CERTIFICATE/TYPE */
+/* The certificate data. */
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_OFST 8
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_LEN 1
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_MINNUM 1
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_MAXNUM 244
+
+/* MC_CMD_TSA_BIND_OUT_SECURE_UNBIND msgresponse: Response to secure unbind
+ * request.
+ */
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_LEN 8
+/* The protocol operation code that is sent back to the caller. */
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_OP_OFST 0
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_OP_LEN 4
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_RESULT_OFST 4
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_RESULT_LEN 4
+/* enum: Unbind successful. */
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_OK_UNBOUND 0x0
+/* enum: TSANID mismatch */
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_BAD_TSANID 0x1
+/* enum: Unable to remove the binding ticket from persistent storage. */
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_REMOVE_TICKET 0x2
+/* enum: TSAN is not bound to a domain. */
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_NOT_BOUND 0x3
+/* enum: Invalid unbind token */
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_BAD_TOKEN 0x4
+/* enum: Invalid signature */
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_BAD_SIGNATURE 0x5
+
+/* MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION msgresponse: Response to secure
+ * decommission request.
+ */
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_LEN 8
+/* The protocol operation code that is sent back to the caller. */
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_OP_OFST 0
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_OP_LEN 4
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_RESULT_OFST 4
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_RESULT_LEN 4
+/* enum: Unbind successful. */
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_OK_UNBOUND 0x0
+/* enum: TSANID mismatch */
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_BAD_TSANID 0x1
+/* enum: Unable to remove the binding ticket from persistent storage. */
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_REMOVE_TICKET 0x2
+/* enum: TSAN is not bound to a domain. */
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_NOT_BOUND 0x3
+/* enum: Invalid unbind token */
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_BAD_TOKEN 0x4
+/* enum: Invalid signature */
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_BAD_SIGNATURE 0x5
+
+/* MC_CMD_TSA_BIND_OUT_TEST_MCDI msgresponse */
+#define MC_CMD_TSA_BIND_OUT_TEST_MCDI_LEN 4
+/* The protocol operation code MC_CMD_TSA_BIND_OP_TEST_MCDI that is sent back
+ * to the caller.
+ */
+#define MC_CMD_TSA_BIND_OUT_TEST_MCDI_OP_OFST 0
+#define MC_CMD_TSA_BIND_OUT_TEST_MCDI_OP_LEN 4
/***********************************/
@@ -15251,9 +16471,9 @@
* will be loaded at power on or MC reboot, instead of the default ruleset.
* Rollback of the currently active ruleset to the cached version (when it is
* valid) is also supported. (Medford-only; for use by SolarSecure apps, not
- * directly by drivers. See SF-114946-SW.) NOTE - this message definition is
- * provisional. It has not yet been used in any released code and may change
- * during development. This note will be removed once it is regarded as stable.
+ * directly by drivers. See SF-114946-SW.) NOTE - The only sub-operation
+ * allowed in an adapter bound to a TSA controller from the local host is
+ * OP_GET_CACHED_VERSION. All other sub-operations are prohibited.
*/
#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE 0x11a
#undef MC_CMD_0x11a_PRIVILEGE_CTG
@@ -15264,18 +16484,19 @@
#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_LEN 4
/* the operation to perform */
#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_OFST 0
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_LEN 4
/* enum: reports the ruleset version that is cached in persistent storage but
* performs no other action
*/
-#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_GET_CACHED_VERSION 0x0
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_GET_CACHED_VERSION 0x0
/* enum: rolls back the active state to the cached version. (May fail with
* ENOENT if there is no valid cached version.)
*/
-#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_ROLLBACK 0x1
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_ROLLBACK 0x1
/* enum: commits the active state to the persistent cache */
-#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_COMMIT 0x2
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_COMMIT 0x2
/* enum: invalidates the persistent cache without affecting the active state */
-#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_INVALIDATE 0x3
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_INVALIDATE 0x3
/* MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT msgresponse */
#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_LENMIN 5
@@ -15285,12 +16506,13 @@
* requested operation in the case of rollback, commit, or invalidate)
*/
#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_OFST 0
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_LEN 4
/* enum: persistent cache is invalid (the VERSION field will be empty in this
* case)
*/
-#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_INVALID 0x0
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_INVALID 0x0
/* enum: persistent cache is valid */
-#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_VALID 0x1
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_VALID 0x1
/* cached ruleset version (after completion of the requested operation, in the
* case of rollback, commit, or invalidate) as an opaque hash value in the same
* form as MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION
@@ -15309,7 +16531,7 @@
#define MC_CMD_NVRAM_PRIVATE_APPEND 0x11c
#undef MC_CMD_0x11c_PRIVILEGE_CTG
-#define MC_CMD_0x11c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x11c_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_NVRAM_PRIVATE_APPEND_IN msgrequest */
#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMIN 9
@@ -15317,8 +16539,10 @@
#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(num) (8+1*(num))
/* The tag to be appended */
#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST 0
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_LEN 4
/* The length of the data */
#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_OFST 4
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_LEN 4
/* The data to be contained in the TLV structure */
#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_OFST 8
#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_LEN 1
@@ -15344,6 +16568,7 @@
#define MC_CMD_XPM_VERIFY_CONTENTS_IN_LEN 4
/* Data type to be checked */
#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_OFST 0
+#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_LEN 4
/* MC_CMD_XPM_VERIFY_CONTENTS_OUT msgresponse */
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMIN 12
@@ -15351,10 +16576,13 @@
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LEN(num) (12+1*(num))
/* Number of sectors found (test builds only) */
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_OFST 0
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_LEN 4
/* Number of bytes found (test builds only) */
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_OFST 4
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_LEN 4
/* Length of signature */
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_OFST 8
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_LEN 4
/* Signature */
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_OFST 12
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_LEN 1
@@ -15380,23 +16608,29 @@
#define MC_CMD_SET_EVQ_TMR_IN_LEN 16
/* Function-relative queue instance */
#define MC_CMD_SET_EVQ_TMR_IN_INSTANCE_OFST 0
+#define MC_CMD_SET_EVQ_TMR_IN_INSTANCE_LEN 4
/* Requested value for timer load (in nanoseconds) */
#define MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_OFST 4
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_LEN 4
/* Requested value for timer reload (in nanoseconds) */
#define MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_OFST 8
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_LEN 4
/* Timer mode. Meanings as per EVQ_TMR_REG.TC_TIMER_VAL */
#define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_OFST 12
-#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS 0x0 /* enum */
-#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START 0x1 /* enum */
-#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START 0x2 /* enum */
-#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF 0x3 /* enum */
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_LEN 4
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS 0x0 /* enum */
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START 0x1 /* enum */
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START 0x2 /* enum */
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF 0x3 /* enum */
/* MC_CMD_SET_EVQ_TMR_OUT msgresponse */
#define MC_CMD_SET_EVQ_TMR_OUT_LEN 8
/* Actual value for timer load (in nanoseconds) */
#define MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_OFST 0
+#define MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_LEN 4
/* Actual value for timer reload (in nanoseconds) */
#define MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_OFST 4
+#define MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_LEN 4
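+
+/*
+ * Illustrative sketch only, not part of the generated MCDI definitions: one
+ * way a caller might populate an MC_CMD_SET_EVQ_TMR request payload using the
+ * offsets above. MCDI payload fields are little-endian dwords; the helper
+ * name and the surrounding transport code are assumptions for the example.
+ */
+#include <stdint.h>
+#include <string.h>
+#include <rte_byteorder.h>
+
+static inline void
+example_set_evq_tmr_request(uint8_t payload[MC_CMD_SET_EVQ_TMR_IN_LEN],
+			    uint32_t evq_instance, uint32_t load_ns,
+			    uint32_t reload_ns)
+{
+	uint32_t le;
+
+	le = rte_cpu_to_le_32(evq_instance);
+	memcpy(payload + MC_CMD_SET_EVQ_TMR_IN_INSTANCE_OFST, &le, sizeof(le));
+	le = rte_cpu_to_le_32(load_ns);
+	memcpy(payload + MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_OFST, &le,
+	       sizeof(le));
+	le = rte_cpu_to_le_32(reload_ns);
+	memcpy(payload + MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_OFST, &le,
+	       sizeof(le));
+	/* Interrupt hold-off mode; the other timer modes are defined above. */
+	le = rte_cpu_to_le_32(MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);
+	memcpy(payload + MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_OFST, &le, sizeof(le));
+}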
/***********************************/
@@ -15415,29 +16649,35 @@
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN 36
/* Reserved for future use. */
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_LEN 4
/* For timers updated via writes to EVQ_TMR_REG, this is the time interval (in
* nanoseconds) for each increment of the timer load/reload count. The
* requested duration of a timer is this value multiplied by the timer
* load/reload count.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_OFST 4
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_LEN 4
/* For timers updated via writes to EVQ_TMR_REG, this is the maximum value
* allowed for timer load/reload counts.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_OFST 8
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_LEN 4
/* For timers updated via writes to EVQ_TMR_REG, timer load/reload counts not a
* multiple of this step size will be rounded in an implementation defined
* manner.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_OFST 12
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_LEN 4
/* Maximum timer duration (in nanoseconds) for timers updated via MCDI. Only
* meaningful if MC_CMD_SET_EVQ_TMR is implemented.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_OFST 16
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_LEN 4
/* Timer durations requested via MCDI that are not a multiple of this step size
* will be rounded up. Only meaningful if MC_CMD_SET_EVQ_TMR is implemented.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_OFST 20
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_LEN 4
/* For timers updated using the bug35388 workaround, this is the time interval
* (in nanoseconds) for each increment of the timer load/reload count. The
* requested duration of a timer is this value multiplied by the timer
@@ -15445,17 +16685,20 @@
* is enabled.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_OFST 24
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_LEN 4
/* For timers updated using the bug35388 workaround, this is the maximum value
* allowed for timer load/reload counts. This field is only meaningful if the
* bug35388 workaround is enabled.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_OFST 28
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_LEN 4
/* For timers updated using the bug35388 workaround, timer load/reload counts
* not a multiple of this step size will be rounded in an implementation
* defined manner. This field is only meaningful if the bug35388 workaround is
* enabled.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_LEN 4
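+
+/*
+ * Illustrative sketch only, not part of the generated MCDI definitions: how a
+ * driver might convert a requested timeout in nanoseconds into an EVQ_TMR_REG
+ * load/reload count using the TMR_REG_NS_PER_COUNT, TMR_REG_MAX_COUNT and
+ * TMR_REG_STEP properties above. Rounding the count up to the step size is
+ * one possible policy; the properties are assumed to have been decoded into
+ * host-order integers already and to be non-zero.
+ */
+#include <stdint.h>
+
+static inline uint32_t
+example_evq_tmr_ns_to_count(uint32_t timeout_ns, uint32_t ns_per_count,
+			    uint32_t max_count, uint32_t step)
+{
+	/* Requested duration is count * ns_per_count; round the count up. */
+	uint32_t count = (timeout_ns + ns_per_count - 1) / ns_per_count;
+
+	/* Counts that are not a multiple of the step size are rounded in an
+	 * implementation defined manner, so round up to the step explicitly.
+	 */
+	count = ((count + step - 1) / step) * step;
+
+	/* Clamp to the maximum load/reload count supported by the hardware. */
+	if (count > max_count)
+		count = max_count;
+	return count;
+}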
/***********************************/
@@ -15466,7 +16709,7 @@
#define MC_CMD_ALLOCATE_TX_VFIFO_CP 0x11d
#undef MC_CMD_0x11d_PRIVILEGE_CTG
-#define MC_CMD_0x11d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x11d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN msgrequest */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_LEN 20
@@ -15474,34 +16717,40 @@
* local queue index.
*/
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_LEN 4
/* Will the common pool be used as TX_vFIFO_ULL (1) */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST 4
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED 0x1 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_LEN 4
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED 0x1 /* enum */
/* enum: Using this interface without TX_vFIFO_ULL is not supported for now */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED 0x0
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED 0x0
/* Number of buffers to reserve for the common pool */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST 8
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_LEN 4
/* TX datapath to which the Common Pool is connected to. */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST 12
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_LEN 4
/* enum: Extracts information from function */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1
/* Network port or RX Engine to which the common pool connects. */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST 16
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_LEN 4
/* enum: Extracts information from function */
-/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0 0x0 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT1 0x1 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT2 0x2 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT3 0x3 /* enum */
+/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0 0x0 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT1 0x1 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT2 0x2 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT3 0x3 /* enum */
/* enum: To enable Switch loopback with Rx engine 0 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE0 0x4
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE0 0x4
/* enum: To enable Switch loopback with Rx engine 1 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE1 0x5
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE1 0x5
/* MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT msgresponse */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_LEN 4
/* ID of the common pool allocated */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_OFST 0
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_LEN 4
/***********************************/
@@ -15512,42 +16761,49 @@
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO 0x11e
#undef MC_CMD_0x11e_PRIVILEGE_CTG
-#define MC_CMD_0x11e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x11e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN msgrequest */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LEN 20
/* Common pool previously allocated to which the new vFIFO will be associated
*/
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST 0
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_LEN 4
/* Port or RX engine to associate the vFIFO egress */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST 4
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_LEN 4
/* enum: Extracts information from common pool */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE -0x1
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0 0x0 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT1 0x1 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT2 0x2 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT3 0x3 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE -0x1
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0 0x0 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT1 0x1 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT2 0x2 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT3 0x3 /* enum */
/* enum: To enable Switch loopback with Rx engine 0 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE0 0x4
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE0 0x4
/* enum: To enable Switch loopback with Rx engine 1 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1 0x5
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1 0x5
/* Minimum number of buffers that the pool must have */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST 8
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_LEN 4
/* enum: Do not check the space available */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM 0x0
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM 0x0
/* Will the vFIFO be used as TX_vFIFO_ULL */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST 12
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_LEN 4
/* Network priority of the vFIFO,if applicable */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST 16
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_LEN 4
/* enum: Search for the lowest unused priority */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE -0x1
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE -0x1
/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT msgresponse */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN 8
/* Short vFIFO ID */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_OFST 0
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_LEN 4
/* Network priority of the vFIFO */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_OFST 4
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_LEN 4
/***********************************/
@@ -15558,12 +16814,13 @@
#define MC_CMD_TEARDOWN_TX_VFIFO_VF 0x11f
#undef MC_CMD_0x11f_PRIVILEGE_CTG
-#define MC_CMD_0x11f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x11f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_TEARDOWN_TX_VFIFO_VF_IN msgrequest */
#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_LEN 4
/* Short vFIFO ID */
#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_OFST 0
+#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_LEN 4
/* MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT msgresponse */
#define MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT_LEN 0
@@ -15577,12 +16834,13 @@
#define MC_CMD_DEALLOCATE_TX_VFIFO_CP 0x121
#undef MC_CMD_0x121_PRIVILEGE_CTG
-#define MC_CMD_0x121_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x121_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN msgrequest */
#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_LEN 4
/* Common pool ID given when pool allocated */
#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_OFST 0
+#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_LEN 4
/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT msgresponse */
#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT_LEN 0
@@ -15604,16 +16862,17 @@
#define MC_CMD_REKEY 0x123
#undef MC_CMD_0x123_PRIVILEGE_CTG
-#define MC_CMD_0x123_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x123_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_REKEY_IN msgrequest */
#define MC_CMD_REKEY_IN_LEN 4
/* the type of operation requested */
#define MC_CMD_REKEY_IN_OP_OFST 0
+#define MC_CMD_REKEY_IN_OP_LEN 4
/* enum: Start the rekeying operation */
-#define MC_CMD_REKEY_IN_OP_REKEY 0x0
+#define MC_CMD_REKEY_IN_OP_REKEY 0x0
/* enum: Poll for completion of the rekeying operation */
-#define MC_CMD_REKEY_IN_OP_POLL 0x1
+#define MC_CMD_REKEY_IN_OP_POLL 0x1
/* MC_CMD_REKEY_OUT msgresponse */
#define MC_CMD_REKEY_OUT_LEN 0
@@ -15627,7 +16886,7 @@
#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS 0x124
#undef MC_CMD_0x124_PRIVILEGE_CTG
-#define MC_CMD_0x124_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x124_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN msgrequest */
#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN_LEN 0
@@ -15636,8 +16895,10 @@
#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_LEN 8
/* Available buffers for the ENG to NET vFIFOs. */
#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_OFST 0
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_LEN 4
/* Available buffers for the ENG to ENG and NET to ENG vFIFOs. */
#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_OFST 4
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_LEN 4
/***********************************/
@@ -15653,19 +16914,1231 @@
#define MC_CMD_SET_SECURITY_FUSES 0x126
#undef MC_CMD_0x126_PRIVILEGE_CTG
-#define MC_CMD_0x126_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x126_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_SET_SECURITY_FUSES_IN msgrequest */
#define MC_CMD_SET_SECURITY_FUSES_IN_LEN 4
/* Flags specifying what type of security features are being set */
#define MC_CMD_SET_SECURITY_FUSES_IN_FLAGS_OFST 0
+#define MC_CMD_SET_SECURITY_FUSES_IN_FLAGS_LEN 4
#define MC_CMD_SET_SECURITY_FUSES_IN_SECURE_BOOT_LBN 0
#define MC_CMD_SET_SECURITY_FUSES_IN_SECURE_BOOT_WIDTH 1
#define MC_CMD_SET_SECURITY_FUSES_IN_REJECT_TEST_SIGNED_LBN 1
#define MC_CMD_SET_SECURITY_FUSES_IN_REJECT_TEST_SIGNED_WIDTH 1
+#define MC_CMD_SET_SECURITY_FUSES_IN_SOFT_CONFIG_LBN 31
+#define MC_CMD_SET_SECURITY_FUSES_IN_SOFT_CONFIG_WIDTH 1
/* MC_CMD_SET_SECURITY_FUSES_OUT msgresponse */
#define MC_CMD_SET_SECURITY_FUSES_OUT_LEN 0
+/* MC_CMD_SET_SECURITY_FUSES_V2_OUT msgresponse */
+#define MC_CMD_SET_SECURITY_FUSES_V2_OUT_LEN 4
+/* Flags specifying which security features are enforced on the NIC after the
+ * flags in the request have been applied. See
+ * MC_CMD_SET_SECURITY_FUSES_IN/FLAGS for flag definitions.
+ */
+#define MC_CMD_SET_SECURITY_FUSES_V2_OUT_FLAGS_OFST 0
+#define MC_CMD_SET_SECURITY_FUSES_V2_OUT_FLAGS_LEN 4
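+
+/*
+ * Illustrative sketch only, not part of the generated MCDI definitions: the
+ * _LBN/_WIDTH pairs above describe single-bit flags within the 32-bit FLAGS
+ * field, so a request flag word can be assembled with plain shifts. The
+ * helper name is an assumption for the example.
+ */
+#include <stdint.h>
+
+static inline uint32_t
+example_security_fuses_flags(int secure_boot, int reject_test_signed,
+			     int soft_config)
+{
+	uint32_t flags = 0;
+
+	if (secure_boot)
+		flags |= 1u << MC_CMD_SET_SECURITY_FUSES_IN_SECURE_BOOT_LBN;
+	if (reject_test_signed)
+		flags |= 1u <<
+		    MC_CMD_SET_SECURITY_FUSES_IN_REJECT_TEST_SIGNED_LBN;
+	if (soft_config)
+		flags |= 1u << MC_CMD_SET_SECURITY_FUSES_IN_SOFT_CONFIG_LBN;
+	return flags;
+}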
+
+
+/***********************************/
+/* MC_CMD_TSA_INFO
+ * Messages sent from TSA adapter to TSA controller. This command is only valid
+ * when the MCDI header has MESSAGE_TYPE set to MCDI_MESSAGE_TYPE_TSA. This
+ * command is not sent by the driver to the MC; it is sent from the MC to a TSA
+ * controller, and is treated as an alert message rather than a command;
+ * hence the MC does not expect a response in return. Doxbox reference
+ * SF-117371-SW
+ */
+#define MC_CMD_TSA_INFO 0x127
+#undef MC_CMD_0x127_PRIVILEGE_CTG
+
+#define MC_CMD_0x127_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TSA_INFO_IN msgrequest */
+#define MC_CMD_TSA_INFO_IN_LEN 4
+#define MC_CMD_TSA_INFO_IN_OP_HDR_OFST 0
+#define MC_CMD_TSA_INFO_IN_OP_HDR_LEN 4
+#define MC_CMD_TSA_INFO_IN_OP_LBN 0
+#define MC_CMD_TSA_INFO_IN_OP_WIDTH 16
+/* enum: Information about a recently discovered local IP address of the
+ * adapter.
+ */
+#define MC_CMD_TSA_INFO_OP_LOCAL_IP 0x1
+/* enum: Information about a sampled packet that did not match any
+ * black/white-list filters and was either allowed or denied by the default
+ * filter.
+ */
+#define MC_CMD_TSA_INFO_OP_PKT_SAMPLE 0x2
+
+/* MC_CMD_TSA_INFO_IN_LOCAL_IP msgrequest:
+ *
+ * The TSA controller maintains a list of IP addresses valid for each port of a
+ * TSA adapter. The TSA controller requires information from the adapter
+ * in order to learn new IP addresses assigned to a physical port and to
+ * identify those that are no longer assigned to the physical port. For this
+ * purpose, the TSA adapter snoops ARP replies, gratuitous ARP requests and ARP
+ * probe packets seen on each physical port. This definition describes the
+ * format of the notification message sent from a TSA adapter to a TSA
+ * controller whenever a change in IP address assignment is detected on a
+ * port. Doxbox reference SF-117371.
+ *
+ * There may be a possibility of combining multiple notifications in a single
+ * message in future. When that happens, a new flag can be defined using the
+ * reserved bits to describe the extended format of this notification.
+ */
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_LEN 18
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_OP_HDR_OFST 0
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_OP_HDR_LEN 4
+/* Additional metadata describing the IP address information such as source of
+ * information retrieval, type of IP address, physical port number.
+ */
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_OFST 4
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_LEN 4
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_PORT_INDEX_LBN 0
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_PORT_INDEX_WIDTH 8
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED_LBN 8
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED_WIDTH 8
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_REASON_LBN 16
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_REASON_WIDTH 8
+/* enum: ARP reply sent out of the physical port */
+#define MC_CMD_TSA_INFO_IP_REASON_TX_ARP 0x0
+/* enum: ARP probe packet received on the physical port */
+#define MC_CMD_TSA_INFO_IP_REASON_RX_ARP_PROBE 0x1
+/* enum: Gratuitous ARP packet received on the physical port */
+#define MC_CMD_TSA_INFO_IP_REASON_RX_GRATUITOUS_ARP 0x2
+/* enum: DHCP ACK packet received on the physical port */
+#define MC_CMD_TSA_INFO_IP_REASON_RX_DHCP_ACK 0x3
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_IPV4_LBN 24
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_IPV4_WIDTH 1
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED1_LBN 25
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED1_WIDTH 7
+/* IPV4 address retrieved from the sampled packets. This field is relevant only
+ * when META_IPV4 is set to 1.
+ */
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_IPV4_ADDR_OFST 8
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_IPV4_ADDR_LEN 4
+/* Target MAC address retrieved from the sampled packet. */
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_MAC_ADDR_OFST 12
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_MAC_ADDR_LEN 1
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_MAC_ADDR_NUM 6
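+
+/*
+ * Illustrative sketch only, not part of the generated MCDI definitions: how
+ * the MC firmware side might lay out a LOCAL_IP notification using the
+ * offsets and bit positions above. The helper name is an assumption; the
+ * sub-operation code goes in the low 16 bits of the OP_HDR word and the
+ * metadata word packs the port index, reason code and IPv4-valid flag.
+ */
+#include <stdint.h>
+#include <string.h>
+#include <rte_byteorder.h>
+
+static inline void
+example_tsa_info_local_ip(uint8_t payload[MC_CMD_TSA_INFO_IN_LOCAL_IP_LEN],
+			  uint8_t port_index, uint8_t reason,
+			  uint32_t ipv4_addr_le, const uint8_t mac_addr[6])
+{
+	uint32_t le;
+	uint32_t meta;
+
+	le = rte_cpu_to_le_32(MC_CMD_TSA_INFO_OP_LOCAL_IP);
+	memcpy(payload + MC_CMD_TSA_INFO_IN_LOCAL_IP_OP_HDR_OFST, &le,
+	       sizeof(le));
+
+	meta = (uint32_t)port_index <<
+	    MC_CMD_TSA_INFO_IN_LOCAL_IP_META_PORT_INDEX_LBN;
+	meta |= (uint32_t)reason <<
+	    MC_CMD_TSA_INFO_IN_LOCAL_IP_META_REASON_LBN;
+	meta |= 1u << MC_CMD_TSA_INFO_IN_LOCAL_IP_META_IPV4_LBN;
+	le = rte_cpu_to_le_32(meta);
+	memcpy(payload + MC_CMD_TSA_INFO_IN_LOCAL_IP_META_OFST, &le,
+	       sizeof(le));
+
+	/* IPv4 address (already little-endian) and target MAC address. */
+	memcpy(payload + MC_CMD_TSA_INFO_IN_LOCAL_IP_IPV4_ADDR_OFST,
+	       &ipv4_addr_le, sizeof(ipv4_addr_le));
+	memcpy(payload + MC_CMD_TSA_INFO_IN_LOCAL_IP_MAC_ADDR_OFST,
+	       mac_addr, 6);
+}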
+
+/* MC_CMD_TSA_INFO_IN_PKT_SAMPLE msgrequest:
+ *
+ * It is desirable for the TSA controller to learn the traffic pattern of
+ * packets seen at the network port being monitored. In order to learn about
+ * the traffic pattern, the TSA controller may want to sample packets seen at
+ * the network port. Based on the packet samples that the TSA controller
+ * receives from the adapter, the controller may choose to configure additional
+ * black-list or white-list rules to allow or block packets as required.
+ *
+ * Although the entire sampled packet as seen on the network port is available
+ * to the MC, the length of the sample sent to the controller is restricted by
+ * the MCDI payload size. Besides, the TSA controller does not require the
+ * entire packet to make decisions about filter updates. Hence the packet
+ * sample passed to the controller is truncated to 128 bytes. This length is
+ * large enough to hold the Ethernet header, the IP header and the maximum
+ * length of the supported L4 protocol headers (IPv4 only for now, but large
+ * enough to hold an IPv6 header too, if required in future).
+ *
+ * The intention is that any future changes to this message format that are not
+ * backwards compatible will be defined with a new operation code.
+ */
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_LEN 136
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_OP_HDR_OFST 0
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_OP_HDR_LEN 4
+/* Additional metadata describing the sampled packet */
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_OFST 4
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_LEN 4
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_PORT_INDEX_LBN 0
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_PORT_INDEX_WIDTH 8
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_DIRECTION_LBN 8
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_DIRECTION_WIDTH 1
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_RESERVED_LBN 9
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_RESERVED_WIDTH 7
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_MASK_LBN 16
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_MASK_WIDTH 4
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_ALLOW_LBN 16
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_ALLOW_WIDTH 1
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_DENY_LBN 17
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_DENY_WIDTH 1
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_COUNT_LBN 18
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_COUNT_WIDTH 1
+/* 128-byte raw prefix of the sampled packet which includes the ethernet
+ * header, IP header and L4 protocol header (only IPv4 supported initially).
+ * This provides the controller enough information about the packet sample to
+ * report traffic patterns seen on a network port and to make decisions
+ * concerning rule-set updates.
+ */
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_PACKET_DATA_OFST 8
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_PACKET_DATA_LEN 1
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_PACKET_DATA_NUM 128
+
+/* MC_CMD_TSA_INFO_OUT msgresponse */
+#define MC_CMD_TSA_INFO_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_HOST_INFO
+ * Commands to set or retrieve host-related information on an adapter.
+ * Doxbox reference SF-117371-SW
+ */
+#define MC_CMD_HOST_INFO 0x128
+#undef MC_CMD_0x128_PRIVILEGE_CTG
+
+#define MC_CMD_0x128_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_HOST_INFO_IN msgrequest */
+#define MC_CMD_HOST_INFO_IN_LEN 4
+/* sub-operation code info */
+#define MC_CMD_HOST_INFO_IN_OP_HDR_OFST 0
+#define MC_CMD_HOST_INFO_IN_OP_HDR_LEN 4
+#define MC_CMD_HOST_INFO_IN_OP_LBN 0
+#define MC_CMD_HOST_INFO_IN_OP_WIDTH 16
+/* enum: Read a 16-byte unique host identifier from the adapter. This UUID
+ * helps to identify the host that an adapter is plugged into. This identifier
+ * is ideally the system UUID retrieved and set by the UEFI driver. If the UEFI
+ * driver is unable to extract the system UUID, it would still set a random
+ * 16-byte value into each supported SF adapter plugged into it. Host UUIDs may
+ * change if the system is power-cycled, however, they persist across adapter
+ * resets. If the host UUID was not set on an adapter, due to an unsupported
+ * version of UEFI driver, then this command returns an error. Doxbox reference
+ * - SF-117371-SW section 'Host UUID'.
+ */
+#define MC_CMD_HOST_INFO_OP_GET_UUID 0x0
+/* enum: Set a 16-byte unique host identifier on the adapter to identify the
+ * host that the adapter is plugged into. See MC_CMD_HOST_INFO_OP_GET_UUID for
+ * further details.
+ */
+#define MC_CMD_HOST_INFO_OP_SET_UUID 0x1
+
+/* MC_CMD_HOST_INFO_IN_GET_UUID msgrequest */
+#define MC_CMD_HOST_INFO_IN_GET_UUID_LEN 4
+/* sub-operation code info */
+#define MC_CMD_HOST_INFO_IN_GET_UUID_OP_HDR_OFST 0
+#define MC_CMD_HOST_INFO_IN_GET_UUID_OP_HDR_LEN 4
+
+/* MC_CMD_HOST_INFO_OUT_GET_UUID msgresponse */
+#define MC_CMD_HOST_INFO_OUT_GET_UUID_LEN 16
+/* 16-byte host UUID read out of the adapter. See MC_CMD_HOST_INFO_OP_GET_UUID
+ * for further details.
+ */
+#define MC_CMD_HOST_INFO_OUT_GET_UUID_HOST_UUID_OFST 0
+#define MC_CMD_HOST_INFO_OUT_GET_UUID_HOST_UUID_LEN 1
+#define MC_CMD_HOST_INFO_OUT_GET_UUID_HOST_UUID_NUM 16
+
+/* MC_CMD_HOST_INFO_IN_SET_UUID msgrequest */
+#define MC_CMD_HOST_INFO_IN_SET_UUID_LEN 20
+/* sub-operation code info */
+#define MC_CMD_HOST_INFO_IN_SET_UUID_OP_HDR_OFST 0
+#define MC_CMD_HOST_INFO_IN_SET_UUID_OP_HDR_LEN 4
+/* 16-byte host UUID set on the adapter. See MC_CMD_HOST_INFO_OP_GET_UUID for
+ * further details.
+ */
+#define MC_CMD_HOST_INFO_IN_SET_UUID_HOST_UUID_OFST 4
+#define MC_CMD_HOST_INFO_IN_SET_UUID_HOST_UUID_LEN 1
+#define MC_CMD_HOST_INFO_IN_SET_UUID_HOST_UUID_NUM 16
+
+/* MC_CMD_HOST_INFO_OUT_SET_UUID msgresponse */
+#define MC_CMD_HOST_INFO_OUT_SET_UUID_LEN 0
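+
+/*
+ * Illustrative sketch only, not part of the generated MCDI definitions: a
+ * UEFI driver (or other caller) might build the SET_UUID request like this,
+ * placing the 16-bit sub-operation code in the low bits of the OP_HDR word
+ * followed by the raw 16-byte UUID. The helper name is an assumption.
+ */
+#include <stdint.h>
+#include <string.h>
+#include <rte_byteorder.h>
+
+static inline void
+example_host_info_set_uuid(uint8_t payload[MC_CMD_HOST_INFO_IN_SET_UUID_LEN],
+			   const uint8_t uuid[16])
+{
+	uint32_t le = rte_cpu_to_le_32(MC_CMD_HOST_INFO_OP_SET_UUID);
+
+	memcpy(payload + MC_CMD_HOST_INFO_IN_SET_UUID_OP_HDR_OFST, &le,
+	       sizeof(le));
+	memcpy(payload + MC_CMD_HOST_INFO_IN_SET_UUID_HOST_UUID_OFST, uuid, 16);
+}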
+
+
+/***********************************/
+/* MC_CMD_TSAN_INFO
+ * Get TSA adapter information. TSA controllers query each TSA adapter to learn
+ * some configuration parameters of each adapter. Doxbox reference SF-117371-SW
+ * section 'Adapter Information'
+ */
+#define MC_CMD_TSAN_INFO 0x129
+#undef MC_CMD_0x129_PRIVILEGE_CTG
+
+#define MC_CMD_0x129_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_TSAN_INFO_IN msgrequest */
+#define MC_CMD_TSAN_INFO_IN_LEN 4
+/* sub-operation code info */
+#define MC_CMD_TSAN_INFO_IN_OP_HDR_OFST 0
+#define MC_CMD_TSAN_INFO_IN_OP_HDR_LEN 4
+#define MC_CMD_TSAN_INFO_IN_OP_LBN 0
+#define MC_CMD_TSAN_INFO_IN_OP_WIDTH 16
+/* enum: Read configuration parameters and IDs that uniquely identify an
+ * adapter. The parameters include the host identification, the adapter
+ * identification string and the number of physical ports on the adapter.
+ */
+#define MC_CMD_TSAN_INFO_OP_GET_CFG 0x0
+
+/* MC_CMD_TSAN_INFO_IN_GET_CFG msgrequest */
+#define MC_CMD_TSAN_INFO_IN_GET_CFG_LEN 4
+/* sub-operation code info */
+#define MC_CMD_TSAN_INFO_IN_GET_CFG_OP_HDR_OFST 0
+#define MC_CMD_TSAN_INFO_IN_GET_CFG_OP_HDR_LEN 4
+
+/* MC_CMD_TSAN_INFO_OUT_GET_CFG msgresponse */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_LEN 26
+/* Information about the configuration parameters returned in this response. */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_CONFIG_WORD_OFST 0
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_CONFIG_WORD_LEN 4
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_CAP_FLAGS_LBN 0
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_CAP_FLAGS_WIDTH 16
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_FLAG_HOST_UUID_VALID_LBN 0
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_FLAG_HOST_UUID_VALID_WIDTH 1
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_NUM_PORTS_LBN 16
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_NUM_PORTS_WIDTH 8
+/* 16-byte host UUID read out of the adapter. See MC_CMD_HOST_INFO_OP_GET_UUID
+ * for further details.
+ */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_HOST_UUID_OFST 4
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_HOST_UUID_LEN 1
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_HOST_UUID_NUM 16
+/* A unique identifier per adapter. The base MAC address of the card is used
+ * for this purpose.
+ */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_GUID_OFST 20
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_GUID_LEN 1
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_GUID_NUM 6
+
+/* MC_CMD_TSAN_INFO_OUT_GET_CFG_V2 msgresponse */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_LEN 36
+/* Information about the configuration parameters returned in this response. */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_CONFIG_WORD_OFST 0
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_CONFIG_WORD_LEN 4
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_CAP_FLAGS_LBN 0
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_CAP_FLAGS_WIDTH 16
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_FLAG_HOST_UUID_VALID_LBN 0
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_FLAG_HOST_UUID_VALID_WIDTH 1
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_NUM_PORTS_LBN 16
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_NUM_PORTS_WIDTH 8
+/* 16-byte host UUID read out of the adapter. See MC_CMD_HOST_INFO_OP_GET_UUID
+ * for further details.
+ */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_HOST_UUID_OFST 4
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_HOST_UUID_LEN 1
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_HOST_UUID_NUM 16
+/* A unique identifier per adapter. The base MAC address of the card is used
+ * for this purpose.
+ */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_GUID_OFST 20
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_GUID_LEN 1
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_GUID_NUM 6
+/* Unused bytes, defined for 32-bit alignment of new fields. */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_UNUSED_OFST 26
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_UNUSED_LEN 2
+/* Maximum number of TSA statistics counters in each direction of dataflow
+ * supported on the card. Note that the statistics counters are always
+ * allocated in pairs, i.e. a counter ID is associated with one Tx and one Rx
+ * counter.
+ */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_MAX_STATS_OFST 28
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_MAX_STATS_LEN 4
+/* Width of each statistics counter (represented in bits). This gives an
+ * indication of wrap point to the user.
+ */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_STATS_WIDTH_OFST 32
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_STATS_WIDTH_LEN 4
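+
+/*
+ * Illustrative sketch only, not part of the generated MCDI definitions:
+ * decoding the CONFIG_WORD of a GET_CFG(_V2) response using the _LBN/_WIDTH
+ * pairs above. The word is assumed to have been converted from little-endian
+ * wire order to host order already; the helper names are assumptions.
+ */
+#include <stdint.h>
+
+static inline int
+example_tsan_cfg_host_uuid_valid(uint32_t config_word)
+{
+	return (config_word >>
+		MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_FLAG_HOST_UUID_VALID_LBN) & 1u;
+}
+
+static inline uint8_t
+example_tsan_cfg_num_ports(uint32_t config_word)
+{
+	uint32_t width = MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_NUM_PORTS_WIDTH;
+
+	return (config_word >> MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_NUM_PORTS_LBN) &
+	       ((1u << width) - 1u);
+}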
+
+
+/***********************************/
+/* MC_CMD_TSA_STATISTICS
+ * TSA adapter statistics operations.
+ */
+#define MC_CMD_TSA_STATISTICS 0x130
+#undef MC_CMD_0x130_PRIVILEGE_CTG
+
+#define MC_CMD_0x130_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TSA_STATISTICS_IN msgrequest */
+#define MC_CMD_TSA_STATISTICS_IN_LEN 4
+/* TSA statistics sub-operation code */
+#define MC_CMD_TSA_STATISTICS_IN_OP_CODE_OFST 0
+#define MC_CMD_TSA_STATISTICS_IN_OP_CODE_LEN 4
+/* enum: Get the configuration parameters that describe the TSA statistics
+ * layout on the adapter.
+ */
+#define MC_CMD_TSA_STATISTICS_OP_GET_CONFIG 0x0
+/* enum: Read and/or clear TSA statistics counters. */
+#define MC_CMD_TSA_STATISTICS_OP_READ_CLEAR 0x1
+
+/* MC_CMD_TSA_STATISTICS_IN_GET_CONFIG msgrequest */
+#define MC_CMD_TSA_STATISTICS_IN_GET_CONFIG_LEN 4
+/* TSA statistics sub-operation code */
+#define MC_CMD_TSA_STATISTICS_IN_GET_CONFIG_OP_CODE_OFST 0
+#define MC_CMD_TSA_STATISTICS_IN_GET_CONFIG_OP_CODE_LEN 4
+
+/* MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG msgresponse */
+#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_LEN 8
+/* Maximum number of TSA statistics counters in each direction of dataflow
+ * supported on the card. Note that the statistics counters are always
+ * allocated in pairs, i.e. a counter ID is associated with one Tx and one Rx
+ * counter.
+ */
+#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_MAX_STATS_OFST 0
+#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_MAX_STATS_LEN 4
+/* Width of each statistics counter (represented in bits). This gives an
+ * indication of wrap point to the user.
+ */
+#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_STATS_WIDTH_OFST 4
+#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_STATS_WIDTH_LEN 4
+
+/* MC_CMD_TSA_STATISTICS_IN_READ_CLEAR msgrequest */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LENMIN 20
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LENMAX 252
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LEN(num) (16+4*(num))
+/* TSA statistics sub-operation code */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_OP_CODE_OFST 0
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_OP_CODE_LEN 4
+/* Parameters describing the statistics operation */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_FLAGS_OFST 4
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_FLAGS_LEN 4
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_READ_LBN 0
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_READ_WIDTH 1
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_CLEAR_LBN 1
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_CLEAR_WIDTH 1
+/* Counter ID list specification type */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_MODE_OFST 8
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_MODE_LEN 4
+/* enum: The statistics counters are specified as an unordered list of
+ * individual counter ID.
+ */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LIST 0x0
+/* enum: The statistics counters are specified as a range of consecutive
+ * counter IDs.
+ */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_RANGE 0x1
+/* Number of statistics counters */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_NUM_STATS_OFST 12
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_NUM_STATS_LEN 4
+/* Counter IDs to be read/cleared. When mode is set to LIST, this entry holds a
+ * list of counter IDs to be operated on. When mode is set to RANGE, this entry
+ * holds a single counter ID representing the start of the range of counter IDs
+ * to be operated on.
+ */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_OFST 16
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_LEN 4
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_MINNUM 1
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_MAXNUM 59
+
+/* MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR msgresponse */
+#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_LENMIN 24
+#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_LENMAX 248
+#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_LEN(num) (8+16*(num))
+/* Number of statistics counters returned in this response */
+#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_NUM_STATS_OFST 0
+#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_NUM_STATS_LEN 4
+/* MC_TSA_STATISTICS_ENTRY. Note that this field is expected to start at a
+ * 64-bit aligned offset.
+ */
+#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_OFST 8
+#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_LEN 16
+#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_MINNUM 1
+#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_MAXNUM 15
+
+/* MC_TSA_STATISTICS_ENTRY structuredef */
+#define MC_TSA_STATISTICS_ENTRY_LEN 16
+/* Tx statistics counter */
+#define MC_TSA_STATISTICS_ENTRY_TX_STAT_OFST 0
+#define MC_TSA_STATISTICS_ENTRY_TX_STAT_LEN 8
+#define MC_TSA_STATISTICS_ENTRY_TX_STAT_LO_OFST 0
+#define MC_TSA_STATISTICS_ENTRY_TX_STAT_HI_OFST 4
+#define MC_TSA_STATISTICS_ENTRY_TX_STAT_LBN 0
+#define MC_TSA_STATISTICS_ENTRY_TX_STAT_WIDTH 64
+/* Rx statistics counter */
+#define MC_TSA_STATISTICS_ENTRY_RX_STAT_OFST 8
+#define MC_TSA_STATISTICS_ENTRY_RX_STAT_LEN 8
+#define MC_TSA_STATISTICS_ENTRY_RX_STAT_LO_OFST 8
+#define MC_TSA_STATISTICS_ENTRY_RX_STAT_HI_OFST 12
+#define MC_TSA_STATISTICS_ENTRY_RX_STAT_LBN 64
+#define MC_TSA_STATISTICS_ENTRY_RX_STAT_WIDTH 64
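+
+/*
+ * Illustrative sketch only, not part of the generated MCDI definitions:
+ * building a READ_CLEAR request in RANGE mode, where a single COUNTER_ID
+ * dword gives the first counter of the range and NUM_STATS gives its length.
+ * The helper name is an assumption; the request length for one COUNTER_ID
+ * entry is MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LEN(1).
+ */
+#include <stdint.h>
+#include <string.h>
+#include <rte_byteorder.h>
+
+static inline void
+example_tsa_stats_read_clear_range(
+	uint8_t payload[MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LEN(1)],
+	uint32_t first_counter_id, uint32_t num_counters, int clear)
+{
+	uint32_t le;
+	uint32_t flags = 1u << MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_READ_LBN;
+
+	if (clear)
+		flags |= 1u << MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_CLEAR_LBN;
+
+	le = rte_cpu_to_le_32(MC_CMD_TSA_STATISTICS_OP_READ_CLEAR);
+	memcpy(payload + MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_OP_CODE_OFST,
+	       &le, sizeof(le));
+	le = rte_cpu_to_le_32(flags);
+	memcpy(payload + MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_FLAGS_OFST,
+	       &le, sizeof(le));
+	le = rte_cpu_to_le_32(MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_RANGE);
+	memcpy(payload + MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_MODE_OFST,
+	       &le, sizeof(le));
+	le = rte_cpu_to_le_32(num_counters);
+	memcpy(payload + MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_NUM_STATS_OFST,
+	       &le, sizeof(le));
+	le = rte_cpu_to_le_32(first_counter_id);
+	memcpy(payload + MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_OFST,
+	       &le, sizeof(le));
+}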
+
+
+/***********************************/
+/* MC_CMD_ERASE_INITIAL_NIC_SECRET
+ * This request causes the NIC to find the initial NIC secret (programmed
+ * during ATE) in XPM memory and if and only if the NIC has already been
+ * rekeyed with MC_CMD_REKEY, erase it. This is used by manftest after
+ * installing TSA binding certificates. See SF-117631-TC.
+ */
+#define MC_CMD_ERASE_INITIAL_NIC_SECRET 0x131
+#undef MC_CMD_0x131_PRIVILEGE_CTG
+
+#define MC_CMD_0x131_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_ERASE_INITIAL_NIC_SECRET_IN msgrequest */
+#define MC_CMD_ERASE_INITIAL_NIC_SECRET_IN_LEN 0
+
+/* MC_CMD_ERASE_INITIAL_NIC_SECRET_OUT msgresponse */
+#define MC_CMD_ERASE_INITIAL_NIC_SECRET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TSA_CONFIG
+ * TSA adapter configuration operations. This command is used to prepare the
+ * NIC for TSA binding.
+ */
+#define MC_CMD_TSA_CONFIG 0x64
+#undef MC_CMD_0x64_PRIVILEGE_CTG
+
+#define MC_CMD_0x64_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_TSA_CONFIG_IN msgrequest */
+#define MC_CMD_TSA_CONFIG_IN_LEN 4
+/* TSA configuration sub-operation code */
+#define MC_CMD_TSA_CONFIG_IN_OP_OFST 0
+#define MC_CMD_TSA_CONFIG_IN_OP_LEN 4
+/* enum: Append a single item to the tsa_config partition. Items will be
+ * encrypted unless they are declared as non-sensitive. Returns
+ * MC_CMD_ERR_EEXIST if the tag is already present.
+ */
+#define MC_CMD_TSA_CONFIG_OP_APPEND 0x1
+/* enum: Reset the tsa_config partition to a clean state. */
+#define MC_CMD_TSA_CONFIG_OP_RESET 0x2
+/* enum: Read back a configured item from tsa_config partition. Returns
+ * MC_CMD_ERR_ENOENT if the item doesn't exist, or MC_CMD_ERR_EPERM if the item
+ * is declared as sensitive (i.e. is encrypted).
+ */
+#define MC_CMD_TSA_CONFIG_OP_READ 0x3
+
+/* MC_CMD_TSA_CONFIG_IN_APPEND msgrequest */
+#define MC_CMD_TSA_CONFIG_IN_APPEND_LENMIN 12
+#define MC_CMD_TSA_CONFIG_IN_APPEND_LENMAX 252
+#define MC_CMD_TSA_CONFIG_IN_APPEND_LEN(num) (12+1*(num))
+/* TSA configuration sub-operation code. The value shall be
+ * MC_CMD_TSA_CONFIG_OP_APPEND.
+ */
+#define MC_CMD_TSA_CONFIG_IN_APPEND_OP_OFST 0
+#define MC_CMD_TSA_CONFIG_IN_APPEND_OP_LEN 4
+/* The tag to be appended */
+#define MC_CMD_TSA_CONFIG_IN_APPEND_TAG_OFST 4
+#define MC_CMD_TSA_CONFIG_IN_APPEND_TAG_LEN 4
+/* The length of the data in bytes */
+#define MC_CMD_TSA_CONFIG_IN_APPEND_LENGTH_OFST 8
+#define MC_CMD_TSA_CONFIG_IN_APPEND_LENGTH_LEN 4
+/* The item data */
+#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_OFST 12
+#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_LEN 1
+#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_MINNUM 0
+#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_MAXNUM 240
+
+/* MC_CMD_TSA_CONFIG_OUT_APPEND msgresponse */
+#define MC_CMD_TSA_CONFIG_OUT_APPEND_LEN 0
+
+/* MC_CMD_TSA_CONFIG_IN_RESET msgrequest */
+#define MC_CMD_TSA_CONFIG_IN_RESET_LEN 4
+/* TSA configuration sub-operation code. The value shall be
+ * MC_CMD_TSA_CONFIG_OP_RESET.
+ */
+#define MC_CMD_TSA_CONFIG_IN_RESET_OP_OFST 0
+#define MC_CMD_TSA_CONFIG_IN_RESET_OP_LEN 4
+
+/* MC_CMD_TSA_CONFIG_OUT_RESET msgresponse */
+#define MC_CMD_TSA_CONFIG_OUT_RESET_LEN 0
+
+/* MC_CMD_TSA_CONFIG_IN_READ msgrequest */
+#define MC_CMD_TSA_CONFIG_IN_READ_LEN 8
+/* TSA configuration sub-operation code. The value shall be
+ * MC_CMD_TSA_CONFIG_OP_READ.
+ */
+#define MC_CMD_TSA_CONFIG_IN_READ_OP_OFST 0
+#define MC_CMD_TSA_CONFIG_IN_READ_OP_LEN 4
+/* The tag to be read */
+#define MC_CMD_TSA_CONFIG_IN_READ_TAG_OFST 4
+#define MC_CMD_TSA_CONFIG_IN_READ_TAG_LEN 4
+
+/* MC_CMD_TSA_CONFIG_OUT_READ msgresponse */
+#define MC_CMD_TSA_CONFIG_OUT_READ_LENMIN 8
+#define MC_CMD_TSA_CONFIG_OUT_READ_LENMAX 252
+#define MC_CMD_TSA_CONFIG_OUT_READ_LEN(num) (8+1*(num))
+/* The tag that was read */
+#define MC_CMD_TSA_CONFIG_OUT_READ_TAG_OFST 0
+#define MC_CMD_TSA_CONFIG_OUT_READ_TAG_LEN 4
+/* The length of the data in bytes */
+#define MC_CMD_TSA_CONFIG_OUT_READ_LENGTH_OFST 4
+#define MC_CMD_TSA_CONFIG_OUT_READ_LENGTH_LEN 4
+/* The data of the item. */
+#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_OFST 8
+#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_LEN 1
+#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_MINNUM 0
+#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_MAXNUM 244
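+
+/*
+ * Illustrative sketch only, not part of the generated MCDI definitions:
+ * reading back a tsa_config item amounts to an 8-byte request carrying the
+ * READ sub-operation code and the tag of interest. The helper name is an
+ * assumption; a real caller would also handle MC_CMD_ERR_ENOENT and
+ * MC_CMD_ERR_EPERM as described above.
+ */
+#include <stdint.h>
+#include <string.h>
+#include <rte_byteorder.h>
+
+static inline void
+example_tsa_config_read_request(uint8_t payload[MC_CMD_TSA_CONFIG_IN_READ_LEN],
+				uint32_t tag)
+{
+	uint32_t le;
+
+	le = rte_cpu_to_le_32(MC_CMD_TSA_CONFIG_OP_READ);
+	memcpy(payload + MC_CMD_TSA_CONFIG_IN_READ_OP_OFST, &le, sizeof(le));
+	le = rte_cpu_to_le_32(tag);
+	memcpy(payload + MC_CMD_TSA_CONFIG_IN_READ_TAG_OFST, &le, sizeof(le));
+}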
+
+/* MC_TSA_IPV4_ITEM structuredef */
+#define MC_TSA_IPV4_ITEM_LEN 8
+/* Additional metadata describing the IP address information such as the
+ * physical port number the address is being used on. Unused space in this
+ * field is reserved for future expansion.
+ */
+#define MC_TSA_IPV4_ITEM_IPV4_ADDR_META_OFST 0
+#define MC_TSA_IPV4_ITEM_IPV4_ADDR_META_LEN 4
+#define MC_TSA_IPV4_ITEM_PORT_IDX_LBN 0
+#define MC_TSA_IPV4_ITEM_PORT_IDX_WIDTH 8
+#define MC_TSA_IPV4_ITEM_IPV4_ADDR_META_LBN 0
+#define MC_TSA_IPV4_ITEM_IPV4_ADDR_META_WIDTH 32
+/* The IPv4 address in little endian byte order. */
+#define MC_TSA_IPV4_ITEM_IPV4_ADDR_OFST 4
+#define MC_TSA_IPV4_ITEM_IPV4_ADDR_LEN 4
+#define MC_TSA_IPV4_ITEM_IPV4_ADDR_LBN 32
+#define MC_TSA_IPV4_ITEM_IPV4_ADDR_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_TSA_IPADDR
+ * TSA operations relating to the monitoring and expiry of local IP addresses
+ * discovered by the controller. These commands are sent from a TSA controller
+ * to a TSA adapter.
+ */
+#define MC_CMD_TSA_IPADDR 0x65
+#undef MC_CMD_0x65_PRIVILEGE_CTG
+
+#define MC_CMD_0x65_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TSA_IPADDR_IN msgrequest */
+#define MC_CMD_TSA_IPADDR_IN_LEN 4
+/* Header containing information to identify which sub-operation of this
+ * command to perform. The header contains a 16-bit op-code. Unused space in
+ * this field is reserved for future expansion.
+ */
+#define MC_CMD_TSA_IPADDR_IN_OP_HDR_OFST 0
+#define MC_CMD_TSA_IPADDR_IN_OP_HDR_LEN 4
+#define MC_CMD_TSA_IPADDR_IN_OP_LBN 0
+#define MC_CMD_TSA_IPADDR_IN_OP_WIDTH 16
+/* enum: Request that the adapter verifies that the IPv4 addresses supplied are
+ * still in use by the host by sending ARP probes to the host. The MC does not
+ * wait for a response to the probes and sends an MCDI response to the
+ * controller once the probes have been sent to the host. The response to the
+ * probes (if there are any) will be forwarded to the controller using
+ * MC_CMD_TSA_INFO alerts.
+ */
+#define MC_CMD_TSA_IPADDR_OP_VALIDATE_IPV4 0x1
+/* enum: Notify the adapter that one or more IPv4 addresses are no longer valid
+ * for the host of the adapter. The adapter should remove the IPv4 addresses
+ * from its local cache.
+ */
+#define MC_CMD_TSA_IPADDR_OP_REMOVE_IPV4 0x2
+
+/* MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4 msgrequest */
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_LENMIN 16
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_LENMAX 248
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_LEN(num) (8+8*(num))
+/* Header containing information to identify which sub-operation of this
+ * command to perform. The header contains a 16-bit op-code. Unused space in
+ * this field is reserved for future expansion.
+ */
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_HDR_OFST 0
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_HDR_LEN 4
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_LBN 0
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_WIDTH 16
+/* Number of IPv4 addresses to validate. */
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_NUM_ITEMS_OFST 4
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_NUM_ITEMS_LEN 4
+/* The IPv4 addresses to validate, in struct MC_TSA_IPV4_ITEM format. */
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_OFST 8
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_LEN 8
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_LO_OFST 8
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_HI_OFST 12
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_MINNUM 1
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_MAXNUM 30
+
+/* MC_CMD_TSA_IPADDR_OUT_VALIDATE_IPV4 msgresponse */
+#define MC_CMD_TSA_IPADDR_OUT_VALIDATE_IPV4_LEN 0
+
+/* MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4 msgrequest */
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_LENMIN 16
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_LENMAX 248
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_LEN(num) (8+8*(num))
+/* Header containing information to identify which sub-operation of this
+ * command to perform. The header contains a 16-bit op-code. Unused space in
+ * this field is reserved for future expansion.
+ */
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_OP_HDR_OFST 0
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_OP_HDR_LEN 4
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_OP_LBN 0
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_OP_WIDTH 16
+/* Number of IPv4 addresses to remove. */
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_NUM_ITEMS_OFST 4
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_NUM_ITEMS_LEN 4
+/* The IPv4 addresses that have expired, in struct MC_TSA_IPV4_ITEM format. */
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_OFST 8
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_LEN 8
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_LO_OFST 8
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_HI_OFST 12
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_MINNUM 1
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_MAXNUM 30
+
+/* MC_CMD_TSA_IPADDR_OUT_REMOVE_IPV4 msgresponse */
+#define MC_CMD_TSA_IPADDR_OUT_REMOVE_IPV4_LEN 0
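+
+/*
+ * Illustrative sketch only, not part of the generated MCDI definitions:
+ * packing a single MC_TSA_IPV4_ITEM as used by the VALIDATE_IPV4 and
+ * REMOVE_IPV4 sub-operations above. The port index sits in the low byte of
+ * the metadata word and the address is carried in little-endian byte order;
+ * the helper name is an assumption.
+ */
+#include <stdint.h>
+#include <string.h>
+#include <rte_byteorder.h>
+
+static inline void
+example_pack_tsa_ipv4_item(uint8_t item[MC_TSA_IPV4_ITEM_LEN],
+			   uint8_t port_index, uint32_t ipv4_addr_le)
+{
+	uint32_t meta = (uint32_t)port_index << MC_TSA_IPV4_ITEM_PORT_IDX_LBN;
+	uint32_t le = rte_cpu_to_le_32(meta);
+
+	memcpy(item + MC_TSA_IPV4_ITEM_IPV4_ADDR_META_OFST, &le, sizeof(le));
+	memcpy(item + MC_TSA_IPV4_ITEM_IPV4_ADDR_OFST, &ipv4_addr_le,
+	       sizeof(ipv4_addr_le));
+}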
+
+
+/***********************************/
+/* MC_CMD_SECURE_NIC_INFO
+ * Get secure NIC information. While many of the features reported by these
+ * commands are related to TSA, they must be supported in firmware where TSA is
+ * disabled.
+ */
+#define MC_CMD_SECURE_NIC_INFO 0x132
+#undef MC_CMD_0x132_PRIVILEGE_CTG
+
+#define MC_CMD_0x132_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SECURE_NIC_INFO_IN msgrequest */
+#define MC_CMD_SECURE_NIC_INFO_IN_LEN 4
+/* sub-operation code info */
+#define MC_CMD_SECURE_NIC_INFO_IN_OP_HDR_OFST 0
+#define MC_CMD_SECURE_NIC_INFO_IN_OP_HDR_LEN 4
+#define MC_CMD_SECURE_NIC_INFO_IN_OP_LBN 0
+#define MC_CMD_SECURE_NIC_INFO_IN_OP_WIDTH 16
+/* enum: Get the status of various security settings, all signed along with a
+ * challenge chosen by the host.
+ */
+#define MC_CMD_SECURE_NIC_INFO_OP_STATUS 0x0
+
+/* MC_CMD_SECURE_NIC_INFO_IN_STATUS msgrequest */
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_LEN 24
+/* sub-operation code, must be MC_CMD_SECURE_NIC_INFO_OP_STATUS */
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_OP_HDR_OFST 0
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_OP_HDR_LEN 4
+/* Type of key to be used to sign response. */
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_KEY_TYPE_OFST 4
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_KEY_TYPE_LEN 4
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_UNUSED 0x0 /* enum */
+/* enum: Solarflare adapter authentication key, installed by Manftest. */
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_SF_ADAPTER_AUTH 0x1
+/* enum: TSA binding key, installed after adapter is bound to a TSA controller.
+ * This is not supported in firmware which does not support TSA.
+ */
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_TSA_BINDING 0x2
+/* enum: Customer adapter authentication key. Installed by the customer in the
+ * field, but otherwise similar to the Solarflare adapter authentication key.
+ */
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_CUSTOMER_ADAPTER_AUTH 0x3
+/* Random challenge generated by the host. */
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_CHALLENGE_OFST 8
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_CHALLENGE_LEN 16
+
+/* MC_CMD_SECURE_NIC_INFO_OUT_STATUS msgresponse */
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_LEN 420
+/* Length of the signature in MSG_SIGNATURE. */
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MSG_SIGNATURE_LEN_OFST 0
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MSG_SIGNATURE_LEN_LEN 4
+/* Signature over the message, starting at MESSAGE_TYPE and continuing to the
+ * end of the MCDI response, allowing the message format to be extended. The
+ * signature uses ECDSA 384 encoding in ASN.1 format. It has variable length,
+ * with a maximum of 384 bytes.
+ */
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MSG_SIGNATURE_OFST 4
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MSG_SIGNATURE_LEN 384
+/* Enum value indicating the type of response. This protects against chosen
+ * message attacks. The enum values are random rather than sequential to make
+ * it unlikely that values will be reused should other commands in a different
+ * namespace need to create signed messages.
+ */
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MESSAGE_TYPE_OFST 388
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MESSAGE_TYPE_LEN 4
+/* enum: Message type value for the response to a
+ * MC_CMD_SECURE_NIC_INFO_IN_STATUS message.
+ */
+#define MC_CMD_SECURE_NIC_INFO_STATUS 0xdb4
+/* The challenge provided by the host in the MC_CMD_SECURE_NIC_INFO_IN_STATUS
+ * message
+ */
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_CHALLENGE_OFST 392
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_CHALLENGE_LEN 16
+/* The first 32 bits of XPM memory, which include security and flag bits, die
+ * ID and chip ID revision. The meaning of these bits is defined in
+ * mc/include/mc/xpm.h in the firmwaresrc repository.
+ */
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_XPM_STATUS_BITS_OFST 408
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_XPM_STATUS_BITS_LEN 4
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_A_OFST 412
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_A_LEN 2
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_B_OFST 414
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_B_LEN 2
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_C_OFST 416
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_C_LEN 2
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_D_OFST 418
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_D_LEN 2
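+
+/*
+ * Illustrative sketch only, not part of the generated MCDI definitions:
+ * building the STATUS request, which carries the sub-operation code, the key
+ * type the host wants the response signed with and a 16-byte random
+ * challenge. The helper name is an assumption; generating the challenge is
+ * left to the caller.
+ */
+#include <stdint.h>
+#include <string.h>
+#include <rte_byteorder.h>
+
+static inline void
+example_secure_nic_info_status(
+	uint8_t payload[MC_CMD_SECURE_NIC_INFO_IN_STATUS_LEN],
+	uint32_t key_type, const uint8_t challenge[16])
+{
+	uint32_t le;
+
+	le = rte_cpu_to_le_32(MC_CMD_SECURE_NIC_INFO_OP_STATUS);
+	memcpy(payload + MC_CMD_SECURE_NIC_INFO_IN_STATUS_OP_HDR_OFST, &le,
+	       sizeof(le));
+	le = rte_cpu_to_le_32(key_type);
+	memcpy(payload + MC_CMD_SECURE_NIC_INFO_IN_STATUS_KEY_TYPE_OFST, &le,
+	       sizeof(le));
+	memcpy(payload + MC_CMD_SECURE_NIC_INFO_IN_STATUS_CHALLENGE_OFST,
+	       challenge, 16);
+}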
+
+
+/***********************************/
+/* MC_CMD_TSA_TEST
+ * A simple ping-pong command to test the adapter<>controller MCDI
+ * communication channel. This command makes no changes to the TSA adapter's
+ * internal state; it is used by the controller purely to verify that the MCDI
+ * communication channel is working. This command takes no additional
+ * parameters in its request or response.
+ */
+#define MC_CMD_TSA_TEST 0x125
+#undef MC_CMD_0x125_PRIVILEGE_CTG
+
+#define MC_CMD_0x125_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TSA_TEST_IN msgrequest */
+#define MC_CMD_TSA_TEST_IN_LEN 0
+
+/* MC_CMD_TSA_TEST_OUT msgresponse */
+#define MC_CMD_TSA_TEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TSA_RULESET_OVERRIDE
+ * Override TSA ruleset that is currently active on the adapter. This operation
+ * does not modify the ruleset itself. This operation provides a mechanism to
+ * apply an allow-all or deny-all operation on all packets, thereby completely
+ * ignoring the rule-set configured on the adapter. The main purpose of this
+ * operation is to provide a deterministic state to the TSA firewall during
+ * rule-set transitions.
+ */
+#define MC_CMD_TSA_RULESET_OVERRIDE 0x12a
+#undef MC_CMD_0x12a_PRIVILEGE_CTG
+
+#define MC_CMD_0x12a_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TSA_RULESET_OVERRIDE_IN msgrequest */
+#define MC_CMD_TSA_RULESET_OVERRIDE_IN_LEN 4
+/* The override state to apply. */
+#define MC_CMD_TSA_RULESET_OVERRIDE_IN_STATE_OFST 0
+#define MC_CMD_TSA_RULESET_OVERRIDE_IN_STATE_LEN 4
+/* enum: No override in place - the existing ruleset is in operation. */
+#define MC_CMD_TSA_RULESET_OVERRIDE_NONE 0x0
+/* enum: Block all packets seen on all datapath channels except those packets
+ * required for basic configuration of the TSA NIC such as ARPs and TSA-
+ * communication traffic. Such exceptional traffic is handled differently
+ * compared to TSA rulesets.
+ */
+#define MC_CMD_TSA_RULESET_OVERRIDE_BLOCK 0x1
+/* enum: Allow all packets through all datapath channels. The TSA adapter
+ * behaves like a normal NIC without any firewalls.
+ */
+#define MC_CMD_TSA_RULESET_OVERRIDE_ALLOW 0x2
+
+/* MC_CMD_TSA_RULESET_OVERRIDE_OUT msgresponse */
+#define MC_CMD_TSA_RULESET_OVERRIDE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TSAC_REQUEST
+ * Generic command to send requests from a TSA controller to a TSA adapter.
+ * Specific usage is determined by the TYPE field.
+ */
+#define MC_CMD_TSAC_REQUEST 0x12b
+#undef MC_CMD_0x12b_PRIVILEGE_CTG
+
+#define MC_CMD_0x12b_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TSAC_REQUEST_IN msgrequest */
+#define MC_CMD_TSAC_REQUEST_IN_LEN 4
+/* The type of request from the controller. */
+#define MC_CMD_TSAC_REQUEST_IN_TYPE_OFST 0
+#define MC_CMD_TSAC_REQUEST_IN_TYPE_LEN 4
+/* enum: Request the adapter to resend localIP information from its cache. The
+ * command does not return any IP address information; IP addresses are sent as
+ * TSA notifications as described in MC_CMD_TSA_INFO_IN_LOCAL_IP.
+ */
+#define MC_CMD_TSAC_REQUEST_LOCALIP 0x0
+
+/* MC_CMD_TSAC_REQUEST_OUT msgresponse */
+#define MC_CMD_TSAC_REQUEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SUC_VERSION
+ * Get the version of the SUC
+ */
+#define MC_CMD_SUC_VERSION 0x134
+#undef MC_CMD_0x134_PRIVILEGE_CTG
+
+#define MC_CMD_0x134_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SUC_VERSION_IN msgrequest */
+#define MC_CMD_SUC_VERSION_IN_LEN 0
+
+/* MC_CMD_SUC_VERSION_OUT msgresponse */
+#define MC_CMD_SUC_VERSION_OUT_LEN 24
+/* The SUC firmware version as four numbers - a.b.c.d */
+#define MC_CMD_SUC_VERSION_OUT_VERSION_OFST 0
+#define MC_CMD_SUC_VERSION_OUT_VERSION_LEN 4
+#define MC_CMD_SUC_VERSION_OUT_VERSION_NUM 4
+/* The date, in seconds since the Unix epoch, when the firmware image was
+ * built.
+ */
+#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_OFST 16
+#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_LEN 4
+/* The ID of the SUC chip. This is specific to the platform but typically
+ * indicates family, memory sizes etc. See SF-116728-SW for further details.
+ */
+#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_OFST 20
+#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_LEN 4
+
+/* MC_CMD_SUC_BOOT_VERSION_IN msgrequest: Get the version of the SUC boot
+ * loader.
+ */
+#define MC_CMD_SUC_BOOT_VERSION_IN_LEN 4
+#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_OFST 0
+#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_LEN 4
+/* enum: Requests the SUC boot version. */
+#define MC_CMD_SUC_VERSION_GET_BOOT_VERSION 0xb007700b
+
+/* MC_CMD_SUC_BOOT_VERSION_OUT msgresponse */
+#define MC_CMD_SUC_BOOT_VERSION_OUT_LEN 4
+/* The SUC boot version */
+#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_OFST 0
+#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SUC_MANFTEST
+ * Operations to support manftest on SUC based systems.
+ */
+#define MC_CMD_SUC_MANFTEST 0x135
+#undef MC_CMD_0x135_PRIVILEGE_CTG
+
+#define MC_CMD_0x135_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_SUC_MANFTEST_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_IN_LEN 4
+/* The manftest operation to be performed. */
+#define MC_CMD_SUC_MANFTEST_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_IN_OP_LEN 4
+/* enum: Read serial number and use count. */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_READ 0x0
+/* enum: Update use count on wearout adapter. */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE 0x1
+/* enum: Start an ADC calibration. */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START 0x2
+/* enum: Read the status of an ADC calibration. */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS 0x3
+/* enum: Read the results of an ADC calibration. */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT 0x4
+/* enum: Read the PCIe configuration. */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ 0x5
+/* enum: Write the PCIe configuration. */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE 0x6
+/* enum: Write FRU information to SUC. The FRU information is taken from the
+ * FRU_INFORMATION partition. Attempts to write to read-only FRUs are rejected.
+ */
+#define MC_CMD_SUC_MANFTEST_FRU_WRITE 0x7
+
+/* MC_CMD_SUC_MANFTEST_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_OUT_LEN 0
+
+/* MC_CMD_SUC_MANFTEST_WEAROUT_READ_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_IN_LEN 4
+/* The manftest operation to be performed. This must be
+ * MC_CMD_SUC_MANFTEST_WEAROUT_READ.
+ */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_IN_OP_LEN 4
+
+/* MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_LEN 20
+/* The serial number of the wearout adapter, see SF-112717-PR for format. */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_SERIAL_NUMBER_OFST 0
+#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_SERIAL_NUMBER_LEN 16
+/* The use count of the wearout adapter. */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_USE_COUNT_OFST 16
+#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_USE_COUNT_LEN 4
+
+/* MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_IN_LEN 4
+/* The manftest operation to be performed. This must be
+ * MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE.
+ */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_IN_OP_LEN 4
+
+/* MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_OUT_LEN 0
+
+/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_IN_LEN 4
+/* The manftest operation to be performed. This must be
+ * MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START.
+ */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_IN_OP_LEN 4
+
+/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_OUT_LEN 0
+
+/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_IN_LEN 4
+/* The manftest operation to be performed. This must be
+ * MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS.
+ */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_IN_OP_LEN 4
+
+/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_LEN 4
+/* The combined status of the calibration operation. */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FLAGS_OFST 0
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FLAGS_LEN 4
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_CALIBRATING_LBN 0
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_CALIBRATING_WIDTH 1
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FAILED_LBN 1
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FAILED_WIDTH 1
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_RESULT_LBN 2
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_RESULT_WIDTH 4
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_INDEX_LBN 6
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_INDEX_WIDTH 2
+
+/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_IN_LEN 4
+/* The manftest operation to be performed. This must be
+ * MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT.
+ */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_IN_OP_LEN 4
+
+/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT_LEN 12
+/* The set of calibration results. */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT_VALUE_OFST 0
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT_VALUE_LEN 4
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT_VALUE_NUM 3
+
+/* MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_IN_LEN 4
+/* The manftest operation to be performed. This must be
+ * MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ.
+ */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_IN_OP_LEN 4
+
+/* MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_LEN 4
+/* The PCIe vendor ID. */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_VENDOR_ID_OFST 0
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_VENDOR_ID_LEN 2
+/* The PCIe device ID. */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_DEVICE_ID_OFST 2
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_DEVICE_ID_LEN 2
+
+/* MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_LEN 8
+/* The manftest operation to be performed. This must be
+ * MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE.
+ */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_OP_LEN 4
+/* The PCIe vendor ID. */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_VENDOR_ID_OFST 4
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_VENDOR_ID_LEN 2
+/* The PCIe device ID. */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_DEVICE_ID_OFST 6
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_DEVICE_ID_LEN 2
+
+/* MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_OUT_LEN 0
+
+/* MC_CMD_SUC_MANFTEST_FRU_WRITE_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_FRU_WRITE_IN_LEN 4
+/* The manftest operation to be performed. This must be
+ * MC_CMD_SUC_MANFTEST_FRU_WRITE.
+ */
+#define MC_CMD_SUC_MANFTEST_FRU_WRITE_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_FRU_WRITE_IN_OP_LEN 4
+
+/* MC_CMD_SUC_MANFTEST_FRU_WRITE_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_FRU_WRITE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_CERTIFICATE
+ * Request a certificate.
+ */
+#define MC_CMD_GET_CERTIFICATE 0x12c
+#undef MC_CMD_0x12c_PRIVILEGE_CTG
+
+#define MC_CMD_0x12c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CERTIFICATE_IN msgrequest */
+#define MC_CMD_GET_CERTIFICATE_IN_LEN 8
+/* Type of the certificate to be retrieved. */
+#define MC_CMD_GET_CERTIFICATE_IN_TYPE_OFST 0
+#define MC_CMD_GET_CERTIFICATE_IN_TYPE_LEN 4
+#define MC_CMD_GET_CERTIFICATE_IN_UNUSED 0x0 /* enum */
+#define MC_CMD_GET_CERTIFICATE_IN_AAC 0x1 /* enum */
+/* enum: Adapter Authentication Certificate (AAC). The AAC is unique to each
+ * adapter and is used to verify its authenticity. It is installed by Manftest.
+ */
+#define MC_CMD_GET_CERTIFICATE_IN_ADAPTER_AUTH 0x1
+#define MC_CMD_GET_CERTIFICATE_IN_AASC 0x2 /* enum */
+/* enum: Adapter Authentication Signing Certificate (AASC). The AASC is shared
+ * by a group of adapters (typically a purchase order) and is used to verify
+ * the validity of AAC along with the SF root certificate. It is installed by
+ * Manftest.
+ */
+#define MC_CMD_GET_CERTIFICATE_IN_ADAPTER_AUTH_SIGNING 0x2
+#define MC_CMD_GET_CERTIFICATE_IN_CUSTOMER_AAC 0x3 /* enum */
+/* enum: Customer Adapter Authentication Certificate. The Customer AAC is
+ * unique to each adapter and is used to verify its authenticity in cases where
+ * either the AAC is not installed or a customer desires to use their own
+ * certificate chain. It is installed by the customer.
+ */
+#define MC_CMD_GET_CERTIFICATE_IN_CUSTOMER_ADAPTER_AUTH 0x3
+#define MC_CMD_GET_CERTIFICATE_IN_CUSTOMER_AASC 0x4 /* enum */
+/* enum: Customer Adapter Authentication Signing Certificate. The Customer
+ * AASC is shared by a group of adapters and is used to verify the validity of
+ * the Customer AAC along with the customer's root certificate. It is
+ * installed by the customer.
+ */
+#define MC_CMD_GET_CERTIFICATE_IN_CUSTOMER_ADAPTER_AUTH_SIGNING 0x4
+/* Offset, measured in bytes, relative to the start of the certificate data
+ * from which the certificate is to be retrieved.
+ */
+#define MC_CMD_GET_CERTIFICATE_IN_OFFSET_OFST 4
+#define MC_CMD_GET_CERTIFICATE_IN_OFFSET_LEN 4
+
+/* MC_CMD_GET_CERTIFICATE_OUT msgresponse */
+#define MC_CMD_GET_CERTIFICATE_OUT_LENMIN 13
+#define MC_CMD_GET_CERTIFICATE_OUT_LENMAX 252
+#define MC_CMD_GET_CERTIFICATE_OUT_LEN(num) (12+1*(num))
+/* Type of the certificate. */
+#define MC_CMD_GET_CERTIFICATE_OUT_TYPE_OFST 0
+#define MC_CMD_GET_CERTIFICATE_OUT_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_CERTIFICATE_IN/TYPE */
+/* Offset, measured in bytes, relative to the start of the certificate data
+ * from which data in this message starts.
+ */
+#define MC_CMD_GET_CERTIFICATE_OUT_OFFSET_OFST 4
+#define MC_CMD_GET_CERTIFICATE_OUT_OFFSET_LEN 4
+/* Total length of the certificate data. */
+#define MC_CMD_GET_CERTIFICATE_OUT_TOTAL_LENGTH_OFST 8
+#define MC_CMD_GET_CERTIFICATE_OUT_TOTAL_LENGTH_LEN 4
+/* The certificate data. */
+#define MC_CMD_GET_CERTIFICATE_OUT_DATA_OFST 12
+#define MC_CMD_GET_CERTIFICATE_OUT_DATA_LEN 1
+#define MC_CMD_GET_CERTIFICATE_OUT_DATA_MINNUM 1
+#define MC_CMD_GET_CERTIFICATE_OUT_DATA_MAXNUM 240
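Because each response carries at most 240 bytes of DATA, retrieving a whole certificate means looping on OFFSET until TOTAL_LENGTH bytes have been collected. The sketch below shows that loop under the assumption of a hypothetical mcdi_get_certificate_chunk() transport helper, which is not part of these definitions.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical transport helper: one MC_CMD_GET_CERTIFICATE exchange for the
 * given certificate type and byte offset. On success it fills *data_len with
 * the number of DATA bytes returned (at most 240) and *total_len with
 * TOTAL_LENGTH, and returns 0.
 */
int mcdi_get_certificate_chunk(uint32_t type, uint32_t offset, uint8_t *data,
			       size_t *data_len, uint32_t *total_len);

/* Accumulate a whole certificate by advancing OFFSET until TOTAL_LENGTH bytes
 * have been received. On success *out points to a malloc()ed buffer of
 * *out_len bytes which the caller frees.
 */
int get_certificate(uint32_t type, uint8_t **out, uint32_t *out_len)
{
	uint8_t chunk[240];	/* MC_CMD_GET_CERTIFICATE_OUT_DATA_MAXNUM */
	uint8_t *buf = NULL;
	uint32_t total = 0, offset = 0;

	do {
		size_t got = sizeof(chunk);
		int rc = mcdi_get_certificate_chunk(type, offset, chunk,
						    &got, &total);

		if (rc != 0 || (got == 0 && offset < total)) {
			free(buf);
			return rc != 0 ? rc : -1;
		}
		if (buf == NULL && (buf = malloc(total ? total : 1)) == NULL)
			return -1;
		if (offset + got > total)
			got = total - offset;
		memcpy(buf + offset, chunk, got);
		offset += got;
	} while (offset < total);

	*out = buf;
	*out_len = total;
	return 0;
}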
+
+
+/***********************************/
+/* MC_CMD_GET_NIC_GLOBAL
+ * Get a global value which applies to all PCI functions
+ */
+#define MC_CMD_GET_NIC_GLOBAL 0x12d
+#undef MC_CMD_0x12d_PRIVILEGE_CTG
+
+#define MC_CMD_0x12d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_NIC_GLOBAL_IN msgrequest */
+#define MC_CMD_GET_NIC_GLOBAL_IN_LEN 4
+/* Key to request value for, see enum values in MC_CMD_SET_NIC_GLOBAL. If the
+ * given key is unknown to the current firmware, the call will fail with
+ * ENOENT.
+ */
+#define MC_CMD_GET_NIC_GLOBAL_IN_KEY_OFST 0
+#define MC_CMD_GET_NIC_GLOBAL_IN_KEY_LEN 4
+
+/* MC_CMD_GET_NIC_GLOBAL_OUT msgresponse */
+#define MC_CMD_GET_NIC_GLOBAL_OUT_LEN 4
+/* Value of requested key, see key descriptions below. */
+#define MC_CMD_GET_NIC_GLOBAL_OUT_VALUE_OFST 0
+#define MC_CMD_GET_NIC_GLOBAL_OUT_VALUE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SET_NIC_GLOBAL
+ * Set a global value which applies to all PCI functions. Most global values
+ * can only be changed under specific conditions, and this call will return an
+ * appropriate error otherwise (see key descriptions).
+ */
+#define MC_CMD_SET_NIC_GLOBAL 0x12e
+#undef MC_CMD_0x12e_PRIVILEGE_CTG
+
+#define MC_CMD_0x12e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_NIC_GLOBAL_IN msgrequest */
+#define MC_CMD_SET_NIC_GLOBAL_IN_LEN 8
+/* Key to change value of. Firmware will return ENOENT for keys it doesn't know
+ * about.
+ */
+#define MC_CMD_SET_NIC_GLOBAL_IN_KEY_OFST 0
+#define MC_CMD_SET_NIC_GLOBAL_IN_KEY_LEN 4
+/* enum: Request switching the datapath firmware sub-variant. Currently only
+ * useful when running the DPDK f/w variant. See key values below, and the DPDK
+ * section of the EF10 Driver Writers Guide. Note that any driver attaching
+ * with the SUBVARIANT_AWARE flag cleared is implicitly considered as a request
+ * to switch back to the default sub-variant, and will thus reset this value.
+ * If a sub-variant switch happens, all other PCI functions will get their
+ * resources reset (they will see an MC reboot).
+ */
+#define MC_CMD_SET_NIC_GLOBAL_IN_FIRMWARE_SUBVARIANT 0x1
+/* New value to set, see key descriptions above. */
+#define MC_CMD_SET_NIC_GLOBAL_IN_VALUE_OFST 4
+#define MC_CMD_SET_NIC_GLOBAL_IN_VALUE_LEN 4
+/* enum: Only if KEY = FIRMWARE_SUBVARIANT. Default sub-variant with support
+ * for maximum features for the current f/w variant. A request from a
+ * privileged function to set this particular value will always succeed.
+ */
+#define MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_DEFAULT 0x0
+/* enum: Only if KEY = FIRMWARE_SUBVARIANT. Increases packet rate at the cost
+ * of not supporting any TX checksum offloads. Only supported when running some
+ * f/w variants, others will return ENOTSUP (as reported by the homonymous bit
+ * in MC_CMD_GET_CAPABILITIES_V2). Can only be set when no other drivers are
+ * attached, and the calling driver must have no resources allocated. See the
+ * DPDK section of the EF10 Driver Writers Guide for a more detailed
+ * description with possible error codes.
+ */
+#define MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_NO_TX_CSUM 0x1
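For example, a request to switch to the no-TX-checksum sub-variant is an 8-byte payload with KEY at offset 0 and VALUE at offset 4, both little-endian dwords. The sketch below builds that payload; mcdi_rpc() is a hypothetical stand-in for the driver's MCDI transport, not an API defined here.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical MCDI transport helper: issue command 'cmd' with the given
 * request payload and return 0 on success.
 */
int mcdi_rpc(unsigned int cmd, const uint8_t *req, size_t req_len);

static void put_dword_le(uint8_t *buf, uint32_t v)
{
	buf[0] = v & 0xff;
	buf[1] = (v >> 8) & 0xff;
	buf[2] = (v >> 16) & 0xff;
	buf[3] = (v >> 24) & 0xff;
}

/* Build MC_CMD_SET_NIC_GLOBAL_IN asking for the NO_TX_CSUM sub-variant:
 * KEY = FIRMWARE_SUBVARIANT (0x1), VALUE = FW_SUBVARIANT_NO_TX_CSUM (0x1).
 */
int request_no_tx_csum_subvariant(void)
{
	uint8_t req[8];	/* MC_CMD_SET_NIC_GLOBAL_IN_LEN */

	put_dword_le(req + 0, 0x1);
	put_dword_le(req + 4, 0x1);
	return mcdi_rpc(0x12e /* MC_CMD_SET_NIC_GLOBAL */, req, sizeof(req));
}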
+
+
+/***********************************/
+/* MC_CMD_LTSSM_TRACE_POLL
+ * Medford2 hardware has support for logging all LTSSM state transitions to a
+ * hardware buffer. When built with WITH_LTSSM_TRACE=1, the firmware will
+ * periodically dump the contents of this hardware buffer to an internal
+ * firmware buffer for later extraction.
+ */
+#define MC_CMD_LTSSM_TRACE_POLL 0x12f
+#undef MC_CMD_0x12f_PRIVILEGE_CTG
+
+#define MC_CMD_0x12f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_LTSSM_TRACE_POLL_IN msgrequest: Read transitions from the firmware
+ * internal buffer.
+ */
+#define MC_CMD_LTSSM_TRACE_POLL_IN_LEN 4
+/* The maximum number of rows that the caller can accept. The format of each row
+ * is defined in MC_CMD_LTSSM_TRACE_POLL_OUT.
+ */
+#define MC_CMD_LTSSM_TRACE_POLL_IN_MAX_ROW_COUNT_OFST 0
+#define MC_CMD_LTSSM_TRACE_POLL_IN_MAX_ROW_COUNT_LEN 4
+
+/* MC_CMD_LTSSM_TRACE_POLL_OUT msgresponse */
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LENMIN 16
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LENMAX 248
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LEN(num) (8+8*(num))
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_FLAGS_OFST 0
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_FLAGS_LEN 4
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_HW_BUFFER_OVERFLOW_LBN 0
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_HW_BUFFER_OVERFLOW_WIDTH 1
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_FW_BUFFER_OVERFLOW_LBN 1
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_FW_BUFFER_OVERFLOW_WIDTH 1
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_CONTINUES_LBN 31
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_CONTINUES_WIDTH 1
+/* The number of rows present in this response. */
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROW_COUNT_OFST 4
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROW_COUNT_LEN 4
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_OFST 8
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_LEN 8
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_LO_OFST 8
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_HI_OFST 12
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_MINNUM 0
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_MAXNUM 30
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LTSSM_STATE_LBN 0
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LTSSM_STATE_WIDTH 6
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_RDLH_LINK_UP_LBN 6
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_RDLH_LINK_UP_WIDTH 1
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_WAKE_N_LBN 7
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_WAKE_N_WIDTH 1
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_TIMESTAMP_PS_LBN 8
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_TIMESTAMP_PS_WIDTH 24
+/* The time of the LTSSM transition. Times are reported as fractional
+ * microseconds since MC boot (wrapping at 2^32 us). The fractional part is
+ * reported in picoseconds (0 <= TIMESTAMP_PS < 1000000), so the timestamp in
+ * seconds is (TIMESTAMP_US + TIMESTAMP_PS / 1000000) / 1000000.
+ */
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_TIMESTAMP_US_OFST 12
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_TIMESTAMP_US_LEN 4
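Each ROWS entry is a 64-bit value whose low dword packs LTSSM_STATE (bits 0-5), RDLH_LINK_UP (bit 6), WAKE_N (bit 7) and TIMESTAMP_PS (bits 8-31), with TIMESTAMP_US in the high dword. A rough decoding sketch of one row, using those bit positions and the timestamp formula above, follows.

#include <stdint.h>
#include <stdio.h>

/* Decode one MC_CMD_LTSSM_TRACE_POLL_OUT row from its LO/HI dwords using the
 * LBN/WIDTH layout defined above.
 */
static void decode_ltssm_row(uint32_t row_lo, uint32_t row_hi)
{
	unsigned int state = row_lo & 0x3f;		/* LTSSM_STATE */
	unsigned int link_up = (row_lo >> 6) & 0x1;	/* RDLH_LINK_UP */
	unsigned int wake_n = (row_lo >> 7) & 0x1;	/* WAKE_N */
	uint32_t ts_ps = row_lo >> 8;			/* 0 <= TIMESTAMP_PS < 1000000 */
	uint32_t ts_us = row_hi;			/* TIMESTAMP_US */
	/* seconds = (TIMESTAMP_US + TIMESTAMP_PS / 1000000) / 1000000 */
	double seconds = ((double)ts_us + (double)ts_ps / 1e6) / 1e6;

	printf("LTSSM state %u link_up=%u wake_n=%u at %.9f s since MC boot\n",
	       state, link_up, wake_n, seconds);
}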
+
#endif /* _SIENA_MC_DRIVER_PCOL_H */
/*! \cidoxg_end */
diff --git a/drivers/net/sfc/base/efx_regs_mcdi_aoe.h b/drivers/net/sfc/base/efx_regs_mcdi_aoe.h
new file mode 100644
index 00000000..6aaf212f
--- /dev/null
+++ b/drivers/net/sfc/base/efx_regs_mcdi_aoe.h
@@ -0,0 +1,2914 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2008-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+/*! \cidoxg_firmware_mc_cmd */
+
+#ifndef _SIENA_MC_DRIVER_PCOL_AOE_H
+#define _SIENA_MC_DRIVER_PCOL_AOE_H
+
+
+
+/***********************************/
+/* MC_CMD_FC
+ * Perform an FC operation
+ */
+#define MC_CMD_FC 0x9
+
+/* MC_CMD_FC_IN msgrequest */
+#define MC_CMD_FC_IN_LEN 4
+#define MC_CMD_FC_IN_OP_HDR_OFST 0
+#define MC_CMD_FC_IN_OP_HDR_LEN 4
+#define MC_CMD_FC_IN_OP_LBN 0
+#define MC_CMD_FC_IN_OP_WIDTH 8
+/* enum: NULL MCDI command to FC. */
+#define MC_CMD_FC_OP_NULL 0x1
+/* enum: Unused opcode */
+#define MC_CMD_FC_OP_UNUSED 0x2
+/* enum: MAC driver commands */
+#define MC_CMD_FC_OP_MAC 0x3
+/* enum: Read FC memory */
+#define MC_CMD_FC_OP_READ32 0x4
+/* enum: Write to FC memory */
+#define MC_CMD_FC_OP_WRITE32 0x5
+/* enum: Read FC memory */
+#define MC_CMD_FC_OP_TRC_READ 0x6
+/* enum: Write to FC memory */
+#define MC_CMD_FC_OP_TRC_WRITE 0x7
+/* enum: FC firmware Version */
+#define MC_CMD_FC_OP_GET_VERSION 0x8
+/* enum: Read FC memory */
+#define MC_CMD_FC_OP_TRC_RX_READ 0x9
+/* enum: Write to FC memory */
+#define MC_CMD_FC_OP_TRC_RX_WRITE 0xa
+/* enum: SFP parameters */
+#define MC_CMD_FC_OP_SFP 0xb
+/* enum: DDR3 test */
+#define MC_CMD_FC_OP_DDR_TEST 0xc
+/* enum: Get Crash context from FC */
+#define MC_CMD_FC_OP_GET_ASSERT 0xd
+/* enum: Get FPGA Build registers */
+#define MC_CMD_FC_OP_FPGA_BUILD 0xe
+/* enum: Read map support commands */
+#define MC_CMD_FC_OP_READ_MAP 0xf
+/* enum: FC Capabilities */
+#define MC_CMD_FC_OP_CAPABILITIES 0x10
+/* enum: FC Global flags */
+#define MC_CMD_FC_OP_GLOBAL_FLAGS 0x11
+/* enum: FC IO using relative addressing modes */
+#define MC_CMD_FC_OP_IO_REL 0x12
+/* enum: FPGA link information */
+#define MC_CMD_FC_OP_UHLINK 0x13
+/* enum: Configure loopbacks and link on FPGA ports */
+#define MC_CMD_FC_OP_SET_LINK 0x14
+/* enum: Licensing operations relating to AOE */
+#define MC_CMD_FC_OP_LICENSE 0x15
+/* enum: Startup information to the FC */
+#define MC_CMD_FC_OP_STARTUP 0x16
+/* enum: Configure a DMA read */
+#define MC_CMD_FC_OP_DMA 0x17
+/* enum: Configure a timed read */
+#define MC_CMD_FC_OP_TIMED_READ 0x18
+/* enum: Control UART logging */
+#define MC_CMD_FC_OP_LOG 0x19
+/* enum: Get the value of a given clock_id */
+#define MC_CMD_FC_OP_CLOCK 0x1a
+/* enum: DDR3/QDR3 parameters */
+#define MC_CMD_FC_OP_DDR 0x1b
+/* enum: PTP and timestamp control */
+#define MC_CMD_FC_OP_TIMESTAMP 0x1c
+/* enum: Commands for SPI Flash interface */
+#define MC_CMD_FC_OP_SPI 0x1d
+/* enum: Commands for diagnostic components */
+#define MC_CMD_FC_OP_DIAG 0x1e
+/* enum: External AOE port. */
+#define MC_CMD_FC_IN_PORT_EXT_OFST 0x0
+/* enum: Internal AOE port. */
+#define MC_CMD_FC_IN_PORT_INT_OFST 0x40
+
+/* MC_CMD_FC_IN_NULL msgrequest */
+#define MC_CMD_FC_IN_NULL_LEN 4
+#define MC_CMD_FC_IN_CMD_OFST 0
+#define MC_CMD_FC_IN_CMD_LEN 4
+
+/* MC_CMD_FC_IN_PHY msgrequest */
+#define MC_CMD_FC_IN_PHY_LEN 5
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* FC PHY driver operation code */
+#define MC_CMD_FC_IN_PHY_OP_OFST 4
+#define MC_CMD_FC_IN_PHY_OP_LEN 1
+/* enum: PHY init handler */
+#define MC_CMD_FC_OP_PHY_OP_INIT 0x1
+/* enum: PHY reconfigure handler */
+#define MC_CMD_FC_OP_PHY_OP_RECONFIGURE 0x2
+/* enum: PHY reboot handler */
+#define MC_CMD_FC_OP_PHY_OP_REBOOT 0x3
+/* enum: PHY get_supported_cap handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_SUPPORTED_CAP 0x4
+/* enum: PHY get_config handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_CONFIG 0x5
+/* enum: PHY get_media_info handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_MEDIA_INFO 0x6
+/* enum: PHY set_led handler */
+#define MC_CMD_FC_OP_PHY_OP_SET_LED 0x7
+/* enum: PHY lasi_interrupt handler */
+#define MC_CMD_FC_OP_PHY_OP_LASI_INTERRUPT 0x8
+/* enum: PHY check_link handler */
+#define MC_CMD_FC_OP_PHY_OP_CHECK_LINK 0x9
+/* enum: PHY fill_stats handler */
+#define MC_CMD_FC_OP_PHY_OP_FILL_STATS 0xa
+/* enum: PHY bpx_link_state_changed handler */
+#define MC_CMD_FC_OP_PHY_OP_BPX_LINK_STATE_CHANGED 0xb
+/* enum: PHY get_state handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_STATE 0xc
+/* enum: PHY start_bist handler */
+#define MC_CMD_FC_OP_PHY_OP_START_BIST 0xd
+/* enum: PHY poll_bist handler */
+#define MC_CMD_FC_OP_PHY_OP_POLL_BIST 0xe
+/* enum: PHY nvram_test handler */
+#define MC_CMD_FC_OP_PHY_OP_NVRAM_TEST 0xf
+/* enum: PHY relinquish handler */
+#define MC_CMD_FC_OP_PHY_OP_RELINQUISH_SPI 0x10
+/* enum: PHY read connection from FC - may not be required */
+#define MC_CMD_FC_OP_PHY_OP_GET_CONNECTION 0x11
+/* enum: PHY read flags from FC - may not be required */
+#define MC_CMD_FC_OP_PHY_OP_GET_FLAGS 0x12
+
+/* MC_CMD_FC_IN_PHY_INIT msgrequest */
+#define MC_CMD_FC_IN_PHY_INIT_LEN 4
+#define MC_CMD_FC_IN_PHY_CMD_OFST 0
+#define MC_CMD_FC_IN_PHY_CMD_LEN 4
+
+/* MC_CMD_FC_IN_MAC msgrequest */
+#define MC_CMD_FC_IN_MAC_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_MAC_HEADER_OFST 4
+#define MC_CMD_FC_IN_MAC_HEADER_LEN 4
+#define MC_CMD_FC_IN_MAC_OP_LBN 0
+#define MC_CMD_FC_IN_MAC_OP_WIDTH 8
+/* enum: MAC reconfigure handler */
+#define MC_CMD_FC_OP_MAC_OP_RECONFIGURE 0x1
+/* enum: MAC Set command - same as MC_CMD_SET_MAC */
+#define MC_CMD_FC_OP_MAC_OP_SET_LINK 0x2
+/* enum: MAC statistics */
+#define MC_CMD_FC_OP_MAC_OP_GET_STATS 0x3
+/* enum: MAC RX statistics */
+#define MC_CMD_FC_OP_MAC_OP_GET_RX_STATS 0x6
+/* enum: MAC TX statistics */
+#define MC_CMD_FC_OP_MAC_OP_GET_TX_STATS 0x7
+/* enum: MAC Read status */
+#define MC_CMD_FC_OP_MAC_OP_READ_STATUS 0x8
+#define MC_CMD_FC_IN_MAC_PORT_TYPE_LBN 8
+#define MC_CMD_FC_IN_MAC_PORT_TYPE_WIDTH 8
+/* enum: External FPGA port. */
+#define MC_CMD_FC_PORT_EXT 0x0
+/* enum: Internal Siena-facing FPGA ports. */
+#define MC_CMD_FC_PORT_INT 0x1
+#define MC_CMD_FC_IN_MAC_PORT_IDX_LBN 16
+#define MC_CMD_FC_IN_MAC_PORT_IDX_WIDTH 8
+#define MC_CMD_FC_IN_MAC_CMD_FORMAT_LBN 24
+#define MC_CMD_FC_IN_MAC_CMD_FORMAT_WIDTH 8
+/* enum: Default FC command format; the fields PORT_TYPE and PORT_IDX are
+ * irrelevant. Port number is derived from pci_fn; passed in FC header.
+ */
+#define MC_CMD_FC_OP_MAC_CMD_FORMAT_DEFAULT 0x0
+/* enum: Override default port number. Port number determined by fields
+ * PORT_TYPE and PORT_IDX.
+ */
+#define MC_CMD_FC_OP_MAC_CMD_FORMAT_PORT_OVERRIDE 0x1
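The HEADER dword at offset 4 packs the MAC operation (bits 0-7), PORT_TYPE (bits 8-15), PORT_IDX (bits 16-23) and CMD_FORMAT (bits 24-31). A small helper that assembles it from those sub-fields, sketched against the values defined above, might look like the following.

#include <stdint.h>

/* Pack the MC_CMD_FC_IN_MAC header dword from its sub-fields, following the
 * LBN/WIDTH layout above (OP 0:7, PORT_TYPE 8:15, PORT_IDX 16:23,
 * CMD_FORMAT 24:31).
 */
static uint32_t fc_mac_header(uint8_t op, uint8_t port_type, uint8_t port_idx,
			      uint8_t cmd_format)
{
	return (uint32_t)op |
	       ((uint32_t)port_type << 8) |
	       ((uint32_t)port_idx << 16) |
	       ((uint32_t)cmd_format << 24);
}

/* Example: request TX statistics for internal FPGA port 0, overriding the
 * default (pci_fn derived) port selection.
 */
static uint32_t example_tx_stats_header(void)
{
	return fc_mac_header(0x7 /* GET_TX_STATS */,
			     0x1 /* MC_CMD_FC_PORT_INT */,
			     0, 0x1 /* CMD_FORMAT_PORT_OVERRIDE */);
}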
+
+/* MC_CMD_FC_IN_MAC_RECONFIGURE msgrequest */
+#define MC_CMD_FC_IN_MAC_RECONFIGURE_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */
+
+/* MC_CMD_FC_IN_MAC_SET_LINK msgrequest */
+#define MC_CMD_FC_IN_MAC_SET_LINK_LEN 32
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */
+/* MTU size */
+#define MC_CMD_FC_IN_MAC_SET_LINK_MTU_OFST 8
+#define MC_CMD_FC_IN_MAC_SET_LINK_MTU_LEN 4
+/* Drain Tx FIFO */
+#define MC_CMD_FC_IN_MAC_SET_LINK_DRAIN_OFST 12
+#define MC_CMD_FC_IN_MAC_SET_LINK_DRAIN_LEN 4
+#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_OFST 16
+#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_LEN 8
+#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_LO_OFST 16
+#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_HI_OFST 20
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_OFST 24
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_LEN 4
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_UNICAST_LBN 0
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_UNICAST_WIDTH 1
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_BRDCAST_LBN 1
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_BRDCAST_WIDTH 1
+#define MC_CMD_FC_IN_MAC_SET_LINK_FCNTL_OFST 28
+#define MC_CMD_FC_IN_MAC_SET_LINK_FCNTL_LEN 4
+
+/* MC_CMD_FC_IN_MAC_READ_STATUS msgrequest */
+#define MC_CMD_FC_IN_MAC_READ_STATUS_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */
+
+/* MC_CMD_FC_IN_MAC_GET_RX_STATS msgrequest */
+#define MC_CMD_FC_IN_MAC_GET_RX_STATS_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */
+
+/* MC_CMD_FC_IN_MAC_GET_TX_STATS msgrequest */
+#define MC_CMD_FC_IN_MAC_GET_TX_STATS_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */
+
+/* MC_CMD_FC_IN_MAC_GET_STATS msgrequest */
+#define MC_CMD_FC_IN_MAC_GET_STATS_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */
+/* MC Statistics index */
+#define MC_CMD_FC_IN_MAC_GET_STATS_STATS_INDEX_OFST 8
+#define MC_CMD_FC_IN_MAC_GET_STATS_STATS_INDEX_LEN 4
+#define MC_CMD_FC_IN_MAC_GET_STATS_FLAGS_OFST 12
+#define MC_CMD_FC_IN_MAC_GET_STATS_FLAGS_LEN 4
+#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_ALL_LBN 0
+#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_ALL_WIDTH 1
+#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_LBN 1
+#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_WIDTH 1
+#define MC_CMD_FC_IN_MAC_GET_STATS_UPDATE_LBN 2
+#define MC_CMD_FC_IN_MAC_GET_STATS_UPDATE_WIDTH 1
+/* Number of statistics to read */
+#define MC_CMD_FC_IN_MAC_GET_STATS_NUM_OFST 16
+#define MC_CMD_FC_IN_MAC_GET_STATS_NUM_LEN 4
+#define MC_CMD_FC_MAC_NSTATS_PER_BLOCK 0x1e /* enum */
+#define MC_CMD_FC_MAC_NBYTES_PER_STAT 0x8 /* enum */
+
+/* MC_CMD_FC_IN_READ32 msgrequest */
+#define MC_CMD_FC_IN_READ32_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_READ32_ADDR_HI_OFST 4
+#define MC_CMD_FC_IN_READ32_ADDR_HI_LEN 4
+#define MC_CMD_FC_IN_READ32_ADDR_LO_OFST 8
+#define MC_CMD_FC_IN_READ32_ADDR_LO_LEN 4
+#define MC_CMD_FC_IN_READ32_NUMWORDS_OFST 12
+#define MC_CMD_FC_IN_READ32_NUMWORDS_LEN 4
+
+/* MC_CMD_FC_IN_WRITE32 msgrequest */
+#define MC_CMD_FC_IN_WRITE32_LENMIN 16
+#define MC_CMD_FC_IN_WRITE32_LENMAX 252
+#define MC_CMD_FC_IN_WRITE32_LEN(num) (12+4*(num))
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_WRITE32_ADDR_HI_OFST 4
+#define MC_CMD_FC_IN_WRITE32_ADDR_HI_LEN 4
+#define MC_CMD_FC_IN_WRITE32_ADDR_LO_OFST 8
+#define MC_CMD_FC_IN_WRITE32_ADDR_LO_LEN 4
+#define MC_CMD_FC_IN_WRITE32_BUFFER_OFST 12
+#define MC_CMD_FC_IN_WRITE32_BUFFER_LEN 4
+#define MC_CMD_FC_IN_WRITE32_BUFFER_MINNUM 1
+#define MC_CMD_FC_IN_WRITE32_BUFFER_MAXNUM 60
+
+/* MC_CMD_FC_IN_TRC_READ msgrequest */
+#define MC_CMD_FC_IN_TRC_READ_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_TRC_READ_TRC_OFST 4
+#define MC_CMD_FC_IN_TRC_READ_TRC_LEN 4
+#define MC_CMD_FC_IN_TRC_READ_CHANNEL_OFST 8
+#define MC_CMD_FC_IN_TRC_READ_CHANNEL_LEN 4
+
+/* MC_CMD_FC_IN_TRC_WRITE msgrequest */
+#define MC_CMD_FC_IN_TRC_WRITE_LEN 28
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_TRC_WRITE_TRC_OFST 4
+#define MC_CMD_FC_IN_TRC_WRITE_TRC_LEN 4
+#define MC_CMD_FC_IN_TRC_WRITE_CHANNEL_OFST 8
+#define MC_CMD_FC_IN_TRC_WRITE_CHANNEL_LEN 4
+#define MC_CMD_FC_IN_TRC_WRITE_DATA_OFST 12
+#define MC_CMD_FC_IN_TRC_WRITE_DATA_LEN 4
+#define MC_CMD_FC_IN_TRC_WRITE_DATA_NUM 4
+
+/* MC_CMD_FC_IN_GET_VERSION msgrequest */
+#define MC_CMD_FC_IN_GET_VERSION_LEN 4
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+
+/* MC_CMD_FC_IN_TRC_RX_READ msgrequest */
+#define MC_CMD_FC_IN_TRC_RX_READ_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_TRC_RX_READ_TRC_OFST 4
+#define MC_CMD_FC_IN_TRC_RX_READ_TRC_LEN 4
+#define MC_CMD_FC_IN_TRC_RX_READ_CHANNEL_OFST 8
+#define MC_CMD_FC_IN_TRC_RX_READ_CHANNEL_LEN 4
+
+/* MC_CMD_FC_IN_TRC_RX_WRITE msgrequest */
+#define MC_CMD_FC_IN_TRC_RX_WRITE_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_TRC_RX_WRITE_TRC_OFST 4
+#define MC_CMD_FC_IN_TRC_RX_WRITE_TRC_LEN 4
+#define MC_CMD_FC_IN_TRC_RX_WRITE_CHANNEL_OFST 8
+#define MC_CMD_FC_IN_TRC_RX_WRITE_CHANNEL_LEN 4
+#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_OFST 12
+#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_LEN 4
+#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_NUM 2
+
+/* MC_CMD_FC_IN_SFP msgrequest */
+#define MC_CMD_FC_IN_SFP_LEN 28
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* Link speed is 100, 1000, 10000, 40000 */
+#define MC_CMD_FC_IN_SFP_SPEED_OFST 4
+#define MC_CMD_FC_IN_SFP_SPEED_LEN 4
+/* Length of copper cable - zero when not relevant (e.g. if cable is fibre) */
+#define MC_CMD_FC_IN_SFP_COPPER_LEN_OFST 8
+#define MC_CMD_FC_IN_SFP_COPPER_LEN_LEN 4
+/* Not relevant for cards with QSFP modules. For older cards, true if module is
+ * a dual speed SFP+ module.
+ */
+#define MC_CMD_FC_IN_SFP_DUAL_SPEED_OFST 12
+#define MC_CMD_FC_IN_SFP_DUAL_SPEED_LEN 4
+/* True if an SFP Module is present (other fields valid when true) */
+#define MC_CMD_FC_IN_SFP_PRESENT_OFST 16
+#define MC_CMD_FC_IN_SFP_PRESENT_LEN 4
+/* The type of the SFP+ Module. For later cards with QSFP modules, this field
+ * is unused and the type is communicated by other means.
+ */
+#define MC_CMD_FC_IN_SFP_TYPE_OFST 20
+#define MC_CMD_FC_IN_SFP_TYPE_LEN 4
+/* Capabilities corresponding to 1 bits. */
+#define MC_CMD_FC_IN_SFP_CAPS_OFST 24
+#define MC_CMD_FC_IN_SFP_CAPS_LEN 4
+
+/* MC_CMD_FC_IN_DDR_TEST msgrequest */
+#define MC_CMD_FC_IN_DDR_TEST_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4
+#define MC_CMD_FC_IN_DDR_TEST_HEADER_LEN 4
+#define MC_CMD_FC_IN_DDR_TEST_OP_LBN 0
+#define MC_CMD_FC_IN_DDR_TEST_OP_WIDTH 8
+/* enum: DRAM Test Start */
+#define MC_CMD_FC_OP_DDR_TEST_START 0x1
+/* enum: DRAM Test Poll */
+#define MC_CMD_FC_OP_DDR_TEST_POLL 0x2
+
+/* MC_CMD_FC_IN_DDR_TEST_START msgrequest */
+#define MC_CMD_FC_IN_DDR_TEST_START_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_DDR_TEST_HEADER_LEN 4 */
+#define MC_CMD_FC_IN_DDR_TEST_START_MASK_OFST 8
+#define MC_CMD_FC_IN_DDR_TEST_START_MASK_LEN 4
+#define MC_CMD_FC_IN_DDR_TEST_START_T0_LBN 0
+#define MC_CMD_FC_IN_DDR_TEST_START_T0_WIDTH 1
+#define MC_CMD_FC_IN_DDR_TEST_START_T1_LBN 1
+#define MC_CMD_FC_IN_DDR_TEST_START_T1_WIDTH 1
+#define MC_CMD_FC_IN_DDR_TEST_START_B0_LBN 2
+#define MC_CMD_FC_IN_DDR_TEST_START_B0_WIDTH 1
+#define MC_CMD_FC_IN_DDR_TEST_START_B1_LBN 3
+#define MC_CMD_FC_IN_DDR_TEST_START_B1_WIDTH 1
+
+/* MC_CMD_FC_IN_DDR_TEST_POLL msgrequest */
+#define MC_CMD_FC_IN_DDR_TEST_POLL_LEN 12
+#define MC_CMD_FC_IN_DDR_TEST_CMD_OFST 0
+#define MC_CMD_FC_IN_DDR_TEST_CMD_LEN 4
+/* MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_DDR_TEST_HEADER_LEN 4 */
+/* Clear previous test result and prepare for restarting DDR test */
+#define MC_CMD_FC_IN_DDR_TEST_POLL_CLEAR_RESULT_FOR_DDR_TEST_OFST 8
+#define MC_CMD_FC_IN_DDR_TEST_POLL_CLEAR_RESULT_FOR_DDR_TEST_LEN 4
+
+/* MC_CMD_FC_IN_GET_ASSERT msgrequest */
+#define MC_CMD_FC_IN_GET_ASSERT_LEN 4
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+
+/* MC_CMD_FC_IN_FPGA_BUILD msgrequest */
+#define MC_CMD_FC_IN_FPGA_BUILD_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* FPGA build info operation code */
+#define MC_CMD_FC_IN_FPGA_BUILD_OP_OFST 4
+#define MC_CMD_FC_IN_FPGA_BUILD_OP_LEN 4
+/* enum: Get the build registers */
+#define MC_CMD_FC_IN_FPGA_BUILD_BUILD 0x1
+/* enum: Get the services registers */
+#define MC_CMD_FC_IN_FPGA_BUILD_SERVICES 0x2
+/* enum: Get the BSP version */
+#define MC_CMD_FC_IN_FPGA_BUILD_BSP_VERSION 0x3
+/* enum: Get build register for V2 (SFA974X) */
+#define MC_CMD_FC_IN_FPGA_BUILD_BUILD_V2 0x4
+/* enum: Get the services registers for V2 (SFA974X) */
+#define MC_CMD_FC_IN_FPGA_BUILD_SERVICES_V2 0x5
+
+/* MC_CMD_FC_IN_READ_MAP msgrequest */
+#define MC_CMD_FC_IN_READ_MAP_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4
+#define MC_CMD_FC_IN_READ_MAP_HEADER_LEN 4
+#define MC_CMD_FC_IN_READ_MAP_OP_LBN 0
+#define MC_CMD_FC_IN_READ_MAP_OP_WIDTH 8
+/* enum: Get the number of map regions */
+#define MC_CMD_FC_OP_READ_MAP_COUNT 0x1
+/* enum: Get the specified map */
+#define MC_CMD_FC_OP_READ_MAP_INDEX 0x2
+
+/* MC_CMD_FC_IN_READ_MAP_COUNT msgrequest */
+#define MC_CMD_FC_IN_READ_MAP_COUNT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_READ_MAP_HEADER_LEN 4 */
+
+/* MC_CMD_FC_IN_READ_MAP_INDEX msgrequest */
+#define MC_CMD_FC_IN_READ_MAP_INDEX_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_READ_MAP_HEADER_LEN 4 */
+#define MC_CMD_FC_IN_MAP_INDEX_OFST 8
+#define MC_CMD_FC_IN_MAP_INDEX_LEN 4
+
+/* MC_CMD_FC_IN_CAPABILITIES msgrequest */
+#define MC_CMD_FC_IN_CAPABILITIES_LEN 4
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+
+/* MC_CMD_FC_IN_GLOBAL_FLAGS msgrequest */
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_FLAGS_OFST 4
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_FLAGS_LEN 4
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_CABLE_PLUGGED_IN_LBN 0
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_CABLE_PLUGGED_IN_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_LINK_MONITORING_LBN 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_LINK_MONITORING_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_DFE_ENABLE_LBN 2
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_DFE_ENABLE_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_EYE_ENABLE_LBN 3
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_EYE_ENABLE_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_TUNING_ENABLE_LBN 4
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_TUNING_ENABLE_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_OFFCAL_ENABLE_LBN 5
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_OFFCAL_ENABLE_WIDTH 1
+
+/* MC_CMD_FC_IN_IO_REL msgrequest */
+#define MC_CMD_FC_IN_IO_REL_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_IO_REL_HEADER_OFST 4
+#define MC_CMD_FC_IN_IO_REL_HEADER_LEN 4
+#define MC_CMD_FC_IN_IO_REL_OP_LBN 0
+#define MC_CMD_FC_IN_IO_REL_OP_WIDTH 8
+/* enum: Get the base address that the FC applies to relative commands */
+#define MC_CMD_FC_IN_IO_REL_GET_ADDR 0x1
+/* enum: Read data */
+#define MC_CMD_FC_IN_IO_REL_READ32 0x2
+/* enum: Write data */
+#define MC_CMD_FC_IN_IO_REL_WRITE32 0x3
+#define MC_CMD_FC_IN_IO_REL_COMP_TYPE_LBN 8
+#define MC_CMD_FC_IN_IO_REL_COMP_TYPE_WIDTH 8
+/* enum: Application address space */
+#define MC_CMD_FC_COMP_TYPE_APP_ADDR_SPACE 0x1
+/* enum: Flash address space */
+#define MC_CMD_FC_COMP_TYPE_FLASH 0x2
+
+/* MC_CMD_FC_IN_IO_REL_GET_ADDR msgrequest */
+#define MC_CMD_FC_IN_IO_REL_GET_ADDR_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_LEN 4 */
+
+/* MC_CMD_FC_IN_IO_REL_READ32 msgrequest */
+#define MC_CMD_FC_IN_IO_REL_READ32_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_LEN 4 */
+#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_HI_OFST 8
+#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_HI_LEN 4
+#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_LO_OFST 12
+#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_LO_LEN 4
+#define MC_CMD_FC_IN_IO_REL_READ32_NUMWORDS_OFST 16
+#define MC_CMD_FC_IN_IO_REL_READ32_NUMWORDS_LEN 4
+
+/* MC_CMD_FC_IN_IO_REL_WRITE32 msgrequest */
+#define MC_CMD_FC_IN_IO_REL_WRITE32_LENMIN 20
+#define MC_CMD_FC_IN_IO_REL_WRITE32_LENMAX 252
+#define MC_CMD_FC_IN_IO_REL_WRITE32_LEN(num) (16+4*(num))
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_LEN 4 */
+#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_HI_OFST 8
+#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_HI_LEN 4
+#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_LO_OFST 12
+#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_LO_LEN 4
+#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_OFST 16
+#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_LEN 4
+#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_MINNUM 1
+#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_MAXNUM 59
+
+/* MC_CMD_FC_IN_UHLINK msgrequest */
+#define MC_CMD_FC_IN_UHLINK_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_UHLINK_HEADER_OFST 4
+#define MC_CMD_FC_IN_UHLINK_HEADER_LEN 4
+#define MC_CMD_FC_IN_UHLINK_OP_LBN 0
+#define MC_CMD_FC_IN_UHLINK_OP_WIDTH 8
+/* enum: Get PHY configuration info */
+#define MC_CMD_FC_OP_UHLINK_PHY 0x1
+/* enum: Get MAC configuration info */
+#define MC_CMD_FC_OP_UHLINK_MAC 0x2
+/* enum: Get Rx eye table */
+#define MC_CMD_FC_OP_UHLINK_RX_EYE 0x3
+/* enum: Get Rx eye plot */
+#define MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT 0x4
+/* enum: Get Rx eye plot */
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT 0x5
+/* enum: Retune Rx settings */
+#define MC_CMD_FC_OP_UHLINK_RX_TUNE 0x6
+/* enum: Set loopback mode on fpga port */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET 0x7
+/* enum: Get loopback mode config state on fpga port */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET 0x8
+#define MC_CMD_FC_IN_UHLINK_PORT_TYPE_LBN 8
+#define MC_CMD_FC_IN_UHLINK_PORT_TYPE_WIDTH 8
+#define MC_CMD_FC_IN_UHLINK_PORT_IDX_LBN 16
+#define MC_CMD_FC_IN_UHLINK_PORT_IDX_WIDTH 8
+#define MC_CMD_FC_IN_UHLINK_CMD_FORMAT_LBN 24
+#define MC_CMD_FC_IN_UHLINK_CMD_FORMAT_WIDTH 8
+/* enum: Default FC command format; the fields PORT_TYPE and PORT_IDX are
+ * irrelevant. Port number is derived from pci_fn; passed in FC header.
+ */
+#define MC_CMD_FC_OP_UHLINK_CMD_FORMAT_DEFAULT 0x0
+/* enum: Override default port number. Port number determined by fields
+ * PORT_TYPE and PORT_IDX.
+ */
+#define MC_CMD_FC_OP_UHLINK_CMD_FORMAT_PORT_OVERRIDE 0x1
+
+/* MC_CMD_FC_OP_UHLINK_PHY msgrequest */
+#define MC_CMD_FC_OP_UHLINK_PHY_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
+
+/* MC_CMD_FC_OP_UHLINK_MAC msgrequest */
+#define MC_CMD_FC_OP_UHLINK_MAC_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
+
+/* MC_CMD_FC_OP_UHLINK_RX_EYE msgrequest */
+#define MC_CMD_FC_OP_UHLINK_RX_EYE_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
+#define MC_CMD_FC_OP_UHLINK_RX_EYE_INDEX_OFST 8
+#define MC_CMD_FC_OP_UHLINK_RX_EYE_INDEX_LEN 4
+#define MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK 0x30 /* enum */
+
+/* MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT msgrequest */
+#define MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
+
+/* MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT msgrequest */
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_DC_GAIN_OFST 8
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_DC_GAIN_LEN 4
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_EQ_CONTROL_OFST 12
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_EQ_CONTROL_LEN 4
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_INDEX_OFST 16
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_INDEX_LEN 4
+#define MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK 0x1e /* enum */
+
+/* MC_CMD_FC_OP_UHLINK_RX_TUNE msgrequest */
+#define MC_CMD_FC_OP_UHLINK_RX_TUNE_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
+
+/* MC_CMD_FC_OP_UHLINK_LOOPBACK_SET msgrequest */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_TYPE_OFST 8
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_TYPE_LEN 4
+#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PCS_SERIAL 0x0 /* enum */
+#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PMA_PRE_CDR 0x1 /* enum */
+#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PMA_POST_CDR 0x2 /* enum */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_STATE_OFST 12
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_STATE_LEN 4
+#define MC_CMD_FC_UHLINK_LOOPBACK_STATE_OFF 0x0 /* enum */
+#define MC_CMD_FC_UHLINK_LOOPBACK_STATE_ON 0x1 /* enum */
+
+/* MC_CMD_FC_OP_UHLINK_LOOPBACK_GET msgrequest */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_TYPE_OFST 8
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_TYPE_LEN 4
+
+/* MC_CMD_FC_IN_SET_LINK msgrequest */
+#define MC_CMD_FC_IN_SET_LINK_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* See MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+#define MC_CMD_FC_IN_SET_LINK_MODE_OFST 4
+#define MC_CMD_FC_IN_SET_LINK_MODE_LEN 4
+#define MC_CMD_FC_IN_SET_LINK_SPEED_OFST 8
+#define MC_CMD_FC_IN_SET_LINK_SPEED_LEN 4
+#define MC_CMD_FC_IN_SET_LINK_FLAGS_OFST 12
+#define MC_CMD_FC_IN_SET_LINK_FLAGS_LEN 4
+#define MC_CMD_FC_IN_SET_LINK_LOWPOWER_LBN 0
+#define MC_CMD_FC_IN_SET_LINK_LOWPOWER_WIDTH 1
+#define MC_CMD_FC_IN_SET_LINK_POWEROFF_LBN 1
+#define MC_CMD_FC_IN_SET_LINK_POWEROFF_WIDTH 1
+#define MC_CMD_FC_IN_SET_LINK_TXDIS_LBN 2
+#define MC_CMD_FC_IN_SET_LINK_TXDIS_WIDTH 1
+
+/* MC_CMD_FC_IN_LICENSE msgrequest */
+#define MC_CMD_FC_IN_LICENSE_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_LICENSE_OP_OFST 4
+#define MC_CMD_FC_IN_LICENSE_OP_LEN 4
+#define MC_CMD_FC_IN_LICENSE_UPDATE_LICENSE 0x0 /* enum */
+#define MC_CMD_FC_IN_LICENSE_GET_KEY_STATS 0x1 /* enum */
+
+/* MC_CMD_FC_IN_STARTUP msgrequest */
+#define MC_CMD_FC_IN_STARTUP_LEN 40
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_STARTUP_BASE_OFST 4
+#define MC_CMD_FC_IN_STARTUP_BASE_LEN 4
+#define MC_CMD_FC_IN_STARTUP_LENGTH_OFST 8
+#define MC_CMD_FC_IN_STARTUP_LENGTH_LEN 4
+/* Length of identifier */
+#define MC_CMD_FC_IN_STARTUP_IDLENGTH_OFST 12
+#define MC_CMD_FC_IN_STARTUP_IDLENGTH_LEN 4
+/* Identifier for AOE FPGA */
+#define MC_CMD_FC_IN_STARTUP_ID_OFST 16
+#define MC_CMD_FC_IN_STARTUP_ID_LEN 1
+#define MC_CMD_FC_IN_STARTUP_ID_NUM 24
+
+/* MC_CMD_FC_IN_DMA msgrequest */
+#define MC_CMD_FC_IN_DMA_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DMA_OP_OFST 4
+#define MC_CMD_FC_IN_DMA_OP_LEN 4
+#define MC_CMD_FC_IN_DMA_STOP 0x0 /* enum */
+#define MC_CMD_FC_IN_DMA_READ 0x1 /* enum */
+
+/* MC_CMD_FC_IN_DMA_STOP msgrequest */
+#define MC_CMD_FC_IN_DMA_STOP_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_DMA_OP_OFST 4 */
+/* MC_CMD_FC_IN_DMA_OP_LEN 4 */
+/* FC supplied handle */
+#define MC_CMD_FC_IN_DMA_STOP_FC_HANDLE_OFST 8
+#define MC_CMD_FC_IN_DMA_STOP_FC_HANDLE_LEN 4
+
+/* MC_CMD_FC_IN_DMA_READ msgrequest */
+#define MC_CMD_FC_IN_DMA_READ_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_DMA_OP_OFST 4 */
+/* MC_CMD_FC_IN_DMA_OP_LEN 4 */
+#define MC_CMD_FC_IN_DMA_READ_OFFSET_OFST 8
+#define MC_CMD_FC_IN_DMA_READ_OFFSET_LEN 4
+#define MC_CMD_FC_IN_DMA_READ_LENGTH_OFST 12
+#define MC_CMD_FC_IN_DMA_READ_LENGTH_LEN 4
+
+/* MC_CMD_FC_IN_TIMED_READ msgrequest */
+#define MC_CMD_FC_IN_TIMED_READ_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_TIMED_READ_OP_OFST 4
+#define MC_CMD_FC_IN_TIMED_READ_OP_LEN 4
+#define MC_CMD_FC_IN_TIMED_READ_SET 0x0 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_GET 0x1 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_CLEAR 0x2 /* enum */
+
+/* MC_CMD_FC_IN_TIMED_READ_SET msgrequest */
+#define MC_CMD_FC_IN_TIMED_READ_SET_LEN 52
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_LEN 4 */
+/* Host supplied handle (unique) */
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_HANDLE_OFST 8
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_HANDLE_LEN 4
+/* Address into which to transfer data in host */
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_OFST 12
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LEN 8
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LO_OFST 12
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_HI_OFST 16
+/* AOE address from which to transfer data */
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_OFST 20
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_LEN 8
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_LO_OFST 20
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_HI_OFST 24
+/* Length of AOE transfer (total) */
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_LENGTH_OFST 28
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_LENGTH_LEN 4
+/* Length of host transfer (total) */
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_LENGTH_OFST 32
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_LENGTH_LEN 4
+/* Offset back from aoe_address to apply operation to */
+#define MC_CMD_FC_IN_TIMED_READ_SET_OFFSET_OFST 36
+#define MC_CMD_FC_IN_TIMED_READ_SET_OFFSET_LEN 4
+/* Data to apply at offset */
+#define MC_CMD_FC_IN_TIMED_READ_SET_DATA_OFST 40
+#define MC_CMD_FC_IN_TIMED_READ_SET_DATA_LEN 4
+#define MC_CMD_FC_IN_TIMED_READ_SET_FLAGS_OFST 44
+#define MC_CMD_FC_IN_TIMED_READ_SET_FLAGS_LEN 4
+#define MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_LBN 0
+#define MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_WIDTH 1
+#define MC_CMD_FC_IN_TIMED_READ_SET_DOUBLE_LBN 1
+#define MC_CMD_FC_IN_TIMED_READ_SET_DOUBLE_WIDTH 1
+#define MC_CMD_FC_IN_TIMED_READ_SET_EVENT_LBN 2
+#define MC_CMD_FC_IN_TIMED_READ_SET_EVENT_WIDTH 1
+#define MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_LBN 3
+#define MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_WIDTH 2
+#define MC_CMD_FC_IN_TIMED_READ_SET_NONE 0x0 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_SET_READ 0x1 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_SET_WRITE 0x2 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_SET_READWRITE 0x3 /* enum */
+/* Period at which reads are performed (100ms units) */
+#define MC_CMD_FC_IN_TIMED_READ_SET_PERIOD_OFST 48
+#define MC_CMD_FC_IN_TIMED_READ_SET_PERIOD_LEN 4
+
+/* MC_CMD_FC_IN_TIMED_READ_GET msgrequest */
+#define MC_CMD_FC_IN_TIMED_READ_GET_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_LEN 4 */
+/* FC supplied handle */
+#define MC_CMD_FC_IN_TIMED_READ_GET_FC_HANDLE_OFST 8
+#define MC_CMD_FC_IN_TIMED_READ_GET_FC_HANDLE_LEN 4
+
+/* MC_CMD_FC_IN_TIMED_READ_CLEAR msgrequest */
+#define MC_CMD_FC_IN_TIMED_READ_CLEAR_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_LEN 4 */
+/* FC supplied handle */
+#define MC_CMD_FC_IN_TIMED_READ_CLEAR_FC_HANDLE_OFST 8
+#define MC_CMD_FC_IN_TIMED_READ_CLEAR_FC_HANDLE_LEN 4
+
+/* MC_CMD_FC_IN_LOG msgrequest */
+#define MC_CMD_FC_IN_LOG_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_LOG_OP_OFST 4
+#define MC_CMD_FC_IN_LOG_OP_LEN 4
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE 0x0 /* enum */
+#define MC_CMD_FC_IN_LOG_JTAG_UART 0x1 /* enum */
+
+/* MC_CMD_FC_IN_LOG_ADDR_RANGE msgrequest */
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_LOG_OP_OFST 4 */
+/* MC_CMD_FC_IN_LOG_OP_LEN 4 */
+/* Partition offset into flash */
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_OFFSET_OFST 8
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_OFFSET_LEN 4
+/* Partition length */
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LENGTH_OFST 12
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LENGTH_LEN 4
+/* Partition erase size */
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_ERASE_SIZE_OFST 16
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_ERASE_SIZE_LEN 4
+
+/* MC_CMD_FC_IN_LOG_JTAG_UART msgrequest */
+#define MC_CMD_FC_IN_LOG_JTAG_UART_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_LOG_OP_OFST 4 */
+/* MC_CMD_FC_IN_LOG_OP_LEN 4 */
+/* Enable/disable printing to JTAG UART */
+#define MC_CMD_FC_IN_LOG_JTAG_UART_ENABLE_OFST 8
+#define MC_CMD_FC_IN_LOG_JTAG_UART_ENABLE_LEN 4
+
+/* MC_CMD_FC_IN_CLOCK msgrequest: Perform a clock operation */
+#define MC_CMD_FC_IN_CLOCK_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_CLOCK_OP_OFST 4
+#define MC_CMD_FC_IN_CLOCK_OP_LEN 4
+#define MC_CMD_FC_IN_CLOCK_GET_TIME 0x0 /* enum */
+#define MC_CMD_FC_IN_CLOCK_SET_TIME 0x1 /* enum */
+#define MC_CMD_FC_IN_CLOCK_ID_OFST 8
+#define MC_CMD_FC_IN_CLOCK_ID_LEN 4
+#define MC_CMD_FC_IN_CLOCK_STATS 0x0 /* enum */
+#define MC_CMD_FC_IN_CLOCK_MAC 0x1 /* enum */
+
+/* MC_CMD_FC_IN_CLOCK_GET_TIME msgrequest: Retrieve the clock value of the
+ * specified clock
+ */
+#define MC_CMD_FC_IN_CLOCK_GET_TIME_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_CLOCK_OP_OFST 4 */
+/* MC_CMD_FC_IN_CLOCK_OP_LEN 4 */
+/* MC_CMD_FC_IN_CLOCK_ID_OFST 8 */
+/* MC_CMD_FC_IN_CLOCK_ID_LEN 4 */
+
+/* MC_CMD_FC_IN_CLOCK_SET_TIME msgrequest: Set the clock value of the specified
+ * clock
+ */
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_LEN 24
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_CLOCK_OP_OFST 4 */
+/* MC_CMD_FC_IN_CLOCK_OP_LEN 4 */
+/* MC_CMD_FC_IN_CLOCK_ID_OFST 8 */
+/* MC_CMD_FC_IN_CLOCK_ID_LEN 4 */
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_OFST 12
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_LEN 8
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_LO_OFST 12
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_HI_OFST 16
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_NANOSECONDS_OFST 20
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_NANOSECONDS_LEN 4
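As with other 64-bit MCDI fields, SECONDS is split into little-endian LO and HI dwords at offsets 12 and 16, with NANOSECONDS at offset 20. The sketch below fills the 24-byte request; placing MC_CMD_FC_OP_CLOCK in the FC op header dword at offset 0 is an assumption based on the definitions above, not a confirmed calling convention.

#include <stdint.h>
#include <string.h>

static void put_dword_le(uint8_t *buf, uint32_t v)
{
	buf[0] = v & 0xff;
	buf[1] = (v >> 8) & 0xff;
	buf[2] = (v >> 16) & 0xff;
	buf[3] = (v >> 24) & 0xff;
}

/* Fill a MC_CMD_FC_IN_CLOCK_SET_TIME request (24 bytes): FC op header at
 * offset 0, clock operation at offset 4, clock ID at offset 8, 64-bit
 * seconds split LO/HI at offsets 12/16, nanoseconds at offset 20.
 */
static void fc_clock_set_time(uint8_t req[24], uint32_t clock_id,
			      uint64_t seconds, uint32_t nanoseconds)
{
	memset(req, 0, 24);
	put_dword_le(req + 0, 0x1a);		/* MC_CMD_FC_OP_CLOCK (assumed) */
	put_dword_le(req + 4, 0x1);		/* MC_CMD_FC_IN_CLOCK_SET_TIME */
	put_dword_le(req + 8, clock_id);	/* e.g. MC_CMD_FC_IN_CLOCK_MAC */
	put_dword_le(req + 12, (uint32_t)(seconds & 0xffffffffu));	/* LO */
	put_dword_le(req + 16, (uint32_t)(seconds >> 32));		/* HI */
	put_dword_le(req + 20, nanoseconds);
}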
+
+/* MC_CMD_FC_IN_DDR msgrequest */
+#define MC_CMD_FC_IN_DDR_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DDR_OP_OFST 4
+#define MC_CMD_FC_IN_DDR_OP_LEN 4
+#define MC_CMD_FC_IN_DDR_SET_SPD 0x0 /* enum */
+#define MC_CMD_FC_IN_DDR_GET_STATUS 0x1 /* enum */
+#define MC_CMD_FC_IN_DDR_SET_INFO 0x2 /* enum */
+#define MC_CMD_FC_IN_DDR_BANK_OFST 8
+#define MC_CMD_FC_IN_DDR_BANK_LEN 4
+#define MC_CMD_FC_IN_DDR_BANK_B0 0x0 /* enum */
+#define MC_CMD_FC_IN_DDR_BANK_B1 0x1 /* enum */
+#define MC_CMD_FC_IN_DDR_BANK_T0 0x2 /* enum */
+#define MC_CMD_FC_IN_DDR_BANK_T1 0x3 /* enum */
+#define MC_CMD_FC_IN_DDR_NUM_BANKS 0x4 /* enum */
+
+/* MC_CMD_FC_IN_DDR_SET_SPD msgrequest */
+#define MC_CMD_FC_IN_DDR_SET_SPD_LEN 148
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_DDR_OP_OFST 4 */
+/* MC_CMD_FC_IN_DDR_OP_LEN 4 */
+/* Affected bank */
+/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */
+/* MC_CMD_FC_IN_DDR_BANK_LEN 4 */
+/* Flags */
+#define MC_CMD_FC_IN_DDR_FLAGS_OFST 12
+#define MC_CMD_FC_IN_DDR_FLAGS_LEN 4
+#define MC_CMD_FC_IN_DDR_SET_SPD_ACTIVE 0x1 /* enum */
+/* 128-byte page of serial presence detect data read from module's EEPROM */
+#define MC_CMD_FC_IN_DDR_SPD_OFST 16
+#define MC_CMD_FC_IN_DDR_SPD_LEN 1
+#define MC_CMD_FC_IN_DDR_SPD_NUM 128
+/* Page index of the spd data copied into MC_CMD_FC_IN_DDR_SPD */
+#define MC_CMD_FC_IN_DDR_SPD_PAGE_ID_OFST 144
+#define MC_CMD_FC_IN_DDR_SPD_PAGE_ID_LEN 4
+
+/* MC_CMD_FC_IN_DDR_SET_INFO msgrequest */
+#define MC_CMD_FC_IN_DDR_SET_INFO_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_DDR_OP_OFST 4 */
+/* MC_CMD_FC_IN_DDR_OP_LEN 4 */
+/* Affected bank */
+/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */
+/* MC_CMD_FC_IN_DDR_BANK_LEN 4 */
+/* Size of DDR */
+#define MC_CMD_FC_IN_DDR_SIZE_OFST 12
+#define MC_CMD_FC_IN_DDR_SIZE_LEN 4
+
+/* MC_CMD_FC_IN_DDR_GET_STATUS msgrequest */
+#define MC_CMD_FC_IN_DDR_GET_STATUS_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_DDR_OP_OFST 4 */
+/* MC_CMD_FC_IN_DDR_OP_LEN 4 */
+/* Affected bank */
+/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */
+/* MC_CMD_FC_IN_DDR_BANK_LEN 4 */
+
+/* MC_CMD_FC_IN_TIMESTAMP msgrequest */
+#define MC_CMD_FC_IN_TIMESTAMP_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* FC timestamp operation code */
+#define MC_CMD_FC_IN_TIMESTAMP_OP_OFST 4
+#define MC_CMD_FC_IN_TIMESTAMP_OP_LEN 4
+/* enum: Read transmit timestamp(s) */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT 0x0
+/* enum: Read snapshot timestamps */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT 0x1
+/* enum: Clear all transmit timestamps */
+#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT 0x2
+
+/* MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT msgrequest */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_LEN 28
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_OP_OFST 4
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_OP_LEN 4
+/* Control filtering of the returned timestamp and sequence number specified
+ * here
+ */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_FILTER_OFST 8
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_FILTER_LEN 4
+/* enum: Return most recent timestamp. No filtering */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_LATEST 0x0
+/* enum: Match timestamp against the PTP clock ID, port number and sequence
+ * number specified
+ */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_MATCH 0x1
+/* Clock identity of PTP packet for which timestamp required */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_OFST 12
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_LEN 8
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_LO_OFST 12
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_HI_OFST 16
+/* Port number of PTP packet for which timestamp required */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_PORT_NUM_OFST 20
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_PORT_NUM_LEN 4
+/* Sequence number of PTP packet for which timestamp required */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_SEQ_NUM_OFST 24
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_SEQ_NUM_LEN 4
+
+/* MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT msgrequest */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_OP_OFST 4
+#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_OP_LEN 4
+
+/* MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT msgrequest */
+#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_OP_OFST 4
+#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_OP_LEN 4
+
+/* MC_CMD_FC_IN_SPI msgrequest */
+#define MC_CMD_FC_IN_SPI_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* Basic commands for SPI Flash. */
+#define MC_CMD_FC_IN_SPI_OP_OFST 4
+#define MC_CMD_FC_IN_SPI_OP_LEN 4
+/* enum: SPI Flash read */
+#define MC_CMD_FC_IN_SPI_READ 0x0
+/* enum: SPI Flash write */
+#define MC_CMD_FC_IN_SPI_WRITE 0x1
+/* enum: SPI Flash erase */
+#define MC_CMD_FC_IN_SPI_ERASE 0x2
+
+/* MC_CMD_FC_IN_SPI_READ msgrequest */
+#define MC_CMD_FC_IN_SPI_READ_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_SPI_READ_OP_OFST 4
+#define MC_CMD_FC_IN_SPI_READ_OP_LEN 4
+#define MC_CMD_FC_IN_SPI_READ_ADDR_OFST 8
+#define MC_CMD_FC_IN_SPI_READ_ADDR_LEN 4
+#define MC_CMD_FC_IN_SPI_READ_NUMBYTES_OFST 12
+#define MC_CMD_FC_IN_SPI_READ_NUMBYTES_LEN 4
+
+/* MC_CMD_FC_IN_SPI_WRITE msgrequest */
+#define MC_CMD_FC_IN_SPI_WRITE_LENMIN 16
+#define MC_CMD_FC_IN_SPI_WRITE_LENMAX 252
+#define MC_CMD_FC_IN_SPI_WRITE_LEN(num) (12+4*(num))
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_SPI_WRITE_OP_OFST 4
+#define MC_CMD_FC_IN_SPI_WRITE_OP_LEN 4
+#define MC_CMD_FC_IN_SPI_WRITE_ADDR_OFST 8
+#define MC_CMD_FC_IN_SPI_WRITE_ADDR_LEN 4
+#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_OFST 12
+#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_LEN 4
+#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_MINNUM 1
+#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_MAXNUM 60
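+
+/* Illustrative note (not part of the firmware interface): for variable
+ * length requests such as MC_CMD_FC_IN_SPI_WRITE, the _LEN(num) macro gives
+ * the total request size in bytes for 'num' buffer elements, bounded by
+ * _LENMIN/_LENMAX.  A minimal sketch, with 'nwords' a hypothetical count of
+ * 32-bit words to write:
+ *
+ *   unsigned int nwords = 8;    // 1 .. MC_CMD_FC_IN_SPI_WRITE_BUFFER_MAXNUM
+ *   size_t req_len = MC_CMD_FC_IN_SPI_WRITE_LEN(nwords);  // 12 + 4 * nwords
+ */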
+
+/* MC_CMD_FC_IN_SPI_ERASE msgrequest */
+#define MC_CMD_FC_IN_SPI_ERASE_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_SPI_ERASE_OP_OFST 4
+#define MC_CMD_FC_IN_SPI_ERASE_OP_LEN 4
+#define MC_CMD_FC_IN_SPI_ERASE_ADDR_OFST 8
+#define MC_CMD_FC_IN_SPI_ERASE_ADDR_LEN 4
+#define MC_CMD_FC_IN_SPI_ERASE_NUMBYTES_OFST 12
+#define MC_CMD_FC_IN_SPI_ERASE_NUMBYTES_LEN 4
+
+/* MC_CMD_FC_IN_DIAG msgrequest */
+#define MC_CMD_FC_IN_DIAG_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* Operation code indicating component type */
+#define MC_CMD_FC_IN_DIAG_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_OP_LEN 4
+/* enum: Power noise generator. */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE 0x0
+/* enum: DDR soak test component. */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK 0x1
+/* enum: Diagnostics datapath control component. */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL 0x2
+
+/* MC_CMD_FC_IN_DIAG_POWER_NOISE msgrequest */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_OP_LEN 4
+/* Sub-opcode describing the operation to be carried out */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_SUB_OP_LEN 4
+/* enum: Read the configuration (the 32-bit values in each of the clock enable
+ * count and toggle count registers)
+ */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG 0x0
+/* enum: Write a new configuration to the clock enable count and toggle count
+ * registers
+ */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG 0x1
+
+/* MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG msgrequest */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_OP_LEN 4
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_SUB_OP_LEN 4
+
+/* MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG msgrequest */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_OP_LEN 4
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_SUB_OP_LEN 4
+/* The 32-bit value to be written to the toggle count register */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_TOGGLE_COUNT_OFST 12
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_TOGGLE_COUNT_LEN 4
+/* The 32-bit value to be written to the clock enable count register */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_CLKEN_COUNT_OFST 16
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_CLKEN_COUNT_LEN 4
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_OP_LEN 4
+/* Sub-opcode describing the operation to be carried out */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_SUB_OP_LEN 4
+/* enum: Starts DDR soak test on selected banks */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START 0x0
+/* enum: Read status of DDR soak test */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT 0x1
+/* enum: Stop test */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP 0x2
+/* enum: Set or clear bit that triggers fake errors. These cause subsequent
+ * tests to fail until the bit is cleared.
+ */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR 0x3
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK_START msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_LEN 24
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_OP_LEN 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SUB_OP_LEN 4
+/* Mask of DDR banks to be tested */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_BANK_MASK_OFST 12
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_BANK_MASK_LEN 4
+/* Pattern to use in the soak test */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_PATTERN_OFST 16
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_PATTERN_LEN 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ZEROS 0x0 /* enum */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ONES 0x1 /* enum */
+/* Either multiple automatic tests until a STOP command is issued, or one
+ * single test
+ */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_TYPE_OFST 20
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_TYPE_LEN 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ONGOING_TEST 0x0 /* enum */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SINGLE_TEST 0x1 /* enum */
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_OP_LEN 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_SUB_OP_LEN 4
+/* DDR bank to read status from */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_BANK_ID_OFST 12
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_BANK_ID_LEN 4
+#define MC_CMD_FC_DDR_BANK0 0x0 /* enum */
+#define MC_CMD_FC_DDR_BANK1 0x1 /* enum */
+#define MC_CMD_FC_DDR_BANK2 0x2 /* enum */
+#define MC_CMD_FC_DDR_BANK3 0x3 /* enum */
+#define MC_CMD_FC_DDR_AOEMEM_MAX_BANKS 0x4 /* enum */
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_OP_LEN 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_SUB_OP_LEN 4
+/* Mask of DDR banks to be tested */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_BANK_MASK_OFST 12
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_BANK_MASK_LEN 4
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_OP_LEN 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SUB_OP_LEN 4
+/* Mask of DDR banks to set/clear error flag on */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_BANK_MASK_OFST 12
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_BANK_MASK_LEN 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_FLAG_ACTION_OFST 16
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_FLAG_ACTION_LEN 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_CLEAR 0x0 /* enum */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SET 0x1 /* enum */
+
+/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL msgrequest */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_OP_LEN 4
+/* Sub-opcode describing the operation to be carried out */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SUB_OP_LEN 4
+/* enum: Set a known datapath configuration */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE 0x0
+/* enum: Apply raw config to datapath control registers */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG 0x1
+
+/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE msgrequest */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_OP_LEN 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SUB_OP_LEN 4
+/* Datapath configuration identifier */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_MODE_OFST 12
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_MODE_LEN 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_PASSTHROUGH 0x0 /* enum */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SNAKE 0x1 /* enum */
+
+/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG msgrequest */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_LEN 24
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_OP_LEN 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_SUB_OP_LEN 4
+/* Value to write into control register 1 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL1_OFST 12
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL1_LEN 4
+/* Value to write into control register 2 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL2_OFST 16
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL2_LEN 4
+/* Value to write into control register 3 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL3_OFST 20
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL3_LEN 4
+
+/* MC_CMD_FC_OUT msgresponse */
+#define MC_CMD_FC_OUT_LEN 0
+
+/* MC_CMD_FC_OUT_NULL msgresponse */
+#define MC_CMD_FC_OUT_NULL_LEN 0
+
+/* MC_CMD_FC_OUT_READ32 msgresponse */
+#define MC_CMD_FC_OUT_READ32_LENMIN 4
+#define MC_CMD_FC_OUT_READ32_LENMAX 252
+#define MC_CMD_FC_OUT_READ32_LEN(num) (0+4*(num))
+#define MC_CMD_FC_OUT_READ32_BUFFER_OFST 0
+#define MC_CMD_FC_OUT_READ32_BUFFER_LEN 4
+#define MC_CMD_FC_OUT_READ32_BUFFER_MINNUM 1
+#define MC_CMD_FC_OUT_READ32_BUFFER_MAXNUM 63
+
+/* MC_CMD_FC_OUT_WRITE32 msgresponse */
+#define MC_CMD_FC_OUT_WRITE32_LEN 0
+
+/* MC_CMD_FC_OUT_TRC_READ msgresponse */
+#define MC_CMD_FC_OUT_TRC_READ_LEN 16
+#define MC_CMD_FC_OUT_TRC_READ_DATA_OFST 0
+#define MC_CMD_FC_OUT_TRC_READ_DATA_LEN 4
+#define MC_CMD_FC_OUT_TRC_READ_DATA_NUM 4
+
+/* MC_CMD_FC_OUT_TRC_WRITE msgresponse */
+#define MC_CMD_FC_OUT_TRC_WRITE_LEN 0
+
+/* MC_CMD_FC_OUT_GET_VERSION msgresponse */
+#define MC_CMD_FC_OUT_GET_VERSION_LEN 12
+#define MC_CMD_FC_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define MC_CMD_FC_OUT_GET_VERSION_FIRMWARE_LEN 4
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_OFST 4
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_LEN 8
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_LO_OFST 4
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_HI_OFST 8
+
+/* MC_CMD_FC_OUT_TRC_RX_READ msgresponse */
+#define MC_CMD_FC_OUT_TRC_RX_READ_LEN 8
+#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_OFST 0
+#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_LEN 4
+#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_NUM 2
+
+/* MC_CMD_FC_OUT_TRC_RX_WRITE msgresponse */
+#define MC_CMD_FC_OUT_TRC_RX_WRITE_LEN 0
+
+/* MC_CMD_FC_OUT_MAC_RECONFIGURE msgresponse */
+#define MC_CMD_FC_OUT_MAC_RECONFIGURE_LEN 0
+
+/* MC_CMD_FC_OUT_MAC_SET_LINK msgresponse */
+#define MC_CMD_FC_OUT_MAC_SET_LINK_LEN 0
+
+/* MC_CMD_FC_OUT_MAC_READ_STATUS msgresponse */
+#define MC_CMD_FC_OUT_MAC_READ_STATUS_LEN 4
+#define MC_CMD_FC_OUT_MAC_READ_STATUS_STATUS_OFST 0
+#define MC_CMD_FC_OUT_MAC_READ_STATUS_STATUS_LEN 4
+
+/* MC_CMD_FC_OUT_MAC_GET_RX_STATS msgresponse */
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_RX_NSTATS))+1))>>3)
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LEN 8
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LO_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_HI_OFST 4
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_NUM MC_CMD_FC_MAC_RX_NSTATS
+#define MC_CMD_FC_MAC_RX_STATS_OCTETS 0x0 /* enum */
+#define MC_CMD_FC_MAC_RX_OCTETS_OK 0x1 /* enum */
+#define MC_CMD_FC_MAC_RX_ALIGNMENT_ERRORS 0x2 /* enum */
+#define MC_CMD_FC_MAC_RX_PAUSE_MAC_CTRL_FRAMES 0x3 /* enum */
+#define MC_CMD_FC_MAC_RX_FRAMES_OK 0x4 /* enum */
+#define MC_CMD_FC_MAC_RX_CRC_ERRORS 0x5 /* enum */
+#define MC_CMD_FC_MAC_RX_VLAN_OK 0x6 /* enum */
+#define MC_CMD_FC_MAC_RX_ERRORS 0x7 /* enum */
+#define MC_CMD_FC_MAC_RX_UCAST_PKTS 0x8 /* enum */
+#define MC_CMD_FC_MAC_RX_MULTICAST_PKTS 0x9 /* enum */
+#define MC_CMD_FC_MAC_RX_BROADCAST_PKTS 0xa /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_DROP_EVENTS 0xb /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS 0xc /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_UNDERSIZE_PKTS 0xd /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_64 0xe /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_65_127 0xf /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_128_255 0x10 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_256_511 0x11 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_512_1023 0x12 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_1024_1518 0x13 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_1519_MAX 0x14 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_OVERSIZE_PKTS 0x15 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_JABBERS 0x16 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_FRAGMENTS 0x17 /* enum */
+#define MC_CMD_FC_MAC_RX_MAC_CONTROL_FRAMES 0x18 /* enum */
+/* enum: (Last entry) */
+#define MC_CMD_FC_MAC_RX_NSTATS 0x19
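+
+/* Illustrative sketch (not part of the firmware interface): the RX MAC
+ * statistics are returned as an array of MC_CMD_FC_MAC_RX_NSTATS 64-bit
+ * counters, each described by the 8-byte stride and LO/HI dword offsets
+ * above.  A hypothetical accessor, assuming little-endian MCDI encoding,
+ * 'resp' pointing at the start of the response payload and a get_le32()
+ * helper that reads a little-endian dword:
+ *
+ *   uint64_t fc_rx_stat(const uint8_t *resp, unsigned int idx)
+ *   {
+ *           size_t ofst = MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_OFST +
+ *               idx * MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LEN;
+ *
+ *           return (uint64_t)get_le32(resp + ofst) |
+ *                  ((uint64_t)get_le32(resp + ofst + 4) << 32);
+ *   }
+ *
+ *   uint64_t crc_errors = fc_rx_stat(resp, MC_CMD_FC_MAC_RX_CRC_ERRORS);
+ */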
+
+/* MC_CMD_FC_OUT_MAC_GET_TX_STATS msgresponse */
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_TX_NSTATS))+1))>>3)
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_LEN 8
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_LO_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_HI_OFST 4
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_NUM MC_CMD_FC_MAC_TX_NSTATS
+#define MC_CMD_FC_MAC_TX_STATS_OCTETS 0x0 /* enum */
+#define MC_CMD_FC_MAC_TX_OCTETS_OK 0x1 /* enum */
+#define MC_CMD_FC_MAC_TX_ALIGNMENT_ERRORS 0x2 /* enum */
+#define MC_CMD_FC_MAC_TX_PAUSE_MAC_CTRL_FRAMES 0x3 /* enum */
+#define MC_CMD_FC_MAC_TX_FRAMES_OK 0x4 /* enum */
+#define MC_CMD_FC_MAC_TX_CRC_ERRORS 0x5 /* enum */
+#define MC_CMD_FC_MAC_TX_VLAN_OK 0x6 /* enum */
+#define MC_CMD_FC_MAC_TX_ERRORS 0x7 /* enum */
+#define MC_CMD_FC_MAC_TX_UCAST_PKTS 0x8 /* enum */
+#define MC_CMD_FC_MAC_TX_MULTICAST_PKTS 0x9 /* enum */
+#define MC_CMD_FC_MAC_TX_BROADCAST_PKTS 0xa /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_DROP_EVENTS 0xb /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS 0xc /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_UNDERSIZE_PKTS 0xd /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_64 0xe /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_65_127 0xf /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_128_255 0x10 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_256_511 0x11 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_512_1023 0x12 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_1024_1518 0x13 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_1519_TX_MTU 0x14 /* enum */
+#define MC_CMD_FC_MAC_TX_MAC_CONTROL_FRAMES 0x15 /* enum */
+/* enum: (Last entry) */
+#define MC_CMD_FC_MAC_TX_NSTATS 0x16
+
+/* MC_CMD_FC_OUT_MAC_GET_STATS msgresponse */
+#define MC_CMD_FC_OUT_MAC_GET_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_NSTATS_PER_BLOCK))+1))>>3)
+/* MAC Statistics */
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_LEN 8
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_LO_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_HI_OFST 4
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_NUM MC_CMD_FC_MAC_NSTATS_PER_BLOCK
+
+/* MC_CMD_FC_OUT_MAC msgresponse */
+#define MC_CMD_FC_OUT_MAC_LEN 0
+
+/* MC_CMD_FC_OUT_SFP msgresponse */
+#define MC_CMD_FC_OUT_SFP_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_TEST_START msgresponse */
+#define MC_CMD_FC_OUT_DDR_TEST_START_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_TEST_POLL msgresponse */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_LEN 8
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_STATUS_OFST 0
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_STATUS_LEN 4
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_LBN 0
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_WIDTH 8
+/* enum: Test not yet initiated */
+#define MC_CMD_FC_OP_DDR_TEST_NONE 0x0
+/* enum: Test is in progress */
+#define MC_CMD_FC_OP_DDR_TEST_INPROGRESS 0x1
+/* enum: Test completed */
+#define MC_CMD_FC_OP_DDR_TEST_SUCCESS 0x2
+/* enum: Test did not complete in specified time */
+#define MC_CMD_FC_OP_DDR_TEST_TIMER_EXPIRED 0x3
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0_LBN 11
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T1_LBN 10
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T1_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B0_LBN 9
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B1_LBN 8
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B1_WIDTH 1
+/* Test result from FPGA */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_RESULT_OFST 4
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_RESULT_LEN 4
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T0_LBN 31
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T1_LBN 30
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T1_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B0_LBN 29
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B1_LBN 28
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B1_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T0_LBN 15
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T0_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T1_LBN 10
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T1_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B0_LBN 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B0_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B1_LBN 0
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B1_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_COMPLETE 0x0 /* enum */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_FAIL 0x1 /* enum */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_PASS 0x2 /* enum */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CAL_FAIL 0x3 /* enum */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CAL_SUCCESS 0x4 /* enum */
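+
+/* Illustrative note (not part of the firmware interface): the _LBN/_WIDTH
+ * pairs above name bitfields within the preceding dword (LBN = low bit
+ * number, WIDTH = width in bits), so a field is extracted as
+ * (value >> LBN) & ((1 << WIDTH) - 1).  For example, with 'status' holding
+ * the dword read at MC_CMD_FC_OUT_DDR_TEST_POLL_STATUS_OFST:
+ *
+ *   uint32_t code = (status >> MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_LBN) &
+ *                   ((1u << MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_WIDTH) - 1);
+ *
+ *   if (code == MC_CMD_FC_OP_DDR_TEST_SUCCESS)
+ *           handle_success();   // hypothetical handler
+ */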
+
+/* MC_CMD_FC_OUT_DDR_TEST msgresponse */
+#define MC_CMD_FC_OUT_DDR_TEST_LEN 0
+
+/* MC_CMD_FC_OUT_GET_ASSERT msgresponse */
+#define MC_CMD_FC_OUT_GET_ASSERT_LEN 144
+/* Assertion status flag. */
+#define MC_CMD_FC_OUT_GET_ASSERT_GLOBAL_FLAGS_OFST 0
+#define MC_CMD_FC_OUT_GET_ASSERT_GLOBAL_FLAGS_LEN 4
+#define MC_CMD_FC_OUT_GET_ASSERT_STATE_LBN 8
+#define MC_CMD_FC_OUT_GET_ASSERT_STATE_WIDTH 8
+/* enum: No crash data available */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_CLEAR 0x0
+/* enum: New crash data available */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NEW 0x1
+/* enum: Crash data has been sent */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NOTIFIED 0x2
+#define MC_CMD_FC_OUT_GET_ASSERT_TYPE_LBN 0
+#define MC_CMD_FC_OUT_GET_ASSERT_TYPE_WIDTH 8
+/* enum: No crash has been recorded. */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_NONE 0x0
+/* enum: Crash due to exception. */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_EXCEPTION 0x1
+/* enum: Crash due to assertion. */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_ASSERTION 0x2
+/* Failing PC value */
+#define MC_CMD_FC_OUT_GET_ASSERT_SAVED_PC_OFFS_OFST 4
+#define MC_CMD_FC_OUT_GET_ASSERT_SAVED_PC_OFFS_LEN 4
+/* Saved GP regs */
+#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_OFST 8
+#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_LEN 4
+#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_NUM 31
+/* Exception Type */
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_TYPE_OFFS_OFST 132
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_TYPE_OFFS_LEN 4
+/* Instruction at which exception occurred */
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_PC_ADDR_OFFS_OFST 136
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_PC_ADDR_OFFS_LEN 4
+/* Bad address that triggered the address-based exception */
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_BAD_ADDR_OFFS_OFST 140
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_BAD_ADDR_OFFS_LEN 4
+
+/* MC_CMD_FC_OUT_FPGA_BUILD msgresponse */
+#define MC_CMD_FC_OUT_FPGA_BUILD_LEN 32
+#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_INFO_OFST 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_INFO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_IS_APPLICATION_LBN 31
+#define MC_CMD_FC_OUT_FPGA_BUILD_IS_APPLICATION_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_IS_LICENSED_LBN 30
+#define MC_CMD_FC_OUT_FPGA_BUILD_IS_LICENSED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_ID_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_ID_WIDTH 14
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_WIDTH 4
+/* Build timestamp (seconds since epoch) */
+#define MC_CMD_FC_OUT_FPGA_BUILD_TIMESTAMP_OFST 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_TIMESTAMP_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_PARAMETERS_OFST 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_PARAMETERS_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_FPGA_TYPE_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_FPGA_TYPE_WIDTH 8
+#define MC_CMD_FC_FPGA_TYPE_A7 0xa7 /* enum */
+#define MC_CMD_FC_FPGA_TYPE_A5 0xa5 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED1_LBN 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED1_WIDTH 10
+#define MC_CMD_FC_OUT_FPGA_BUILD_PTP_ENABLED_LBN 18
+#define MC_CMD_FC_OUT_FPGA_BUILD_PTP_ENABLED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM1_RLDRAM_DEF_LBN 19
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM1_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM2_RLDRAM_DEF_LBN 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM2_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM3_RLDRAM_DEF_LBN 21
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM3_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM4_RLDRAM_DEF_LBN 22
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM4_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T0_DDR3_DEF_LBN 23
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T0_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_DDR3_DEF_LBN 24
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B0_DDR3_DEF_LBN 25
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B0_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B1_DDR3_DEF_LBN 26
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B1_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_DDR3_ECC_ENABLED_LBN 27
+#define MC_CMD_FC_OUT_FPGA_BUILD_DDR3_ECC_ENABLED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_QDR_DEF_LBN 28
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_QDR_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED2_LBN 29
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED2_WIDTH 2
+#define MC_CMD_FC_OUT_FPGA_BUILD_CRC_APPEND_LBN 31
+#define MC_CMD_FC_OUT_FPGA_BUILD_CRC_APPEND_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_IDENTIFIER_OFST 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_IDENTIFIER_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_CHANGESET_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_CHANGESET_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_FLAG_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_FLAG_WIDTH 1
+#define MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 /* enum */
+#define MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED3_LBN 17
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED3_WIDTH 15
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_HI_OFST 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_HI_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MINOR_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MINOR_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MAJOR_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MAJOR_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_LO_OFST 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_LO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_BUILD_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_BUILD_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MICRO_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MICRO_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_OFST 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_LEN 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_LO_OFST 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_HI_OFST 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_LO_OFST 24
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_LO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HI_OFST 28
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HI_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HIGH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HIGH_WIDTH 16
+
+/* MC_CMD_FC_OUT_FPGA_BUILD_V2 msgresponse */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_LEN 32
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_INFO_OFST 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_INFO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_APPLICATION_LBN 31
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_APPLICATION_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_LICENSED_LBN 30
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_LICENSED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_ID_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_ID_WIDTH 14
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_NUM_WIDTH 4
+/* Build timestamp (seconds since epoch) */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_TIMESTAMP_OFST 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_TIMESTAMP_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PARAMETERS_OFST 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PARAMETERS_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PMA_PASSTHROUGH_LBN 31
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PMA_PASSTHROUGH_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_QDR_DEF_LBN 29
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_QDR_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_QDR_DEF_LBN 28
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_QDR_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DDR3_ECC_ENABLED_LBN 27
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DDR3_ECC_ENABLED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_DDR3_DEF_LBN 26
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_DDR3_DEF_LBN 25
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_TO_DDR3_DEF_LBN 24
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_TO_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_T0_DDR3_DEF_LBN 23
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_T0_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_RLDRAM_DEF_LBN 22
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_RLDRAM_DEF_LBN 21
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_RLDRAM_DEF_LBN 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_RLDRAM_DEF_LBN 19
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_LBN 18
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_10G 0x0 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_40G 0x1 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_LBN 17
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_10G 0x0 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_40G 0x1 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_10G 0x0 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_40G 0x1 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP7_DEF_LBN 15
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP7_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP6_DEF_LBN 14
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP6_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP5_DEF_LBN 13
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP5_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_DEF_LBN 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP3_DEF_LBN 11
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP2_DEF_LBN 10
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP2_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP1_DEF_LBN 9
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP1_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_DEF_LBN 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC3_DEF_LBN 7
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC2_DEF_LBN 6
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC2_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC1_DEF_LBN 5
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC1_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_DEF_LBN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_FPGA_TYPE_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_FPGA_TYPE_WIDTH 4
+#define MC_CMD_FC_FPGA_V2_TYPE_A3 0x0 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_A4 0x1 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_A5 0x2 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_A7 0x3 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_D3 0x8 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_D4 0x9 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_D5 0xa /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_D7 0xb /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IDENTIFIER_OFST 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IDENTIFIER_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_CHANGESET_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_CHANGESET_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_FLAG_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_FLAG_WIDTH 1
+/* MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 */
+/* MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_HI_OFST 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_HI_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MINOR_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MINOR_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MAJOR_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MAJOR_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_LO_OFST 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_LO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_BUILD_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_BUILD_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MICRO_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MICRO_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_LO_OFST 24
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_LO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HI_OFST 28
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HI_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HIGH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HIGH_WIDTH 16
+
+/* MC_CMD_FC_OUT_FPGA_SERVICES msgresponse */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_LEN 32
+#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_INFO_OFST 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_INFO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_APPLICATION_LBN 31
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_APPLICATION_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_LICENSED_LBN 30
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_LICENSED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_WIDTH 14
+#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_NUM_WIDTH 4
+/* Build timestamp (seconds since epoch) */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_TIMESTAMP_OFST 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_TIMESTAMP_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_PARAMETERS_OFST 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_PARAMETERS_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_FC_FLASH_BOOTED_LBN 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_FC_FLASH_BOOTED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC0_DEF_LBN 27
+#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC0_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC1_DEF_LBN 28
+#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC1_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP0_DEF_LBN 29
+#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP0_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP1_DEF_LBN 30
+#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP1_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_RESERVED_LBN 31
+#define MC_CMD_FC_OUT_FPGA_SERVICES_RESERVED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IDENTIFIER_OFST 12
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IDENTIFIER_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_CHANGESET_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_CHANGESET_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_FLAG_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_FLAG_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_OFST 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_WIDTH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_WIDTH_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_COUNT_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_COUNT_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_OFST 20
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_WIDTH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_WIDTH_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_COUNT_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_COUNT_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_LO_OFST 24
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_LO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HI_OFST 28
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HI_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HIGH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HIGH_WIDTH 16
+
+/* MC_CMD_FC_OUT_FPGA_SERVICES_V2 msgresponse */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_LEN 32
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_INFO_OFST 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_INFO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_APPLICATION_LBN 31
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_APPLICATION_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_LICENSED_LBN 30
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_LICENSED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_ID_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_ID_WIDTH 14
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_NUM_WIDTH 4
+/* Build timestamp (seconds since epoch) */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_TIMESTAMP_OFST 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_TIMESTAMP_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PARAMETERS_OFST 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PARAMETERS_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PTP_ENABLED_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PTP_ENABLED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_FC_FLASH_BOOTED_LBN 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_FC_FLASH_BOOTED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IDENTIFIER_OFST 12
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IDENTIFIER_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_CHANGESET_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_CHANGESET_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_FLAG_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_FLAG_WIDTH 1
+/* MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 */
+/* MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_LO_OFST 24
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_LO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HI_OFST 28
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HI_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HIGH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HIGH_WIDTH 16
+
+/* MC_CMD_FC_OUT_BSP_VERSION msgresponse */
+#define MC_CMD_FC_OUT_BSP_VERSION_LEN 4
+/* Qsys system ID */
+#define MC_CMD_FC_OUT_BSP_VERSION_SYSID_OFST 0
+#define MC_CMD_FC_OUT_BSP_VERSION_SYSID_LEN 4
+#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_BSP_VERSION_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_BSP_VERSION_BUILD_NUM_WIDTH 4
+
+/* MC_CMD_FC_OUT_READ_MAP_COUNT msgresponse */
+#define MC_CMD_FC_OUT_READ_MAP_COUNT_LEN 4
+/* Number of maps */
+#define MC_CMD_FC_OUT_READ_MAP_COUNT_NUM_MAPS_OFST 0
+#define MC_CMD_FC_OUT_READ_MAP_COUNT_NUM_MAPS_LEN 4
+
+/* MC_CMD_FC_OUT_READ_MAP_INDEX msgresponse */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN 164
+/* Index of the map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_INDEX_OFST 0
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_INDEX_LEN 4
+/* Options for the map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_OPTIONS_OFST 4
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_OPTIONS_LEN 4
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_8 0x0 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_16 0x1 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_32 0x2 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_64 0x3 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_MASK 0x3 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_PATH_FC 0x4 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_PATH_MEM 0x8 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_READ 0x10 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_WRITE 0x20 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_FREE 0x0 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_LICENSED 0x40 /* enum */
+/* Address of start of map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_OFST 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_LEN 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_LO_OFST 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_HI_OFST 12
+/* Length of address map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_OFST 16
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_LEN 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_LO_OFST 16
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_HI_OFST 20
+/* Component information field */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_COMP_INFO_OFST 24
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_COMP_INFO_LEN 4
+/* License expiry date for map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_OFST 28
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_LEN 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_LO_OFST 28
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_HI_OFST 32
+/* Name of the component */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_OFST 36
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_LEN 1
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_NUM 128
+
+/* MC_CMD_FC_OUT_READ_MAP msgresponse */
+#define MC_CMD_FC_OUT_READ_MAP_LEN 0
+
+/* MC_CMD_FC_OUT_CAPABILITIES msgresponse */
+#define MC_CMD_FC_OUT_CAPABILITIES_LEN 8
+/* Number of internal ports */
+#define MC_CMD_FC_OUT_CAPABILITIES_INTERNAL_OFST 0
+#define MC_CMD_FC_OUT_CAPABILITIES_INTERNAL_LEN 4
+/* Number of external ports */
+#define MC_CMD_FC_OUT_CAPABILITIES_EXTERNAL_OFST 4
+#define MC_CMD_FC_OUT_CAPABILITIES_EXTERNAL_LEN 4
+
+/* MC_CMD_FC_OUT_GLOBAL_FLAGS msgresponse */
+#define MC_CMD_FC_OUT_GLOBAL_FLAGS_LEN 4
+#define MC_CMD_FC_OUT_GLOBAL_FLAGS_FLAGS_OFST 0
+#define MC_CMD_FC_OUT_GLOBAL_FLAGS_FLAGS_LEN 4
+
+/* MC_CMD_FC_OUT_IO_REL msgresponse */
+#define MC_CMD_FC_OUT_IO_REL_LEN 0
+
+/* MC_CMD_FC_OUT_IO_REL_GET_ADDR msgresponse */
+#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_LEN 8
+#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_HI_OFST 0
+#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_HI_LEN 4
+#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_LO_OFST 4
+#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_LO_LEN 4
+
+/* MC_CMD_FC_OUT_IO_REL_READ32 msgresponse */
+#define MC_CMD_FC_OUT_IO_REL_READ32_LENMIN 4
+#define MC_CMD_FC_OUT_IO_REL_READ32_LENMAX 252
+#define MC_CMD_FC_OUT_IO_REL_READ32_LEN(num) (0+4*(num))
+#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_OFST 0
+#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_LEN 4
+#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MINNUM 1
+#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MAXNUM 63
+
+/* MC_CMD_FC_OUT_IO_REL_WRITE32 msgresponse */
+#define MC_CMD_FC_OUT_IO_REL_WRITE32_LEN 0
+
+/* MC_CMD_FC_OUT_UHLINK_PHY msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_PHY_LEN 48
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_0_OFST 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_0_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_VOD_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_VOD_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_1STPOSTTAP_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_1STPOSTTAP_WIDTH 16
+/* Transceiver Transmit settings */
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_1_OFST 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_1_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_PRETAP_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_PRETAP_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_2NDPOSTTAP_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_2NDPOSTTAP_WIDTH 16
+/* Transceiver Receive settings */
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_SETTINGS_OFST 8
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_SETTINGS_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_DC_GAIN_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_DC_GAIN_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_EQ_CONTROL_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_EQ_CONTROL_WIDTH 16
+/* Rx eye opening */
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_OFST 12
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_WIDTH_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_WIDTH_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_HEIGHT_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_HEIGHT_WIDTH 16
+/* PCS status word */
+#define MC_CMD_FC_OUT_UHLINK_PHY_PCS_STATUS_OFST 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_PCS_STATUS_LEN 4
+/* Link status word */
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WORD_OFST 20
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WORD_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WIDTH 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_CONFIGURED_LBN 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_CONFIGURED_WIDTH 1
+/* Current SFP parameters applied */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PARAMS_OFST 24
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PARAMS_LEN 20
+/* Link speed in Mb/s: 100, 1000 or 10000 */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_SPEED_OFST 24
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_SPEED_LEN 4
+/* Length of copper cable - zero when not relevant */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_COPPER_LEN_OFST 28
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_COPPER_LEN_LEN 4
+/* True if a dual speed SFP+ module */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_DUAL_SPEED_OFST 32
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_DUAL_SPEED_LEN 4
+/* True if an SFP Module is present (other fields valid when true) */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PRESENT_OFST 36
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PRESENT_LEN 4
+/* The type of the SFP+ Module */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_TYPE_OFST 40
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_TYPE_LEN 4
+/* PHY config flags */
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_OFST 44
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_DFE_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_DFE_WIDTH 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_AEQ_LBN 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_AEQ_WIDTH 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_RX_TUNING_LBN 2
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_RX_TUNING_WIDTH 1
+
+/* MC_CMD_FC_OUT_UHLINK_MAC msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_MAC_LEN 20
+/* MAC configuration applied */
+#define MC_CMD_FC_OUT_UHLINK_MAC_CONFIG_OFST 0
+#define MC_CMD_FC_OUT_UHLINK_MAC_CONFIG_LEN 4
+/* MTU size */
+#define MC_CMD_FC_OUT_UHLINK_MAC_MTU_OFST 4
+#define MC_CMD_FC_OUT_UHLINK_MAC_MTU_LEN 4
+/* IF Mode status */
+#define MC_CMD_FC_OUT_UHLINK_MAC_IF_STATUS_OFST 8
+#define MC_CMD_FC_OUT_UHLINK_MAC_IF_STATUS_LEN 4
+/* MAC address configured */
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_OFST 12
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_LEN 8
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_LO_OFST 12
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_HI_OFST 16
+
+/* MC_CMD_FC_OUT_UHLINK_RX_EYE msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_LEN ((((0-1+(32*MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK))+1))>>3)
+/* Rx Eye measurements */
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_OFST 0
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_NUM MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK
+
+/* MC_CMD_FC_OUT_UHLINK_DUMP_RX_EYE_PLOT msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_DUMP_RX_EYE_PLOT_LEN 0
+
+/* MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_LEN ((((32-1+(64*MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK))+1))>>3)
+/* Whether the eye plot dump has completed and the returned data is valid */
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_VALID_OFST 0
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_VALID_LEN 4
+/* Rx Eye binary plot */
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_OFST 4
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_LEN 8
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_LO_OFST 4
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_HI_OFST 8
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_NUM MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK
+
+/* MC_CMD_FC_OUT_UHLINK_RX_TUNE msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_RX_TUNE_LEN 0
+
+/* MC_CMD_FC_OUT_UHLINK_LOOPBACK_SET msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_SET_LEN 0
+
+/* MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_STATE_OFST 0
+#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_STATE_LEN 4
+
+/* MC_CMD_FC_OUT_UHLINK msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_LEN 0
+
+/* MC_CMD_FC_OUT_SET_LINK msgresponse */
+#define MC_CMD_FC_OUT_SET_LINK_LEN 0
+
+/* MC_CMD_FC_OUT_LICENSE msgresponse */
+#define MC_CMD_FC_OUT_LICENSE_LEN 12
+/* Count of valid keys */
+#define MC_CMD_FC_OUT_LICENSE_VALID_KEYS_OFST 0
+#define MC_CMD_FC_OUT_LICENSE_VALID_KEYS_LEN 4
+/* Count of invalid keys */
+#define MC_CMD_FC_OUT_LICENSE_INVALID_KEYS_OFST 4
+#define MC_CMD_FC_OUT_LICENSE_INVALID_KEYS_LEN 4
+/* Count of blacklisted keys */
+#define MC_CMD_FC_OUT_LICENSE_BLACKLISTED_KEYS_OFST 8
+#define MC_CMD_FC_OUT_LICENSE_BLACKLISTED_KEYS_LEN 4
+
+/* MC_CMD_FC_OUT_STARTUP msgresponse */
+#define MC_CMD_FC_OUT_STARTUP_LEN 4
+/* Capabilities of the FPGA/FC */
+#define MC_CMD_FC_OUT_STARTUP_CAPABILITIES_OFST 0
+#define MC_CMD_FC_OUT_STARTUP_CAPABILITIES_LEN 4
+#define MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_LBN 0
+#define MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_WIDTH 1
+
+/* MC_CMD_FC_OUT_DMA_READ msgresponse */
+#define MC_CMD_FC_OUT_DMA_READ_LENMIN 1
+#define MC_CMD_FC_OUT_DMA_READ_LENMAX 252
+#define MC_CMD_FC_OUT_DMA_READ_LEN(num) (0+1*(num))
+/* The data read */
+#define MC_CMD_FC_OUT_DMA_READ_DATA_OFST 0
+#define MC_CMD_FC_OUT_DMA_READ_DATA_LEN 1
+#define MC_CMD_FC_OUT_DMA_READ_DATA_MINNUM 1
+#define MC_CMD_FC_OUT_DMA_READ_DATA_MAXNUM 252
+
+/* MC_CMD_FC_OUT_TIMED_READ_SET msgresponse */
+#define MC_CMD_FC_OUT_TIMED_READ_SET_LEN 4
+/* Timer handle */
+#define MC_CMD_FC_OUT_TIMED_READ_SET_FC_HANDLE_OFST 0
+#define MC_CMD_FC_OUT_TIMED_READ_SET_FC_HANDLE_LEN 4
+
+/* MC_CMD_FC_OUT_TIMED_READ_GET msgresponse */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_LEN 52
+/* Host supplied handle (unique) */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_HANDLE_OFST 0
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_HANDLE_LEN 4
+/* Host address into which to transfer data */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_OFST 4
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_LEN 8
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_LO_OFST 4
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_HI_OFST 8
+/* AOE address from which to transfer data */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_OFST 12
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_LEN 8
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_LO_OFST 12
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_HI_OFST 16
+/* Length of AOE transfer (total) */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_LENGTH_OFST 20
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_LENGTH_LEN 4
+/* Length of host transfer (total) */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_LENGTH_OFST 24
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_LENGTH_LEN 4
+/* See FLAGS entry for MC_CMD_FC_IN_TIMED_READ_SET */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_FLAGS_OFST 28
+#define MC_CMD_FC_OUT_TIMED_READ_GET_FLAGS_LEN 4
+#define MC_CMD_FC_OUT_TIMED_READ_GET_PERIOD_OFST 32
+#define MC_CMD_FC_OUT_TIMED_READ_GET_PERIOD_LEN 4
+/* When active, start read time */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_OFST 36
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_LEN 8
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_LO_OFST 36
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_HI_OFST 40
+/* When active, end read time */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_OFST 44
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_LEN 8
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_LO_OFST 44
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_HI_OFST 48
+
+/* MC_CMD_FC_OUT_LOG_ADDR_RANGE msgresponse */
+#define MC_CMD_FC_OUT_LOG_ADDR_RANGE_LEN 0
+
+/* MC_CMD_FC_OUT_LOG msgresponse */
+#define MC_CMD_FC_OUT_LOG_LEN 0
+
+/* MC_CMD_FC_OUT_CLOCK_GET_TIME msgresponse */
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_LEN 24
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_CLOCK_ID_OFST 0
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_CLOCK_ID_LEN 4
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_OFST 4
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_LEN 8
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_LO_OFST 4
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_HI_OFST 8
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_NANOSECONDS_OFST 12
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_NANOSECONDS_LEN 4
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_RANGE_OFST 16
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_RANGE_LEN 4
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_PRECISION_OFST 20
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_PRECISION_LEN 4
+
+/* MC_CMD_FC_OUT_CLOCK_SET_TIME msgresponse */
+#define MC_CMD_FC_OUT_CLOCK_SET_TIME_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_SET_SPD msgresponse */
+#define MC_CMD_FC_OUT_DDR_SET_SPD_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_SET_INFO msgresponse */
+#define MC_CMD_FC_OUT_DDR_SET_INFO_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_GET_STATUS msgresponse */
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_LEN 4
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_FLAGS_OFST 0
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_FLAGS_LEN 4
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_READY_LBN 0
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_READY_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_CALIBRATED_LBN 1
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_CALIBRATED_WIDTH 1
+
+/* MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT msgresponse */
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_LEN 8
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_SECONDS_OFST 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_SECONDS_LEN 4
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_NANOSECONDS_OFST 4
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_NANOSECONDS_LEN 4
+
+/* MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT msgresponse */
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LENMIN 8
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LENMAX 248
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LEN(num) (0+8*(num))
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_SECONDS_OFST 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_SECONDS_LEN 4
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_NANOSECONDS_OFST 4
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_NANOSECONDS_LEN 4
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_OFST 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_LEN 8
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_LO_OFST 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_HI_OFST 4
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_MINNUM 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_MAXNUM 31
+
+/* MC_CMD_FC_OUT_SPI_READ msgresponse */
+#define MC_CMD_FC_OUT_SPI_READ_LENMIN 4
+#define MC_CMD_FC_OUT_SPI_READ_LENMAX 252
+#define MC_CMD_FC_OUT_SPI_READ_LEN(num) (0+4*(num))
+#define MC_CMD_FC_OUT_SPI_READ_BUFFER_OFST 0
+#define MC_CMD_FC_OUT_SPI_READ_BUFFER_LEN 4
+#define MC_CMD_FC_OUT_SPI_READ_BUFFER_MINNUM 1
+#define MC_CMD_FC_OUT_SPI_READ_BUFFER_MAXNUM 63
+
+/* MC_CMD_FC_OUT_SPI_WRITE msgresponse */
+#define MC_CMD_FC_OUT_SPI_WRITE_LEN 0
+
+/* MC_CMD_FC_OUT_SPI_ERASE msgresponse */
+#define MC_CMD_FC_OUT_SPI_ERASE_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG msgresponse */
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_LEN 8
+/* The 32-bit value read from the toggle count register */
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_TOGGLE_COUNT_OFST 0
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_TOGGLE_COUNT_LEN 4
+/* The 32-bit value read from the clock enable count register */
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_CLKEN_COUNT_OFST 4
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_CLKEN_COUNT_LEN 4
+
+/* MC_CMD_FC_OUT_DIAG_POWER_NOISE_WRITE_CONFIG msgresponse */
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_WRITE_CONFIG_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_START msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_START_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_LEN 8
+/* DDR soak test status word; bits [4:0] are relevant. */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_STATUS_OFST 0
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_STATUS_LEN 4
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PASSED_LBN 0
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PASSED_WIDTH 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_FAILED_LBN 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_FAILED_WIDTH 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_COMPLETED_LBN 2
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_COMPLETED_WIDTH 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_TIMEOUT_LBN 3
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_TIMEOUT_WIDTH 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PNF_LBN 4
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PNF_WIDTH 1
+/* DDR soak test error count */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_ERR_COUNT_OFST 4
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_ERR_COUNT_LEN 4
+
+/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_STOP msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_STOP_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_ERROR msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_ERROR_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_SET_MODE msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_SET_MODE_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_RAW_CONFIG msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_RAW_CONFIG_LEN 0
+
+
+/***********************************/
+/* MC_CMD_AOE
+ * AOE operations on MC
+ */
+#define MC_CMD_AOE 0xa
+
+/* MC_CMD_AOE_IN msgrequest */
+#define MC_CMD_AOE_IN_LEN 4
+#define MC_CMD_AOE_IN_OP_HDR_OFST 0
+#define MC_CMD_AOE_IN_OP_HDR_LEN 4
+#define MC_CMD_AOE_IN_OP_LBN 0
+#define MC_CMD_AOE_IN_OP_WIDTH 8
+/* enum: FPGA and CPLD information */
+#define MC_CMD_AOE_OP_INFO 0x1
+/* enum: Currents and voltages read from MCP3424s; DEBUG */
+#define MC_CMD_AOE_OP_CURRENTS 0x2
+/* enum: Temperatures at locations around the PCB; DEBUG */
+#define MC_CMD_AOE_OP_TEMPERATURES 0x3
+/* enum: Set CPLD to idle */
+#define MC_CMD_AOE_OP_CPLD_IDLE 0x4
+/* enum: Read from CPLD register */
+#define MC_CMD_AOE_OP_CPLD_READ 0x5
+/* enum: Write to CPLD register */
+#define MC_CMD_AOE_OP_CPLD_WRITE 0x6
+/* enum: Execute CPLD instruction */
+#define MC_CMD_AOE_OP_CPLD_INSTRUCTION 0x7
+/* enum: Reprogram the CPLD on the AOE device */
+#define MC_CMD_AOE_OP_CPLD_REPROGRAM 0x8
+/* enum: AOE power control */
+#define MC_CMD_AOE_OP_POWER 0x9
+/* enum: AOE image loading */
+#define MC_CMD_AOE_OP_LOAD 0xa
+/* enum: Fan monitoring */
+#define MC_CMD_AOE_OP_FAN_CONTROL 0xb
+/* enum: Fan failures since last reset */
+#define MC_CMD_AOE_OP_FAN_FAILURES 0xc
+/* enum: Get generic AOE MAC statistics */
+#define MC_CMD_AOE_OP_MAC_STATS 0xd
+/* enum: Retrieve PHY specific information */
+#define MC_CMD_AOE_OP_GET_PHY_MEDIA_INFO 0xe
+/* enum: Write a number of JTAG primitive commands; the response returns the data */
+#define MC_CMD_AOE_OP_JTAG_WRITE 0xf
+/* enum: Control access to the FPGA via the Siena JTAG Chain */
+#define MC_CMD_AOE_OP_FPGA_ACCESS 0x10
+/* enum: Set the MTU offset between Siena and AOE MACs */
+#define MC_CMD_AOE_OP_SET_MTU_OFFSET 0x11
+/* enum: How link state is handled */
+#define MC_CMD_AOE_OP_LINK_STATE 0x12
+/* enum: How Siena MAC statistics are reported (deprecated - use
+ * MC_CMD_AOE_OP_ASIC_STATS)
+ */
+#define MC_CMD_AOE_OP_SIENA_STATS 0x13
+/* enum: How native ASIC MAC statistics are reported - replaces the deprecated
+ * command MC_CMD_AOE_OP_SIENA_STATS
+ */
+#define MC_CMD_AOE_OP_ASIC_STATS 0x13
+/* enum: DDR memory information */
+#define MC_CMD_AOE_OP_DDR 0x14
+/* enum: FC control */
+#define MC_CMD_AOE_OP_FC 0x15
+/* enum: DDR ECC status reads */
+#define MC_CMD_AOE_OP_DDR_ECC_STATUS 0x16
+/* enum: Commands for MC-SPI Master emulation */
+#define MC_CMD_AOE_OP_MC_SPI_MASTER 0x17
+/* enum: Commands for FC boot control */
+#define MC_CMD_AOE_OP_FC_BOOT 0x18
+/* enum: Get number of internal ports */
+#define MC_CMD_AOE_OP_GET_ASIC_PORTS 0x19
+/* enum: Get FC assert information and register dump */
+#define MC_CMD_AOE_OP_GET_FC_ASSERT_INFO 0x1a
+
+/* MC_CMD_AOE_OUT msgresponse */
+#define MC_CMD_AOE_OUT_LEN 0
+
+/* MC_CMD_AOE_IN_INFO msgrequest */
+#define MC_CMD_AOE_IN_INFO_LEN 4
+#define MC_CMD_AOE_IN_CMD_OFST 0
+#define MC_CMD_AOE_IN_CMD_LEN 4
+
+/* MC_CMD_AOE_IN_CURRENTS msgrequest */
+#define MC_CMD_AOE_IN_CURRENTS_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+
+/* MC_CMD_AOE_IN_TEMPERATURES msgrequest */
+#define MC_CMD_AOE_IN_TEMPERATURES_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+
+/* MC_CMD_AOE_IN_CPLD_IDLE msgrequest */
+#define MC_CMD_AOE_IN_CPLD_IDLE_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+
+/* MC_CMD_AOE_IN_CPLD_READ msgrequest */
+#define MC_CMD_AOE_IN_CPLD_READ_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_CPLD_READ_REGISTER_OFST 4
+#define MC_CMD_AOE_IN_CPLD_READ_REGISTER_LEN 4
+#define MC_CMD_AOE_IN_CPLD_READ_WIDTH_OFST 8
+#define MC_CMD_AOE_IN_CPLD_READ_WIDTH_LEN 4
+
+/* MC_CMD_AOE_IN_CPLD_WRITE msgrequest */
+#define MC_CMD_AOE_IN_CPLD_WRITE_LEN 16
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_CPLD_WRITE_REGISTER_OFST 4
+#define MC_CMD_AOE_IN_CPLD_WRITE_REGISTER_LEN 4
+#define MC_CMD_AOE_IN_CPLD_WRITE_WIDTH_OFST 8
+#define MC_CMD_AOE_IN_CPLD_WRITE_WIDTH_LEN 4
+#define MC_CMD_AOE_IN_CPLD_WRITE_VALUE_OFST 12
+#define MC_CMD_AOE_IN_CPLD_WRITE_VALUE_LEN 4
+
+/* MC_CMD_AOE_IN_CPLD_INSTRUCTION msgrequest */
+#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_INSTRUCTION_OFST 4
+#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_INSTRUCTION_LEN 4
+
+/* MC_CMD_AOE_IN_CPLD_REPROGRAM msgrequest */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_OP_OFST 4
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_OP_LEN 4
+/* enum: Reprogram CPLD, poll for completion */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_REPROGRAM 0x1
+/* enum: Reprogram CPLD, send event on completion */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_REPROGRAM_EVENT 0x3
+/* enum: Get status of reprogramming operation */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_STATUS 0x4
+
+/* MC_CMD_AOE_IN_POWER msgrequest */
+#define MC_CMD_AOE_IN_POWER_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* Turn on or off AOE power */
+#define MC_CMD_AOE_IN_POWER_OP_OFST 4
+#define MC_CMD_AOE_IN_POWER_OP_LEN 4
+/* enum: Turn off FPGA power */
+#define MC_CMD_AOE_IN_POWER_OFF 0x0
+/* enum: Turn on FPGA power */
+#define MC_CMD_AOE_IN_POWER_ON 0x1
+/* enum: Clear peak power measurement */
+#define MC_CMD_AOE_IN_POWER_CLEAR 0x2
+/* enum: Show current power in sensors output */
+#define MC_CMD_AOE_IN_POWER_SHOW_CURRENT 0x3
+/* enum: Show peak power in sensors output */
+#define MC_CMD_AOE_IN_POWER_SHOW_PEAK 0x4
+/* enum: Show current DDR current */
+#define MC_CMD_AOE_IN_POWER_DDR_LAST 0x5
+/* enum: Show peak DDR current */
+#define MC_CMD_AOE_IN_POWER_DDR_PEAK 0x6
+/* enum: Clear peak DDR current */
+#define MC_CMD_AOE_IN_POWER_DDR_CLEAR 0x7
+
+/* MC_CMD_AOE_IN_LOAD msgrequest */
+#define MC_CMD_AOE_IN_LOAD_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* Image to load in the normal sequence (0 - main, 1 - diagnostic) */
+#define MC_CMD_AOE_IN_LOAD_IMAGE_OFST 4
+#define MC_CMD_AOE_IN_LOAD_IMAGE_LEN 4
+
+/* MC_CMD_AOE_IN_FAN_CONTROL msgrequest */
+#define MC_CMD_AOE_IN_FAN_CONTROL_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* If non-zero, report measured fan RPM rather than nominal */
+#define MC_CMD_AOE_IN_FAN_CONTROL_REAL_RPM_OFST 4
+#define MC_CMD_AOE_IN_FAN_CONTROL_REAL_RPM_LEN 4
+
+/* MC_CMD_AOE_IN_FAN_FAILURES msgrequest */
+#define MC_CMD_AOE_IN_FAN_FAILURES_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+
+/* MC_CMD_AOE_IN_MAC_STATS msgrequest */
+#define MC_CMD_AOE_IN_MAC_STATS_LEN 24
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* AOE port */
+#define MC_CMD_AOE_IN_MAC_STATS_PORT_OFST 4
+#define MC_CMD_AOE_IN_MAC_STATS_PORT_LEN 4
+/* Host memory address for statistics */
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_OFST 8
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_LEN 8
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_LO_OFST 8
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_HI_OFST 12
+#define MC_CMD_AOE_IN_MAC_STATS_CMD_OFST 16
+#define MC_CMD_AOE_IN_MAC_STATS_CMD_LEN 4
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_LBN 0
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_CLEAR_LBN 1
+#define MC_CMD_AOE_IN_MAC_STATS_CLEAR_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CLEAR_LBN 4
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_LBN 16
+#define MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_WIDTH 16
+/* Length of DMA data (optional) */
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_LEN_OFST 20
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_LEN_LEN 4
+
+/* MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO msgrequest */
+#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* AOE port */
+#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PORT_OFST 4
+#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PORT_LEN 4
+#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PAGE_OFST 8
+#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PAGE_LEN 4
+
+/* MC_CMD_AOE_IN_JTAG_WRITE msgrequest */
+#define MC_CMD_AOE_IN_JTAG_WRITE_LENMIN 12
+#define MC_CMD_AOE_IN_JTAG_WRITE_LENMAX 252
+#define MC_CMD_AOE_IN_JTAG_WRITE_LEN(num) (8+4*(num))
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATALEN_OFST 4
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATALEN_LEN 4
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_OFST 8
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_LEN 4
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_MINNUM 1
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_MAXNUM 61
+
+/* MC_CMD_AOE_IN_FPGA_ACCESS msgrequest */
+#define MC_CMD_AOE_IN_FPGA_ACCESS_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* Enable or disable access */
+#define MC_CMD_AOE_IN_FPGA_ACCESS_OP_OFST 4
+#define MC_CMD_AOE_IN_FPGA_ACCESS_OP_LEN 4
+/* enum: Enable access */
+#define MC_CMD_AOE_IN_FPGA_ACCESS_ENABLE 0x1
+/* enum: Disable access */
+#define MC_CMD_AOE_IN_FPGA_ACCESS_DISABLE 0x2
+
+/* MC_CMD_AOE_IN_SET_MTU_OFFSET msgrequest */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* AOE port - when not ALL_EXTERNAL or ALL_INTERNAL, specifies the port number */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_PORT_OFST 4
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_PORT_LEN 4
+/* enum: Apply to all external ports */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_ALL_EXTERNAL 0x8000
+/* enum: Apply to all internal ports */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_ALL_INTERNAL 0x4000
+/* The MTU offset to be applied to the external ports */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_OFFSET_OFST 8
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_OFFSET_LEN 4
+
+/* MC_CMD_AOE_IN_LINK_STATE msgrequest */
+#define MC_CMD_AOE_IN_LINK_STATE_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_LINK_STATE_MODE_OFST 4
+#define MC_CMD_AOE_IN_LINK_STATE_MODE_LEN 4
+#define MC_CMD_AOE_IN_LINK_STATE_CONFIG_MODE_LBN 0
+#define MC_CMD_AOE_IN_LINK_STATE_CONFIG_MODE_WIDTH 8
+/* enum: AOE and associated external port */
+#define MC_CMD_AOE_IN_LINK_STATE_SIMPLE_SEPARATE 0x0
+/* enum: AOE and OR of all external ports */
+#define MC_CMD_AOE_IN_LINK_STATE_SIMPLE_COMBINED 0x1
+/* enum: Individual ports */
+#define MC_CMD_AOE_IN_LINK_STATE_DIAGNOSTIC 0x2
+/* enum: Configure link state mode on given AOE port */
+#define MC_CMD_AOE_IN_LINK_STATE_CUSTOM 0x3
+#define MC_CMD_AOE_IN_LINK_STATE_OPERATION_LBN 8
+#define MC_CMD_AOE_IN_LINK_STATE_OPERATION_WIDTH 8
+/* enum: No-op */
+#define MC_CMD_AOE_IN_LINK_STATE_OP_NONE 0x0
+/* enum: logical OR of all SFP ports link status */
+#define MC_CMD_AOE_IN_LINK_STATE_OP_OR 0x1
+/* enum: logical AND of all SFP ports link status */
+#define MC_CMD_AOE_IN_LINK_STATE_OP_AND 0x2
+#define MC_CMD_AOE_IN_LINK_STATE_SFP_MASK_LBN 16
+#define MC_CMD_AOE_IN_LINK_STATE_SFP_MASK_WIDTH 16
+
+/* MC_CMD_AOE_IN_GET_ASIC_PORTS msgrequest */
+#define MC_CMD_AOE_IN_GET_ASIC_PORTS_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+
+/* MC_CMD_AOE_IN_GET_FC_ASSERT_INFO msgrequest */
+#define MC_CMD_AOE_IN_GET_FC_ASSERT_INFO_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+
+/* MC_CMD_AOE_IN_SIENA_STATS msgrequest */
+#define MC_CMD_AOE_IN_SIENA_STATS_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* How MAC statistics are reported */
+#define MC_CMD_AOE_IN_SIENA_STATS_MODE_OFST 4
+#define MC_CMD_AOE_IN_SIENA_STATS_MODE_LEN 4
+/* enum: Statistics from Siena (default) */
+#define MC_CMD_AOE_IN_SIENA_STATS_STATS_SIENA 0x0
+/* enum: Statistics from AOE external ports */
+#define MC_CMD_AOE_IN_SIENA_STATS_STATS_AOE 0x1
+
+/* MC_CMD_AOE_IN_ASIC_STATS msgrequest */
+#define MC_CMD_AOE_IN_ASIC_STATS_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* How MAC statistics are reported */
+#define MC_CMD_AOE_IN_ASIC_STATS_MODE_OFST 4
+#define MC_CMD_AOE_IN_ASIC_STATS_MODE_LEN 4
+/* enum: Statistics from the ASIC (default) */
+#define MC_CMD_AOE_IN_ASIC_STATS_STATS_ASIC 0x0
+/* enum: Statistics from AOE external ports */
+#define MC_CMD_AOE_IN_ASIC_STATS_STATS_AOE 0x1
+
+/* MC_CMD_AOE_IN_DDR msgrequest */
+#define MC_CMD_AOE_IN_DDR_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_DDR_BANK_OFST 4
+#define MC_CMD_AOE_IN_DDR_BANK_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_FC/MC_CMD_FC_IN_DDR/MC_CMD_FC_IN_DDR_BANK */
+/* Page index of SPD data */
+#define MC_CMD_AOE_IN_DDR_SPD_PAGE_ID_OFST 8
+#define MC_CMD_AOE_IN_DDR_SPD_PAGE_ID_LEN 4
+
+/* MC_CMD_AOE_IN_FC msgrequest */
+#define MC_CMD_AOE_IN_FC_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+
+/* MC_CMD_AOE_IN_DDR_ECC_STATUS msgrequest */
+#define MC_CMD_AOE_IN_DDR_ECC_STATUS_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_DDR_ECC_STATUS_BANK_OFST 4
+#define MC_CMD_AOE_IN_DDR_ECC_STATUS_BANK_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_FC/MC_CMD_FC_IN_DDR/MC_CMD_FC_IN_DDR_BANK */
+
+/* MC_CMD_AOE_IN_MC_SPI_MASTER msgrequest */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* Basic commands for MC SPI Master emulation. */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_OP_OFST 4
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_OP_LEN 4
+/* enum: MC SPI read */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ 0x0
+/* enum: MC SPI write */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE 0x1
+
+/* MC_CMD_AOE_IN_MC_SPI_MASTER_READ msgrequest */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OP_OFST 4
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OP_LEN 4
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OFFSET_OFST 8
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OFFSET_LEN 4
+
+/* MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE msgrequest */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_LEN 16
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OP_OFST 4
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OP_LEN 4
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OFFSET_OFST 8
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OFFSET_LEN 4
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_DATA_OFST 12
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_DATA_LEN 4
+
+/* MC_CMD_AOE_IN_FC_BOOT msgrequest */
+#define MC_CMD_AOE_IN_FC_BOOT_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* FC boot control flags */
+#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_OFST 4
+#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_LEN 4
+#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_BOOT_ENABLE_LBN 0
+#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_BOOT_ENABLE_WIDTH 1
+
+/* MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO msgresponse */
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_LEN 144
+/* Assertion status flag. */
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GLOBAL_FLAGS_OFST 0
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GLOBAL_FLAGS_LEN 4
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_STATE_LBN 8
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_STATE_WIDTH 8
+/* enum: No crash data available */
+/* MC_CMD_FC_GET_ASSERT_FLAGS_STATE_CLEAR 0x0 */
+/* enum: New crash data available */
+/* MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NEW 0x1 */
+/* enum: Crash data has been sent */
+/* MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NOTIFIED 0x2 */
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_TYPE_LBN 0
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_TYPE_WIDTH 8
+/* enum: No crash has been recorded. */
+/* MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_NONE 0x0 */
+/* enum: Crash due to exception. */
+/* MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_EXCEPTION 0x1 */
+/* enum: Crash due to assertion. */
+/* MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_ASSERTION 0x2 */
+/* Failing PC value */
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_SAVED_PC_OFFS_OFST 4
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_SAVED_PC_OFFS_LEN 4
+/* Saved GP regs */
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GP_REGS_OFFS_OFST 8
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GP_REGS_OFFS_LEN 4
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GP_REGS_OFFS_NUM 31
+/* Exception Type */
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_TYPE_OFFS_OFST 132
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_TYPE_OFFS_LEN 4
+/* Instruction at which exception occurred */
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_PC_ADDR_OFFS_OFST 136
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_PC_ADDR_OFFS_LEN 4
+/* Bad address that triggered the address-based exception */
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_BAD_ADDR_OFFS_OFST 140
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_BAD_ADDR_OFFS_LEN 4
+
+/* MC_CMD_AOE_OUT_INFO msgresponse */
+#define MC_CMD_AOE_OUT_INFO_LEN 44
+/* JTAG IDCODE of CPLD */
+#define MC_CMD_AOE_OUT_INFO_CPLD_IDCODE_OFST 0
+#define MC_CMD_AOE_OUT_INFO_CPLD_IDCODE_LEN 4
+/* Version of CPLD */
+#define MC_CMD_AOE_OUT_INFO_CPLD_VERSION_OFST 4
+#define MC_CMD_AOE_OUT_INFO_CPLD_VERSION_LEN 4
+/* JTAG IDCODE of FPGA */
+#define MC_CMD_AOE_OUT_INFO_FPGA_IDCODE_OFST 8
+#define MC_CMD_AOE_OUT_INFO_FPGA_IDCODE_LEN 4
+/* JTAG USERCODE of FPGA */
+#define MC_CMD_AOE_OUT_INFO_FPGA_VERSION_OFST 12
+#define MC_CMD_AOE_OUT_INFO_FPGA_VERSION_LEN 4
+/* FPGA type - read from CPLD straps */
+#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_OFST 16
+#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_LEN 4
+#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_A5_C2 0x1 /* enum */
+#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_A7_C2 0x2 /* enum */
+/* FPGA state (debug) */
+#define MC_CMD_AOE_OUT_INFO_FPGA_STATE_OFST 20
+#define MC_CMD_AOE_OUT_INFO_FPGA_STATE_LEN 4
+/* FPGA image - partition from which loaded */
+#define MC_CMD_AOE_OUT_INFO_FPGA_IMAGE_OFST 24
+#define MC_CMD_AOE_OUT_INFO_FPGA_IMAGE_LEN 4
+/* FC state */
+#define MC_CMD_AOE_OUT_INFO_FC_STATE_OFST 28
+#define MC_CMD_AOE_OUT_INFO_FC_STATE_LEN 4
+/* enum: Set if watchdog working */
+#define MC_CMD_AOE_OUT_INFO_WATCHDOG 0x1
+/* enum: Set if MC-FC communications working */
+#define MC_CMD_AOE_OUT_INFO_COMMS 0x2
+/* Random pieces of information */
+#define MC_CMD_AOE_OUT_INFO_FLAGS_OFST 32
+#define MC_CMD_AOE_OUT_INFO_FLAGS_LEN 4
+/* enum: Power to FPGA supplied by PEG connector, not PCIe bus */
+#define MC_CMD_AOE_OUT_INFO_PEG_POWER 0x1
+/* enum: CPLD apparently good */
+#define MC_CMD_AOE_OUT_INFO_CPLD_GOOD 0x2
+/* enum: FPGA working normally */
+#define MC_CMD_AOE_OUT_INFO_FPGA_GOOD 0x4
+/* enum: FPGA is powered */
+#define MC_CMD_AOE_OUT_INFO_FPGA_POWER 0x8
+/* enum: Board has incompatible SODIMMs fitted */
+#define MC_CMD_AOE_OUT_INFO_BAD_SODIMM 0x10
+/* enum: Board has ByteBlaster connected */
+#define MC_CMD_AOE_OUT_INFO_HAS_BYTEBLASTER 0x20
+/* enum: FPGA Boot flash has an invalid header. */
+#define MC_CMD_AOE_OUT_INFO_FPGA_BAD_BOOT_HDR 0x40
+/* enum: FPGA Application flash is accessible. */
+#define MC_CMD_AOE_OUT_INFO_FPGA_APP_FLASH_GOOD 0x80
+/* Revision of Modena and Sorrento boards. Sorrento can be R1_2 or R1_3. */
+#define MC_CMD_AOE_OUT_INFO_BOARD_REVISION_OFST 36
+#define MC_CMD_AOE_OUT_INFO_BOARD_REVISION_LEN 4
+#define MC_CMD_AOE_OUT_INFO_UNKNOWN 0x0 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_0 0x10 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_1 0x11 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_2 0x12 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_3 0x13 /* enum */
+/* Result of FC booting - not valid while a ByteBlaster is connected. */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_RESULT_OFST 40
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_RESULT_LEN 4
+/* enum: No error */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_NO_ERROR 0x0
+/* enum: Bad address set in CPLD */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_ADDRESS 0x1
+/* enum: Bad header */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_MAGIC 0x2
+/* enum: Bad text section details */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_TEXT 0x3
+/* enum: Bad checksum */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_CHECKSUM 0x4
+/* enum: Bad BSP */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_BSP 0x5
+/* enum: Flash mode is invalid */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_INVALID_FLASH_MODE 0x6
+/* enum: FC application loaded and execution attempted */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_APP_EXECUTE 0x80
+/* enum: FC application Started */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_APP_STARTED 0x81
+/* enum: No bootrom in FPGA */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_NO_BOOTROM 0xff
+
+/* MC_CMD_AOE_OUT_CURRENTS msgresponse */
+#define MC_CMD_AOE_OUT_CURRENTS_LEN 68
+/* Set of currents and voltages (mA or mV as appropriate) */
+#define MC_CMD_AOE_OUT_CURRENTS_VALUES_OFST 0
+#define MC_CMD_AOE_OUT_CURRENTS_VALUES_LEN 4
+#define MC_CMD_AOE_OUT_CURRENTS_VALUES_NUM 17
+#define MC_CMD_AOE_OUT_CURRENTS_I_2V5 0x0 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_1V8 0x1 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_GXB 0x2 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_PGM 0x3 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_XCVR 0x4 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_1V5 0x5 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_3V3 0x6 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_1V5 0x7 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_IN 0x8 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT 0x9 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_IN 0xa /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR1 0xb /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR1 0xc /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR2 0xd /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR2 0xe /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR3 0xf /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR3 0x10 /* enum */
+
+/* MC_CMD_AOE_OUT_TEMPERATURES msgresponse */
+#define MC_CMD_AOE_OUT_TEMPERATURES_LEN 40
+/* Set of temperatures */
+#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_OFST 0
+#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_LEN 4
+#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_NUM 10
+/* enum: The first set of enum values is for Modena code. */
+#define MC_CMD_AOE_OUT_TEMPERATURES_MAIN_0 0x0
+#define MC_CMD_AOE_OUT_TEMPERATURES_MAIN_1 0x1 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_IND_0 0x2 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_IND_1 0x3 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO1 0x4 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO2 0x5 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO3 0x6 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_PSU 0x7 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_FPGA 0x8 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SIENA 0x9 /* enum */
+/* enum: The second set of enum values is for Sorrento code. */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_MAIN_0 0x0
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_MAIN_1 0x1 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_IND_0 0x2 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_IND_1 0x3 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_SODIMM_0 0x4 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_SODIMM_1 0x5 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_FPGA 0x6 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_PHY0 0x7 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_PHY1 0x8 /* enum */
+
+/* MC_CMD_AOE_OUT_CPLD_READ msgresponse */
+#define MC_CMD_AOE_OUT_CPLD_READ_LEN 4
+/* The value read from the CPLD */
+#define MC_CMD_AOE_OUT_CPLD_READ_VALUE_OFST 0
+#define MC_CMD_AOE_OUT_CPLD_READ_VALUE_LEN 4
+
+/* MC_CMD_AOE_OUT_FAN_FAILURES msgresponse */
+#define MC_CMD_AOE_OUT_FAN_FAILURES_LENMIN 4
+#define MC_CMD_AOE_OUT_FAN_FAILURES_LENMAX 252
+#define MC_CMD_AOE_OUT_FAN_FAILURES_LEN(num) (0+4*(num))
+/* Failure counts for each fan */
+#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_OFST 0
+#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_LEN 4
+#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_MINNUM 1
+#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_MAXNUM 63
+
+/* MC_CMD_AOE_OUT_CPLD_REPROGRAM msgresponse */
+#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_LEN 4
+/* Results of status command (only) */
+#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_STATUS_OFST 0
+#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_STATUS_LEN 4
+
+/* MC_CMD_AOE_OUT_POWER_OFF msgresponse */
+#define MC_CMD_AOE_OUT_POWER_OFF_LEN 0
+
+/* MC_CMD_AOE_OUT_POWER_ON msgresponse */
+#define MC_CMD_AOE_OUT_POWER_ON_LEN 0
+
+/* MC_CMD_AOE_OUT_LOAD msgresponse */
+#define MC_CMD_AOE_OUT_LOAD_LEN 0
+
+/* MC_CMD_AOE_OUT_MAC_STATS_DMA msgresponse */
+#define MC_CMD_AOE_OUT_MAC_STATS_DMA_LEN 0
+
+/* MC_CMD_AOE_OUT_MAC_STATS_NO_DMA msgresponse: See MC_CMD_MAC_STATS_OUT_NO_DMA
+ * for details
+ */
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3)
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
+
+/* MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO msgresponse */
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LENMIN 5
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LENMAX 252
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LEN(num) (4+1*(num))
+/* in bytes */
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATALEN_OFST 0
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATALEN_LEN 4
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_OFST 4
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_LEN 1
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_MINNUM 1
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_MAXNUM 248
+
+/* MC_CMD_AOE_OUT_JTAG_WRITE msgresponse */
+#define MC_CMD_AOE_OUT_JTAG_WRITE_LENMIN 12
+#define MC_CMD_AOE_OUT_JTAG_WRITE_LENMAX 252
+#define MC_CMD_AOE_OUT_JTAG_WRITE_LEN(num) (8+4*(num))
+/* Used to align the in and out data blocks so the MC can re-use the cmd */
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATALEN_OFST 0
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATALEN_LEN 4
+/* out bytes */
+#define MC_CMD_AOE_OUT_JTAG_WRITE_PAD_OFST 4
+#define MC_CMD_AOE_OUT_JTAG_WRITE_PAD_LEN 4
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_OFST 8
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_LEN 4
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_MINNUM 1
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_MAXNUM 61
+
+/* MC_CMD_AOE_OUT_FPGA_ACCESS msgresponse */
+#define MC_CMD_AOE_OUT_FPGA_ACCESS_LEN 0
+
+/* MC_CMD_AOE_OUT_DDR msgresponse */
+#define MC_CMD_AOE_OUT_DDR_LENMIN 17
+#define MC_CMD_AOE_OUT_DDR_LENMAX 252
+#define MC_CMD_AOE_OUT_DDR_LEN(num) (16+1*(num))
+/* Information on the module. */
+#define MC_CMD_AOE_OUT_DDR_FLAGS_OFST 0
+#define MC_CMD_AOE_OUT_DDR_FLAGS_LEN 4
+#define MC_CMD_AOE_OUT_DDR_PRESENT_LBN 0
+#define MC_CMD_AOE_OUT_DDR_PRESENT_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_POWERED_LBN 1
+#define MC_CMD_AOE_OUT_DDR_POWERED_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_OPERATIONAL_LBN 2
+#define MC_CMD_AOE_OUT_DDR_OPERATIONAL_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_NOT_REACHABLE_LBN 3
+#define MC_CMD_AOE_OUT_DDR_NOT_REACHABLE_WIDTH 1
+/* Memory size, in MB. */
+#define MC_CMD_AOE_OUT_DDR_CAPACITY_OFST 4
+#define MC_CMD_AOE_OUT_DDR_CAPACITY_LEN 4
+/* The memory type, as reported from SPD information */
+#define MC_CMD_AOE_OUT_DDR_TYPE_OFST 8
+#define MC_CMD_AOE_OUT_DDR_TYPE_LEN 4
+/* Nominal voltage of the module (as applied) */
+#define MC_CMD_AOE_OUT_DDR_VOLTAGE_OFST 12
+#define MC_CMD_AOE_OUT_DDR_VOLTAGE_LEN 4
+/* SPD data read from the module */
+#define MC_CMD_AOE_OUT_DDR_SPD_OFST 16
+#define MC_CMD_AOE_OUT_DDR_SPD_LEN 1
+#define MC_CMD_AOE_OUT_DDR_SPD_MINNUM 1
+#define MC_CMD_AOE_OUT_DDR_SPD_MAXNUM 236
+
+/* MC_CMD_AOE_OUT_SET_MTU_OFFSET msgresponse */
+#define MC_CMD_AOE_OUT_SET_MTU_OFFSET_LEN 0
+
+/* MC_CMD_AOE_OUT_LINK_STATE msgresponse */
+#define MC_CMD_AOE_OUT_LINK_STATE_LEN 0
+
+/* MC_CMD_AOE_OUT_SIENA_STATS msgresponse */
+#define MC_CMD_AOE_OUT_SIENA_STATS_LEN 0
+
+/* MC_CMD_AOE_OUT_ASIC_STATS msgresponse */
+#define MC_CMD_AOE_OUT_ASIC_STATS_LEN 0
+
+/* MC_CMD_AOE_OUT_FC msgresponse */
+#define MC_CMD_AOE_OUT_FC_LEN 0
+
+/* MC_CMD_AOE_OUT_GET_ASIC_PORTS msgresponse */
+#define MC_CMD_AOE_OUT_GET_ASIC_PORTS_LEN 4
+/* get the number of internal ports */
+#define MC_CMD_AOE_OUT_GET_ASIC_PORTS_COUNT_PORTS_OFST 0
+#define MC_CMD_AOE_OUT_GET_ASIC_PORTS_COUNT_PORTS_LEN 4
+
+/* MC_CMD_AOE_OUT_DDR_ECC_STATUS msgresponse */
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_LEN 8
+/* Flags describing status info on the module. */
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_FLAGS_OFST 0
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_FLAGS_LEN 4
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_VALID_LBN 0
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_VALID_WIDTH 1
+/* DDR ECC status on the module. */
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_STATUS_OFST 4
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_STATUS_LEN 4
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_LBN 0
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_LBN 1
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_LBN 2
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_LBN 8
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_WIDTH 8
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_COUNT_LBN 16
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_COUNT_WIDTH 8
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_COUNT_LBN 24
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_COUNT_WIDTH 8
+
+/* MC_CMD_AOE_OUT_MC_SPI_MASTER_READ msgresponse */
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_LEN 4
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_DATA_OFST 0
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_DATA_LEN 4
+
+/* MC_CMD_AOE_OUT_MC_SPI_MASTER_WRITE msgresponse */
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_WRITE_LEN 0
+
+/* MC_CMD_AOE_OUT_MC_SPI_MASTER msgresponse */
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_LEN 0
+
+/* MC_CMD_AOE_OUT_FC_BOOT msgresponse */
+#define MC_CMD_AOE_OUT_FC_BOOT_LEN 0
+
+#endif /* _SIENA_MC_DRIVER_PCOL_AOE_H */
+/*! \cidoxg_end */
diff --git a/drivers/net/sfc/base/efx_rx.c b/drivers/net/sfc/base/efx_rx.c
index c0dcb752..4fd73bab 100644
--- a/drivers/net/sfc/base/efx_rx.c
+++ b/drivers/net/sfc/base/efx_rx.c
@@ -107,7 +107,7 @@ siena_rx_qcreate(
__in unsigned int index,
__in unsigned int label,
__in efx_rxq_type_t type,
- __in uint32_t type_data,
+ __in const efx_rxq_type_data_t *type_data,
__in efsys_mem_t *esmp,
__in size_t ndescs,
__in uint32_t id,
@@ -151,7 +151,7 @@ static const efx_rx_ops_t __efx_rx_siena_ops = {
};
#endif /* EFSYS_OPT_SIENA */
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static const efx_rx_ops_t __efx_rx_ef10_ops = {
ef10_rx_init, /* erxo_init */
ef10_rx_fini, /* erxo_fini */
@@ -178,7 +178,7 @@ static const efx_rx_ops_t __efx_rx_ef10_ops = {
ef10_rx_qcreate, /* erxo_qcreate */
ef10_rx_qdestroy, /* erxo_qdestroy */
};
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
__checkReturn efx_rc_t
@@ -220,6 +220,12 @@ efx_rx_init(
break;
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ erxop = &__efx_rx_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
default:
EFSYS_ASSERT(0);
rc = ENOTSUP;
@@ -288,6 +294,94 @@ fail1:
#endif /* EFSYS_OPT_RX_SCATTER */
#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+efx_rx_scale_hash_flags_get(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t hash_alg,
+ __inout_ecount(EFX_RX_HASH_NFLAGS) unsigned int *flags,
+ __out unsigned int *nflagsp)
+{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ boolean_t l4;
+ boolean_t additional_modes;
+ unsigned int *entryp = flags;
+ efx_rc_t rc;
+
+ if (flags == NULL || nflagsp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ l4 = encp->enc_rx_scale_l4_hash_supported;
+ additional_modes = encp->enc_rx_scale_additional_modes_supported;
+
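+ /*
+ * LIST_FLAGS() appends to the flags list, for one traffic class, every
+ * supported hash flag combination: 4-tuple hashing when L4 hashing is
+ * available, the 2-tuple and 1-tuple source/destination variants when
+ * additional modes are available, plain 2-tuple hashing and DISABLE.
+ */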
+#define LIST_FLAGS(_entryp, _class, _l4_hashing, _additional_modes) \
+ do { \
+ if (_l4_hashing) { \
+ *(_entryp++) = EFX_RX_HASH(_class, 4TUPLE); \
+ \
+ if (_additional_modes) { \
+ *(_entryp++) = \
+ EFX_RX_HASH(_class, 2TUPLE_DST); \
+ *(_entryp++) = \
+ EFX_RX_HASH(_class, 2TUPLE_SRC); \
+ } \
+ } \
+ \
+ *(_entryp++) = EFX_RX_HASH(_class, 2TUPLE); \
+ \
+ if (_additional_modes) { \
+ *(_entryp++) = EFX_RX_HASH(_class, 1TUPLE_DST); \
+ *(_entryp++) = EFX_RX_HASH(_class, 1TUPLE_SRC); \
+ } \
+ \
+ *(_entryp++) = EFX_RX_HASH(_class, DISABLE); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+ switch (hash_alg) {
+ case EFX_RX_HASHALG_PACKED_STREAM:
+ if ((encp->enc_rx_scale_hash_alg_mask & (1U << hash_alg)) == 0)
+ break;
+ /* FALLTHRU */
+ case EFX_RX_HASHALG_TOEPLITZ:
+ if ((encp->enc_rx_scale_hash_alg_mask & (1U << hash_alg)) == 0)
+ break;
+
+ LIST_FLAGS(entryp, IPV4_TCP, l4, additional_modes);
+ LIST_FLAGS(entryp, IPV6_TCP, l4, additional_modes);
+
+ if (additional_modes) {
+ LIST_FLAGS(entryp, IPV4_UDP, l4, additional_modes);
+ LIST_FLAGS(entryp, IPV6_UDP, l4, additional_modes);
+ }
+
+ LIST_FLAGS(entryp, IPV4, B_FALSE, additional_modes);
+ LIST_FLAGS(entryp, IPV6, B_FALSE, additional_modes);
+ break;
+
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+
+#undef LIST_FLAGS
+
+ *nflagsp = (unsigned int)(entryp - flags);
+ EFSYS_ASSERT3U(*nflagsp, <=, EFX_RX_HASH_NFLAGS);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
__checkReturn efx_rc_t
efx_rx_hash_default_support_get(
__in efx_nic_t *enp,
@@ -419,19 +513,82 @@ efx_rx_scale_mode_set(
__in boolean_t insert)
{
const efx_rx_ops_t *erxop = enp->en_erxop;
+ unsigned int type_flags[EFX_RX_HASH_NFLAGS];
+ unsigned int type_nflags;
+ efx_rx_hash_type_t type_check;
+ unsigned int i;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+ /*
+ * Legacy flags and modern bits cannot be
+ * used at the same time in the hash type.
+ */
+ if ((type & EFX_RX_HASH_LEGACY_MASK) &&
+ (type & ~EFX_RX_HASH_LEGACY_MASK)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /*
+ * Translate legacy flags to the new representation
+ * so that chip-specific handlers will consider the
+ * new flags only.
+ */
+ if (type & EFX_RX_HASH_IPV4) {
+ type |= EFX_RX_HASH(IPV4, 2TUPLE);
+ type |= EFX_RX_HASH(IPV4_TCP, 2TUPLE);
+ type |= EFX_RX_HASH(IPV4_UDP, 2TUPLE);
+ }
+
+ if (type & EFX_RX_HASH_TCPIPV4)
+ type |= EFX_RX_HASH(IPV4_TCP, 4TUPLE);
+
+ if (type & EFX_RX_HASH_IPV6) {
+ type |= EFX_RX_HASH(IPV6, 2TUPLE);
+ type |= EFX_RX_HASH(IPV6_TCP, 2TUPLE);
+ type |= EFX_RX_HASH(IPV6_UDP, 2TUPLE);
+ }
+
+ if (type & EFX_RX_HASH_TCPIPV6)
+ type |= EFX_RX_HASH(IPV6_TCP, 4TUPLE);
+
+ type &= ~EFX_RX_HASH_LEGACY_MASK;
+ type_check = type;
+
+ /*
+ * Get the list of supported hash flags and sanitise the input.
+ */
+ rc = efx_rx_scale_hash_flags_get(enp, alg, type_flags, &type_nflags);
+ if (rc != 0)
+ goto fail2;
+
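+ /*
+ * Clear each supported flag combination that is fully present in the
+ * requested hash type; any bits left over correspond to combinations
+ * this hash algorithm does not support.
+ */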
+ for (i = 0; i < type_nflags; ++i) {
+ if ((type_check & type_flags[i]) == type_flags[i])
+ type_check &= ~(type_flags[i]);
+ }
+
+ if (type_check != 0) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
if (erxop->erxo_scale_mode_set != NULL) {
if ((rc = erxop->erxo_scale_mode_set(enp, rss_context, alg,
type, insert)) != 0)
- goto fail1;
+ goto fail4;
}
return (0);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
@@ -594,7 +751,7 @@ efx_rx_qcreate_internal(
__in unsigned int index,
__in unsigned int label,
__in efx_rxq_type_t type,
- __in uint32_t type_data,
+ __in const efx_rxq_type_data_t *type_data,
__in efsys_mem_t *esmp,
__in size_t ndescs,
__in uint32_t id,
@@ -655,8 +812,8 @@ efx_rx_qcreate(
__in efx_evq_t *eep,
__deref_out efx_rxq_t **erpp)
{
- return efx_rx_qcreate_internal(enp, index, label, type, 0, esmp, ndescs,
- id, flags, eep, erpp);
+ return efx_rx_qcreate_internal(enp, index, label, type, NULL,
+ esmp, ndescs, id, flags, eep, erpp);
}
#if EFSYS_OPT_RX_PACKED_STREAM
@@ -672,13 +829,71 @@ efx_rx_qcreate_packed_stream(
__in efx_evq_t *eep,
__deref_out efx_rxq_t **erpp)
{
+ efx_rxq_type_data_t type_data;
+
+ memset(&type_data, 0, sizeof(type_data));
+
+ type_data.ertd_packed_stream.eps_buf_size = ps_buf_size;
+
return efx_rx_qcreate_internal(enp, index, label,
- EFX_RXQ_TYPE_PACKED_STREAM, ps_buf_size, esmp, ndescs,
+ EFX_RXQ_TYPE_PACKED_STREAM, &type_data, esmp, ndescs,
0 /* id unused on EF10 */, EFX_RXQ_FLAG_NONE, eep, erpp);
}
#endif
+#if EFSYS_OPT_RX_ES_SUPER_BUFFER
+
+ __checkReturn efx_rc_t
+efx_rx_qcreate_es_super_buffer(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in uint32_t n_bufs_per_desc,
+ __in uint32_t max_dma_len,
+ __in uint32_t buf_stride,
+ __in uint32_t hol_block_timeout,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in unsigned int flags,
+ __in efx_evq_t *eep,
+ __deref_out efx_rxq_t **erpp)
+{
+ efx_rc_t rc;
+ efx_rxq_type_data_t type_data;
+
+ if (hol_block_timeout > EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ memset(&type_data, 0, sizeof(type_data));
+
+ type_data.ertd_es_super_buffer.eessb_bufs_per_desc = n_bufs_per_desc;
+ type_data.ertd_es_super_buffer.eessb_max_dma_len = max_dma_len;
+ type_data.ertd_es_super_buffer.eessb_buf_stride = buf_stride;
+ type_data.ertd_es_super_buffer.eessb_hol_block_timeout =
+ hol_block_timeout;
+
+ rc = efx_rx_qcreate_internal(enp, index, label,
+ EFX_RXQ_TYPE_ES_SUPER_BUFFER, &type_data, esmp, ndescs,
+ 0 /* id unused on EF10 */, flags, eep, erpp);
+ if (rc != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif
+
+
void
efx_rx_qdestroy(
__in efx_rxq_t *erp)
@@ -875,6 +1090,10 @@ siena_rx_scale_mode_set(
__in efx_rx_hash_type_t type,
__in boolean_t insert)
{
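+ /*
+ * Build the 2-tuple and 4-tuple hash type combinations recognised by
+ * this Siena-specific handler; the generic code has already translated
+ * any legacy flags into this representation.
+ */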
+ efx_rx_hash_type_t type_ipv4 = EFX_RX_HASH(IPV4, 2TUPLE);
+ efx_rx_hash_type_t type_ipv4_tcp = EFX_RX_HASH(IPV4_TCP, 4TUPLE);
+ efx_rx_hash_type_t type_ipv6 = EFX_RX_HASH(IPV6, 2TUPLE);
+ efx_rx_hash_type_t type_ipv6_tcp = EFX_RX_HASH(IPV6_TCP, 4TUPLE);
efx_rc_t rc;
if (rss_context != EFX_RSS_CONTEXT_DEFAULT) {
@@ -889,12 +1108,12 @@ siena_rx_scale_mode_set(
case EFX_RX_HASHALG_TOEPLITZ:
EFX_RX_TOEPLITZ_IPV4_HASH(enp, insert,
- type & EFX_RX_HASH_IPV4,
- type & EFX_RX_HASH_TCPIPV4);
+ (type & type_ipv4) == type_ipv4,
+ (type & type_ipv4_tcp) == type_ipv4_tcp);
EFX_RX_TOEPLITZ_IPV6_HASH(enp,
- type & EFX_RX_HASH_IPV6,
- type & EFX_RX_HASH_TCPIPV6,
+ (type & type_ipv6) == type_ipv6,
+ (type & type_ipv6_tcp) == type_ipv6_tcp,
rc);
if (rc != 0)
goto fail2;
@@ -1320,7 +1539,7 @@ siena_rx_qcreate(
__in unsigned int index,
__in unsigned int label,
__in efx_rxq_type_t type,
- __in uint32_t type_data,
+ __in const efx_rxq_type_data_t *type_data,
__in efsys_mem_t *esmp,
__in size_t ndescs,
__in uint32_t id,
diff --git a/drivers/net/sfc/base/efx_sram.c b/drivers/net/sfc/base/efx_sram.c
index 1f0ba0a9..7851ff13 100644
--- a/drivers/net/sfc/base/efx_sram.c
+++ b/drivers/net/sfc/base/efx_sram.c
@@ -25,9 +25,10 @@ efx_sram_buf_tbl_set(
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
if (enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD) {
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2) {
/*
* FIXME: the efx_sram_buf_tbl_*() functionality needs to be
* pulled inside the Falcon/Siena queue create/destroy code,
@@ -39,7 +40,7 @@ efx_sram_buf_tbl_set(
return (0);
}
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
if (stop >= EFX_BUF_TBL_SIZE) {
rc = EFBIG;
@@ -147,9 +148,10 @@ efx_sram_buf_tbl_clear(
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
if (enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD) {
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2) {
/*
* FIXME: the efx_sram_buf_tbl_*() functionality needs to be
* pulled inside the Falcon/Siena queue create/destroy code,
@@ -161,7 +163,7 @@ efx_sram_buf_tbl_clear(
return;
}
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
EFSYS_ASSERT3U(stop, <, EFX_BUF_TBL_SIZE);
diff --git a/drivers/net/sfc/base/efx_tunnel.c b/drivers/net/sfc/base/efx_tunnel.c
index 25fa976f..399fd540 100644
--- a/drivers/net/sfc/base/efx_tunnel.c
+++ b/drivers/net/sfc/base/efx_tunnel.c
@@ -17,20 +17,20 @@ static const efx_tunnel_ops_t __efx_tunnel_dummy_ops = {
};
#endif /* EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON */
-#if EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static __checkReturn boolean_t
-medford_udp_encap_supported(
+ef10_udp_encap_supported(
__in efx_nic_t *enp);
static __checkReturn efx_rc_t
-medford_tunnel_reconfigure(
+ef10_tunnel_reconfigure(
__in efx_nic_t *enp);
-static const efx_tunnel_ops_t __efx_tunnel_medford_ops = {
- medford_udp_encap_supported, /* eto_udp_encap_supported */
- medford_tunnel_reconfigure, /* eto_reconfigure */
+static const efx_tunnel_ops_t __efx_tunnel_ef10_ops = {
+ ef10_udp_encap_supported, /* eto_udp_encap_supported */
+ ef10_tunnel_reconfigure, /* eto_reconfigure */
};
-#endif /* EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
static __checkReturn efx_rc_t
efx_mcdi_set_tunnel_encap_udp_ports(
@@ -161,10 +161,16 @@ efx_tunnel_init(
#if EFSYS_OPT_MEDFORD
case EFX_FAMILY_MEDFORD:
- etop = &__efx_tunnel_medford_ops;
+ etop = &__efx_tunnel_ef10_ops;
break;
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ etop = &__efx_tunnel_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
default:
EFSYS_ASSERT(0);
rc = ENOTSUP;
@@ -394,9 +400,9 @@ fail1:
return (rc);
}
-#if EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static __checkReturn boolean_t
-medford_udp_encap_supported(
+ef10_udp_encap_supported(
__in efx_nic_t *enp)
{
const efx_nic_cfg_t *encp = &enp->en_nic_cfg;
@@ -410,7 +416,7 @@ medford_udp_encap_supported(
}
static __checkReturn efx_rc_t
-medford_tunnel_reconfigure(
+ef10_tunnel_reconfigure(
__in efx_nic_t *enp)
{
efx_tunnel_cfg_t *etcp = &enp->en_tunnel_cfg;
@@ -423,7 +429,7 @@ medford_tunnel_reconfigure(
memcpy(&etc, etcp, sizeof (etc));
EFSYS_UNLOCK(enp->en_eslp, state);
- if (medford_udp_encap_supported(enp) == B_FALSE) {
+ if (ef10_udp_encap_supported(enp) == B_FALSE) {
/*
* It is OK to apply empty UDP tunnel ports when UDP
* tunnel encapsulations are not supported - just nothing
@@ -458,6 +464,6 @@ fail1:
return (rc);
}
-#endif /* EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
#endif /* EFSYS_OPT_TUNNEL */
diff --git a/drivers/net/sfc/base/efx_tx.c b/drivers/net/sfc/base/efx_tx.c
index 4e02c869..da37580a 100644
--- a/drivers/net/sfc/base/efx_tx.c
+++ b/drivers/net/sfc/base/efx_tx.c
@@ -117,6 +117,7 @@ static const efx_tx_ops_t __efx_tx_siena_ops = {
NULL, /* etxo_qdesc_tso_create */
NULL, /* etxo_qdesc_tso2_create */
NULL, /* etxo_qdesc_vlantci_create */
+ NULL, /* etxo_qdesc_checksum_create */
#if EFSYS_OPT_QSTATS
siena_tx_qstats_update, /* etxo_qstats_update */
#endif
@@ -143,6 +144,7 @@ static const efx_tx_ops_t __efx_tx_hunt_ops = {
ef10_tx_qdesc_tso_create, /* etxo_qdesc_tso_create */
ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */
ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */
+ ef10_tx_qdesc_checksum_create, /* etxo_qdesc_checksum_create */
#if EFSYS_OPT_QSTATS
ef10_tx_qstats_update, /* etxo_qstats_update */
#endif
@@ -169,12 +171,41 @@ static const efx_tx_ops_t __efx_tx_medford_ops = {
NULL, /* etxo_qdesc_tso_create */
ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */
ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */
+ ef10_tx_qdesc_checksum_create, /* etxo_qdesc_checksum_create */
#if EFSYS_OPT_QSTATS
ef10_tx_qstats_update, /* etxo_qstats_update */
#endif
};
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+static const efx_tx_ops_t __efx_tx_medford2_ops = {
+ ef10_tx_init, /* etxo_init */
+ ef10_tx_fini, /* etxo_fini */
+ ef10_tx_qcreate, /* etxo_qcreate */
+ ef10_tx_qdestroy, /* etxo_qdestroy */
+ ef10_tx_qpost, /* etxo_qpost */
+ ef10_tx_qpush, /* etxo_qpush */
+ ef10_tx_qpace, /* etxo_qpace */
+ ef10_tx_qflush, /* etxo_qflush */
+ ef10_tx_qenable, /* etxo_qenable */
+ ef10_tx_qpio_enable, /* etxo_qpio_enable */
+ ef10_tx_qpio_disable, /* etxo_qpio_disable */
+ ef10_tx_qpio_write, /* etxo_qpio_write */
+ ef10_tx_qpio_post, /* etxo_qpio_post */
+ ef10_tx_qdesc_post, /* etxo_qdesc_post */
+ ef10_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */
+ NULL, /* etxo_qdesc_tso_create */
+ ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */
+ ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */
+ ef10_tx_qdesc_checksum_create, /* etxo_qdesc_checksum_create */
+#if EFSYS_OPT_QSTATS
+ ef10_tx_qstats_update, /* etxo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_MEDFORD2 */
+
+
__checkReturn efx_rc_t
efx_tx_init(
__in efx_nic_t *enp)
@@ -214,6 +245,12 @@ efx_tx_init(
break;
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ etxop = &__efx_tx_medford2_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
default:
EFSYS_ASSERT(0);
rc = ENOTSUP;
@@ -588,6 +625,7 @@ efx_tx_qdesc_tso_create(
efx_tx_qdesc_tso2_create(
__in efx_txq_t *etp,
__in uint16_t ipv4_id,
+ __in uint16_t outer_ipv4_id,
__in uint32_t tcp_seq,
__in uint16_t mss,
__out_ecount(count) efx_desc_t *edp,
@@ -599,7 +637,8 @@ efx_tx_qdesc_tso2_create(
EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
EFSYS_ASSERT(etxop->etxo_qdesc_tso2_create != NULL);
- etxop->etxo_qdesc_tso2_create(etp, ipv4_id, tcp_seq, mss, edp, count);
+ etxop->etxo_qdesc_tso2_create(etp, ipv4_id, outer_ipv4_id,
+ tcp_seq, mss, edp, count);
}
void
@@ -617,6 +656,21 @@ efx_tx_qdesc_vlantci_create(
etxop->etxo_qdesc_vlantci_create(etp, tci, edp);
}
+ void
+efx_tx_qdesc_checksum_create(
+ __in efx_txq_t *etp,
+ __in uint16_t flags,
+ __out efx_desc_t *edp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+ EFSYS_ASSERT(etxop->etxo_qdesc_checksum_create != NULL);
+
+ etxop->etxo_qdesc_checksum_create(etp, flags, edp);
+}
+
#if EFSYS_OPT_QSTATS
void
diff --git a/drivers/net/sfc/base/efx_types.h b/drivers/net/sfc/base/efx_types.h
index 0581f67f..65168ab7 100644
--- a/drivers/net/sfc/base/efx_types.h
+++ b/drivers/net/sfc/base/efx_types.h
@@ -329,6 +329,16 @@ extern int fix_lint;
#endif
/*
+ * Saturation arithmetic subtract with minimum equal to zero.
+ *
+ * Use saturating arithmetic to ensure a non-negative result. This
+ * avoids undefined behaviour (and compiler warnings) when used as a
+ * shift count.
+ */
+#define EFX_SSUB(_val, _sub) \
+ ((_val) > (_sub) ? ((_val) - (_sub)) : 0)
+
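+/*
+ * For example, in the EFX_EXTRACT_*() and EFX_SHIFT*() macros below
+ * both arms of the conditional expression are compiled even though
+ * only one is ever selected.  With a plain subtraction the unselected
+ * arm may contain a negative shift count (e.g. (_bit) - (_base) when
+ * _bit < _base); EFX_SSUB() clamps that count to zero, and the
+ * selecting condition already yields 0 in those cases, so the result
+ * is unchanged.
+ */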
+/*
* Extract bit field portion [low,high) from the native-endian element
* which contains bits [min,max).
*
@@ -347,8 +357,8 @@ extern int fix_lint;
((FIX_LINT(_low > _max) || FIX_LINT(_high < _min)) ? \
0U : \
((_low > _min) ? \
- ((_element) >> (_low - _min)) : \
- ((_element) << (_min - _low))))
+ ((_element) >> EFX_SSUB(_low, _min)) : \
+ ((_element) << EFX_SSUB(_min, _low))))
/*
* Extract bit field portion [low,high) from the 64-bit little-endian
@@ -537,29 +547,29 @@ extern int fix_lint;
(((_low > _max) || (_high < _min)) ? \
0U : \
((_low > _min) ? \
- (((uint64_t)(_value)) << (_low - _min)) : \
- (((uint64_t)(_value)) >> (_min - _low))))
+ (((uint64_t)(_value)) << EFX_SSUB(_low, _min)) :\
+ (((uint64_t)(_value)) >> EFX_SSUB(_min, _low))))
#define EFX_INSERT_NATIVE32(_min, _max, _low, _high, _value) \
(((_low > _max) || (_high < _min)) ? \
0U : \
((_low > _min) ? \
- (((uint32_t)(_value)) << (_low - _min)) : \
- (((uint32_t)(_value)) >> (_min - _low))))
+ (((uint32_t)(_value)) << EFX_SSUB(_low, _min)) :\
+ (((uint32_t)(_value)) >> EFX_SSUB(_min, _low))))
#define EFX_INSERT_NATIVE16(_min, _max, _low, _high, _value) \
(((_low > _max) || (_high < _min)) ? \
0U : \
(uint16_t)((_low > _min) ? \
- ((_value) << (_low - _min)) : \
- ((_value) >> (_min - _low))))
+ ((_value) << EFX_SSUB(_low, _min)) : \
+ ((_value) >> EFX_SSUB(_min, _low))))
#define EFX_INSERT_NATIVE8(_min, _max, _low, _high, _value) \
(((_low > _max) || (_high < _min)) ? \
0U : \
(uint8_t)((_low > _min) ? \
- ((_value) << (_low - _min)) : \
- ((_value) >> (_min - _low))))
+ ((_value) << EFX_SSUB(_low, _min)) : \
+ ((_value) >> EFX_SSUB(_min, _low))))
/*
* Construct bit field portion
@@ -1288,22 +1298,22 @@ extern int fix_lint;
#define EFX_SHIFT64(_bit, _base) \
(((_bit) >= (_base) && (_bit) < (_base) + 64) ? \
- ((uint64_t)1 << ((_bit) - (_base))) : \
+ ((uint64_t)1 << EFX_SSUB((_bit), (_base))) : \
0U)
#define EFX_SHIFT32(_bit, _base) \
(((_bit) >= (_base) && (_bit) < (_base) + 32) ? \
- ((uint32_t)1 << ((_bit) - (_base))) : \
+ ((uint32_t)1 << EFX_SSUB((_bit), (_base))) : \
0U)
#define EFX_SHIFT16(_bit, _base) \
(((_bit) >= (_base) && (_bit) < (_base) + 16) ? \
- (uint16_t)(1 << ((_bit) - (_base))) : \
+ (uint16_t)(1 << EFX_SSUB((_bit), (_base))) : \
0U)
#define EFX_SHIFT8(_bit, _base) \
(((_bit) >= (_base) && (_bit) < (_base) + 8) ? \
- (uint8_t)(1 << ((_bit) - (_base))) : \
+ (uint8_t)(1 << EFX_SSUB((_bit), (_base))) : \
0U)
#define EFX_SET_OWORD_BIT64(_oword, _bit) \
diff --git a/drivers/net/sfc/base/efx_vpd.c b/drivers/net/sfc/base/efx_vpd.c
index 7b8138f3..6d783d74 100644
--- a/drivers/net/sfc/base/efx_vpd.c
+++ b/drivers/net/sfc/base/efx_vpd.c
@@ -44,7 +44,7 @@ static const efx_vpd_ops_t __efx_vpd_siena_ops = {
#endif /* EFSYS_OPT_SIENA */
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static const efx_vpd_ops_t __efx_vpd_ef10_ops = {
ef10_vpd_init, /* evpdo_init */
@@ -59,7 +59,7 @@ static const efx_vpd_ops_t __efx_vpd_ef10_ops = {
ef10_vpd_fini, /* evpdo_fini */
};
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
__checkReturn efx_rc_t
efx_vpd_init(
@@ -91,6 +91,12 @@ efx_vpd_init(
break;
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ evpdop = &__efx_vpd_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
default:
EFSYS_ASSERT(0);
rc = ENOTSUP;
diff --git a/drivers/net/sfc/base/hunt_nic.c b/drivers/net/sfc/base/hunt_nic.c
index d03cc138..16ea81d2 100644
--- a/drivers/net/sfc/base/hunt_nic.c
+++ b/drivers/net/sfc/base/hunt_nic.c
@@ -36,7 +36,7 @@ hunt_nic_get_required_pcie_bandwidth(
goto out;
}
- if (port_modes & (1 << TLV_PORT_MODE_40G_40G)) {
+ if (port_modes & (1U << TLV_PORT_MODE_40G_40G)) {
/*
* This needs the full PCIe bandwidth (and could use
* more) - roughly 64 Gbit/s for 8 lanes of Gen3.
@@ -45,9 +45,9 @@ hunt_nic_get_required_pcie_bandwidth(
EFX_PCIE_LINK_SPEED_GEN3, &bandwidth)) != 0)
goto fail1;
} else {
- if (port_modes & (1 << TLV_PORT_MODE_40G)) {
+ if (port_modes & (1U << TLV_PORT_MODE_40G)) {
max_port_mode = TLV_PORT_MODE_40G;
- } else if (port_modes & (1 << TLV_PORT_MODE_10G_10G_10G_10G)) {
+ } else if (port_modes & (1U << TLV_PORT_MODE_10G_10G_10G_10G)) {
max_port_mode = TLV_PORT_MODE_10G_10G_10G_10G;
} else {
/* Assume two 10G ports */
@@ -76,90 +76,13 @@ fail1:
hunt_board_cfg(
__in efx_nic_t *enp)
{
- efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
- uint8_t mac_addr[6] = { 0 };
- uint32_t board_type = 0;
- ef10_link_state_t els;
efx_port_t *epp = &(enp->en_port);
- uint32_t port;
- uint32_t pf;
- uint32_t vf;
- uint32_t mask;
uint32_t flags;
uint32_t sysclk, dpcpu_clk;
- uint32_t base, nvec;
uint32_t bandwidth;
efx_rc_t rc;
- if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)
- goto fail1;
-
- /*
- * NOTE: The MCDI protocol numbers ports from zero.
- * The common code MCDI interface numbers ports from one.
- */
- emip->emi_port = port + 1;
-
- if ((rc = ef10_external_port_mapping(enp, port,
- &encp->enc_external_port)) != 0)
- goto fail2;
-
- /*
- * Get PCIe function number from firmware (used for
- * per-function privilege and dynamic config info).
- * - PCIe PF: pf = PF number, vf = 0xffff.
- * - PCIe VF: pf = parent PF, vf = VF number.
- */
- if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0)
- goto fail3;
-
- encp->enc_pf = pf;
- encp->enc_vf = vf;
-
- /* MAC address for this function */
- if (EFX_PCI_FUNCTION_IS_PF(encp)) {
- rc = efx_mcdi_get_mac_address_pf(enp, mac_addr);
- if ((rc == 0) && (mac_addr[0] & 0x02)) {
- /*
- * If the static config does not include a global MAC
- * address pool then the board may return a locally
- * administered MAC address (this should only happen on
- * incorrectly programmed boards).
- */
- rc = EINVAL;
- }
- } else {
- rc = efx_mcdi_get_mac_address_vf(enp, mac_addr);
- }
- if (rc != 0)
- goto fail4;
-
- EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
-
- /* Board configuration */
- rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL);
- if (rc != 0) {
- /* Unprivileged functions may not be able to read board cfg */
- if (rc == EACCES)
- board_type = 0;
- else
- goto fail5;
- }
-
- encp->enc_board_type = board_type;
- encp->enc_clk_mult = 1; /* not used for Huntington */
-
- /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
- if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
- goto fail6;
-
- /* Obtain the default PHY advertised capabilities */
- if ((rc = ef10_phy_get_link(enp, &els)) != 0)
- goto fail7;
- epp->ep_default_adv_cap_mask = els.els_adv_cap_mask;
- epp->ep_adv_cap_mask = els.els_adv_cap_mask;
-
/*
* Enable firmware workarounds for hardware errata.
* Expected responses are:
@@ -187,7 +110,7 @@ hunt_board_cfg(
else if ((rc == ENOTSUP) || (rc == ENOENT))
encp->enc_bug35388_workaround = B_FALSE;
else
- goto fail8;
+ goto fail1;
/*
* If the bug41750 workaround is enabled, then do not test interrupts,
@@ -206,7 +129,7 @@ hunt_board_cfg(
} else if ((rc == ENOTSUP) || (rc == ENOENT)) {
encp->enc_bug41750_workaround = B_FALSE;
} else {
- goto fail9;
+ goto fail2;
}
if (EFX_PCI_FUNCTION_IS_VF(encp)) {
/* Interrupt testing does not work for VFs. See bug50084. */
@@ -244,12 +167,12 @@ hunt_board_cfg(
} else if ((rc == ENOTSUP) || (rc == ENOENT)) {
encp->enc_bug26807_workaround = B_FALSE;
} else {
- goto fail10;
+ goto fail3;
}
/* Get clock frequencies (in MHz). */
if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0)
- goto fail11;
+ goto fail4;
/*
* The Huntington timer quantum is 1536 sysclk cycles, documented for
@@ -266,80 +189,23 @@ hunt_board_cfg(
encp->enc_bug61265_workaround = B_FALSE; /* Medford only */
- /* Check capabilities of running datapath firmware */
- if ((rc = ef10_get_datapath_caps(enp)) != 0)
- goto fail12;
-
/* Alignment for receive packet DMA buffers */
encp->enc_rx_buf_align_start = 1;
encp->enc_rx_buf_align_end = 64; /* RX DMA end padding */
- /* Alignment for WPTR updates */
- encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;
-
- /*
- * Maximum number of exclusive RSS contexts which can be allocated. The
- * hardware supports 64, but 6 are reserved for shared contexts. They
- * are a global resource so not all may be available.
- */
- encp->enc_rx_scale_max_exclusive_contexts = 58;
-
- encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT);
- /* No boundary crossing limits */
- encp->enc_tx_dma_desc_boundary = 0;
-
- /*
- * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
- * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
- * resources (allocated to this PCIe function), which is zero until
- * after we have allocated VIs.
- */
- encp->enc_evq_limit = 1024;
- encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
- encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;
-
/*
* The workaround for bug35388 uses the top bit of transmit queue
* descriptor writes, preventing the use of 4096 descriptor TXQs.
*/
encp->enc_txq_max_ndescs = encp->enc_bug35388_workaround ? 2048 : 4096;
- encp->enc_buftbl_limit = 0xFFFFFFFF;
-
+ EFX_STATIC_ASSERT(HUNT_PIOBUF_NBUFS <= EF10_MAX_PIOBUF_NBUFS);
encp->enc_piobuf_limit = HUNT_PIOBUF_NBUFS;
encp->enc_piobuf_size = HUNT_PIOBUF_SIZE;
encp->enc_piobuf_min_alloc_size = HUNT_MIN_PIO_ALLOC_SIZE;
- /*
- * Get the current privilege mask. Note that this may be modified
- * dynamically, so this value is informational only. DO NOT use
- * the privilege mask to check for sufficient privileges, as that
- * can result in time-of-check/time-of-use bugs.
- */
- if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0)
- goto fail13;
- encp->enc_privilege_mask = mask;
-
- /* Get interrupt vector limits */
- if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) {
- if (EFX_PCI_FUNCTION_IS_PF(encp))
- goto fail14;
-
- /* Ignore error (cannot query vector limits from a VF). */
- base = 0;
- nvec = 1024;
- }
- encp->enc_intr_vec_base = base;
- encp->enc_intr_limit = nvec;
-
- /*
- * Maximum number of bytes into the frame the TCP header can start for
- * firmware assisted TSO to work.
- */
- encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;
-
if ((rc = hunt_nic_get_required_pcie_bandwidth(enp, &bandwidth)) != 0)
- goto fail15;
+ goto fail5;
encp->enc_required_pcie_bandwidth_mbps = bandwidth;
/* All Huntington devices have a PCIe Gen3, 8 lane connector */
@@ -347,26 +213,6 @@ hunt_board_cfg(
return (0);
-fail15:
- EFSYS_PROBE(fail15);
-fail14:
- EFSYS_PROBE(fail14);
-fail13:
- EFSYS_PROBE(fail13);
-fail12:
- EFSYS_PROBE(fail12);
-fail11:
- EFSYS_PROBE(fail11);
-fail10:
- EFSYS_PROBE(fail10);
-fail9:
- EFSYS_PROBE(fail9);
-fail8:
- EFSYS_PROBE(fail8);
-fail7:
- EFSYS_PROBE(fail7);
-fail6:
- EFSYS_PROBE(fail6);
fail5:
EFSYS_PROBE(fail5);
fail4:
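The 1 -> 1U changes in hunt_nic_get_required_pcie_bandwidth() above keep the port mode tests in unsigned arithmetic. A minimal illustration of why the unsigned literal matters once high bit positions are involved (illustrative only, not part of the patch; the TLV_PORT_MODE_* values themselves are assumed to be small):

    /* Illustrative only: shifting a 1 into bit 31 of a signed int is undefined */
    uint32_t ok = 1U << 31;     /* well defined: 0x80000000 */
    /* int bad = 1 << 31; */    /* undefined behaviour in C */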
diff --git a/drivers/net/sfc/base/mcdi_mon.c b/drivers/net/sfc/base/mcdi_mon.c
index e4de0dab..940bd026 100644
--- a/drivers/net/sfc/base/mcdi_mon.c
+++ b/drivers/net/sfc/base/mcdi_mon.c
@@ -135,6 +135,10 @@ static const struct mcdi_sensor_map_s {
STAT(Px, BOARD_BACK_TEMP), /* 0x50 BOARD_BACK_TEMP */
STAT(Px, I1V8), /* 0x51 IN_I1V8 */
STAT(Px, I2V5), /* 0x52 IN_I2V5 */
+ STAT(Px, I3V3), /* 0x53 IN_I3V3 */
+ STAT(Px, I12V0), /* 0x54 IN_I12V0 */
+ STAT(Px, 1_3V), /* 0x55 IN_1V3 */
+ STAT(Px, I1V3), /* 0x56 IN_I1V3 */
};
#define MCDI_STATIC_SENSOR_ASSERT(_field) \
@@ -477,6 +481,11 @@ mcdi_mon_cfg_build(
encp->enc_mon_type = EFX_MON_SFC92X0;
break;
#endif
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ encp->enc_mon_type = EFX_MON_SFC92X0;
+ break;
+#endif
default:
rc = EINVAL;
goto fail1;
diff --git a/drivers/net/sfc/base/medford2_impl.h b/drivers/net/sfc/base/medford2_impl.h
new file mode 100644
index 00000000..6259a700
--- /dev/null
+++ b/drivers/net/sfc/base/medford2_impl.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2015-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#ifndef _SYS_MEDFORD2_IMPL_H
+#define _SYS_MEDFORD2_IMPL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#ifndef ER_EZ_TX_PIOBUF_SIZE
+#define ER_EZ_TX_PIOBUF_SIZE 4096
+#endif
+
+
+#define MEDFORD2_PIOBUF_NBUFS (16)
+#define MEDFORD2_PIOBUF_SIZE (ER_EZ_TX_PIOBUF_SIZE)
+
+#define MEDFORD2_MIN_PIO_ALLOC_SIZE (MEDFORD2_PIOBUF_SIZE / 32)
+
+
+extern __checkReturn efx_rc_t
+medford2_board_cfg(
+ __in efx_nic_t *enp);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_MEDFORD2_IMPL_H */
diff --git a/drivers/net/sfc/base/medford2_nic.c b/drivers/net/sfc/base/medford2_nic.c
new file mode 100644
index 00000000..7f5ad175
--- /dev/null
+++ b/drivers/net/sfc/base/medford2_nic.c
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2015-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_MEDFORD2
+
+static __checkReturn efx_rc_t
+medford2_nic_get_required_pcie_bandwidth(
+ __in efx_nic_t *enp,
+ __out uint32_t *bandwidth_mbpsp)
+{
+ uint32_t port_modes;
+ uint32_t current_mode;
+ uint32_t bandwidth;
+ efx_rc_t rc;
+
+ /* FIXME: support new Medford2 dynamic port modes */
+
+ if ((rc = efx_mcdi_get_port_modes(enp, &port_modes,
+ &current_mode)) != 0) {
+ /* No port mode info available. */
+ bandwidth = 0;
+ goto out;
+ }
+
+ if ((rc = ef10_nic_get_port_mode_bandwidth(current_mode,
+ &bandwidth)) != 0)
+ goto fail1;
+
+out:
+ *bandwidth_mbpsp = bandwidth;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+medford2_board_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t sysclk, dpcpu_clk;
+ uint32_t end_padding;
+ uint32_t bandwidth;
+ efx_rc_t rc;
+
+ /*
+ * Enable firmware workarounds for hardware errata.
+ * Expected responses are:
+ * - 0 (zero):
+ * Success: workaround enabled or disabled as requested.
+ * - MC_CMD_ERR_ENOSYS (reported as ENOTSUP):
+ * Firmware does not support the MC_CMD_WORKAROUND request.
+ * (assume that the workaround is not supported).
+ * - MC_CMD_ERR_ENOENT (reported as ENOENT):
+ * Firmware does not support the requested workaround.
+ * - MC_CMD_ERR_EPERM (reported as EACCES):
+ * Unprivileged function cannot enable/disable workarounds.
+ *
+ * See efx_mcdi_request_errcode() for MCDI error translations.
+ */
+
+
+ if (EFX_PCI_FUNCTION_IS_VF(encp)) {
+ /*
+ * Interrupt testing does not work for VFs on Medford2.
+ * See bug50084 and bug71432 comment 21.
+ */
+ encp->enc_bug41750_workaround = B_TRUE;
+ }
+
+ /* Chained multicast is always enabled on Medford2 */
+ encp->enc_bug26807_workaround = B_TRUE;
+
+ /*
+ * If the bug61265 workaround is enabled, then interrupt holdoff timers
+ * cannot be controlled by timer table writes, so MCDI must be used
+ * (timer table writes can still be used for wakeup timers).
+ */
+ rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG61265, B_TRUE,
+ NULL);
+ if ((rc == 0) || (rc == EACCES))
+ encp->enc_bug61265_workaround = B_TRUE;
+ else if ((rc == ENOTSUP) || (rc == ENOENT))
+ encp->enc_bug61265_workaround = B_FALSE;
+ else
+ goto fail1;
+
+ /* Get clock frequencies (in MHz). */
+ if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0)
+ goto fail2;
+
+ /*
+ * The Medford2 timer quantum is 1536 dpcpu_clk cycles, documented for
+ * the EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units.
+ */
+ encp->enc_evq_timer_quantum_ns = 1536000UL / dpcpu_clk; /* 1536 cycles */
+ encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
+ FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;
+
+ /* Alignment for receive packet DMA buffers */
+ encp->enc_rx_buf_align_start = 1;
+
+ /* Get the RX DMA end padding alignment configuration */
+ if ((rc = efx_mcdi_get_rxdp_config(enp, &end_padding)) != 0) {
+ if (rc != EACCES)
+ goto fail3;
+
+ /* Assume largest tail padding size supported by hardware */
+ end_padding = 256;
+ }
+ encp->enc_rx_buf_align_end = end_padding;
+
+ /*
+ * The maximum supported transmit queue size is 2048. TXQs with 4096
+ * descriptors are not supported as the top bit is used for vfifo
+ * stuffing.
+ */
+ encp->enc_txq_max_ndescs = 2048;
+
+ EFX_STATIC_ASSERT(MEDFORD2_PIOBUF_NBUFS <= EF10_MAX_PIOBUF_NBUFS);
+ encp->enc_piobuf_limit = MEDFORD2_PIOBUF_NBUFS;
+ encp->enc_piobuf_size = MEDFORD2_PIOBUF_SIZE;
+ encp->enc_piobuf_min_alloc_size = MEDFORD2_MIN_PIO_ALLOC_SIZE;
+
+ /*
+ * Medford2 stores a single global copy of VPD, not per-PF as on
+ * Huntington.
+ */
+ encp->enc_vpd_is_global = B_TRUE;
+
+ rc = medford2_nic_get_required_pcie_bandwidth(enp, &bandwidth);
+ if (rc != 0)
+ goto fail4;
+ encp->enc_required_pcie_bandwidth_mbps = bandwidth;
+ encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_MEDFORD2 */
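The event queue timer quantum computed in medford2_board_cfg() above is 1536 dpcpu_clk cycles. A quick worked example, assuming a 1000 MHz DPCPU clock and taking the timer value field width as 14 bits purely for illustration (the real width is FRF_CZ_TC_TIMER_VAL_WIDTH from the register definitions):

    /* Illustrative values only, not taken from the patch */
    uint32_t dpcpu_clk = 1000;                     /* MHz (assumed) */
    uint32_t quantum_ns = 1536000UL / dpcpu_clk;   /* = 1536 ns */
    uint32_t max_us = (quantum_ns << 14) / 1000;   /* ~25165 us with the assumed 14-bit field */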
diff --git a/drivers/net/sfc/base/medford_nic.c b/drivers/net/sfc/base/medford_nic.c
index 1365e9e3..6dc895f5 100644
--- a/drivers/net/sfc/base/medford_nic.c
+++ b/drivers/net/sfc/base/medford_nic.c
@@ -11,64 +11,6 @@
#if EFSYS_OPT_MEDFORD
static __checkReturn efx_rc_t
-efx_mcdi_get_rxdp_config(
- __in efx_nic_t *enp,
- __out uint32_t *end_paddingp)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_RXDP_CONFIG_IN_LEN,
- MC_CMD_GET_RXDP_CONFIG_OUT_LEN)];
- uint32_t end_padding;
- efx_rc_t rc;
-
- memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_GET_RXDP_CONFIG;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_GET_RXDP_CONFIG_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_GET_RXDP_CONFIG_OUT_LEN;
-
- efx_mcdi_execute(enp, &req);
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- if (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
- GET_RXDP_CONFIG_OUT_PAD_HOST_DMA) == 0) {
- /* RX DMA end padding is disabled */
- end_padding = 0;
- } else {
- switch (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
- GET_RXDP_CONFIG_OUT_PAD_HOST_LEN)) {
- case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64:
- end_padding = 64;
- break;
- case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128:
- end_padding = 128;
- break;
- case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256:
- end_padding = 256;
- break;
- default:
- rc = ENOTSUP;
- goto fail2;
- }
- }
-
- *end_paddingp = end_padding;
-
- return (0);
-
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-static __checkReturn efx_rc_t
medford_nic_get_required_pcie_bandwidth(
__in efx_nic_t *enp,
__out uint32_t *bandwidth_mbpsp)
@@ -104,104 +46,13 @@ fail1:
medford_board_cfg(
__in efx_nic_t *enp)
{
- efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
- uint8_t mac_addr[6] = { 0 };
- uint32_t board_type = 0;
- ef10_link_state_t els;
- efx_port_t *epp = &(enp->en_port);
- uint32_t port;
- uint32_t pf;
- uint32_t vf;
- uint32_t mask;
uint32_t sysclk, dpcpu_clk;
- uint32_t base, nvec;
uint32_t end_padding;
uint32_t bandwidth;
efx_rc_t rc;
/*
- * FIXME: Likely to be incomplete and incorrect.
- * Parts of this should be shared with Huntington.
- */
-
- if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)
- goto fail1;
-
- /*
- * NOTE: The MCDI protocol numbers ports from zero.
- * The common code MCDI interface numbers ports from one.
- */
- emip->emi_port = port + 1;
-
- if ((rc = ef10_external_port_mapping(enp, port,
- &encp->enc_external_port)) != 0)
- goto fail2;
-
- /*
- * Get PCIe function number from firmware (used for
- * per-function privilege and dynamic config info).
- * - PCIe PF: pf = PF number, vf = 0xffff.
- * - PCIe VF: pf = parent PF, vf = VF number.
- */
- if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0)
- goto fail3;
-
- encp->enc_pf = pf;
- encp->enc_vf = vf;
-
- /* MAC address for this function */
- if (EFX_PCI_FUNCTION_IS_PF(encp)) {
- rc = efx_mcdi_get_mac_address_pf(enp, mac_addr);
-#if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC
- /*
- * Disable static config checking for Medford NICs, ONLY
- * for manufacturing test and setup at the factory, to
- * allow the static config to be installed.
- */
-#else /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
- if ((rc == 0) && (mac_addr[0] & 0x02)) {
- /*
- * If the static config does not include a global MAC
- * address pool then the board may return a locally
- * administered MAC address (this should only happen on
- * incorrectly programmed boards).
- */
- rc = EINVAL;
- }
-#endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
- } else {
- rc = efx_mcdi_get_mac_address_vf(enp, mac_addr);
- }
- if (rc != 0)
- goto fail4;
-
- EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
-
- /* Board configuration */
- rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL);
- if (rc != 0) {
- /* Unprivileged functions may not be able to read board cfg */
- if (rc == EACCES)
- board_type = 0;
- else
- goto fail5;
- }
-
- encp->enc_board_type = board_type;
- encp->enc_clk_mult = 1; /* not used for Medford */
-
- /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
- if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
- goto fail6;
-
- /* Obtain the default PHY advertised capabilities */
- if ((rc = ef10_phy_get_link(enp, &els)) != 0)
- goto fail7;
- epp->ep_default_adv_cap_mask = els.els_adv_cap_mask;
- epp->ep_adv_cap_mask = els.els_adv_cap_mask;
-
- /*
* Enable firmware workarounds for hardware errata.
* Expected responses are:
* - 0 (zero):
@@ -220,8 +71,8 @@ medford_board_cfg(
if (EFX_PCI_FUNCTION_IS_VF(encp)) {
/*
- * Interrupt testing does not work for VFs. See bug50084.
- * FIXME: Does this still apply to Medford?
+ * Interrupt testing does not work for VFs. See bug50084 and
+ * bug71432 comment 21.
*/
encp->enc_bug41750_workaround = B_TRUE;
}
@@ -241,11 +92,11 @@ medford_board_cfg(
else if ((rc == ENOTSUP) || (rc == ENOENT))
encp->enc_bug61265_workaround = B_FALSE;
else
- goto fail8;
+ goto fail1;
/* Get clock frequencies (in MHz). */
if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0)
- goto fail9;
+ goto fail2;
/*
* The Medford timer quantum is 1536 dpcpu_clk cycles, documented for
@@ -255,47 +106,19 @@ medford_board_cfg(
encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;
- /* Check capabilities of running datapath firmware */
- if ((rc = ef10_get_datapath_caps(enp)) != 0)
- goto fail10;
-
/* Alignment for receive packet DMA buffers */
encp->enc_rx_buf_align_start = 1;
/* Get the RX DMA end padding alignment configuration */
if ((rc = efx_mcdi_get_rxdp_config(enp, &end_padding)) != 0) {
if (rc != EACCES)
- goto fail11;
+ goto fail3;
/* Assume largest tail padding size supported by hardware */
end_padding = 256;
}
encp->enc_rx_buf_align_end = end_padding;
- /* Alignment for WPTR updates */
- encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;
-
- /*
- * Maximum number of exclusive RSS contexts which can be allocated. The
- * hardware supports 64, but 6 are reserved for shared contexts. They
- * are a global resource so not all may be available.
- */
- encp->enc_rx_scale_max_exclusive_contexts = 58;
-
- encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT);
- /* No boundary crossing limits */
- encp->enc_tx_dma_desc_boundary = 0;
-
- /*
- * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
- * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
- * resources (allocated to this PCIe function), which is zero until
- * after we have allocated VIs.
- */
- encp->enc_evq_limit = 1024;
- encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
- encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;
-
/*
* The maximum supported transmit queue size is 2048. TXQs with 4096
* descriptors are not supported as the top bit is used for vfifo
@@ -303,41 +126,12 @@ medford_board_cfg(
*/
encp->enc_txq_max_ndescs = 2048;
- encp->enc_buftbl_limit = 0xFFFFFFFF;
-
+ EFX_STATIC_ASSERT(MEDFORD_PIOBUF_NBUFS <= EF10_MAX_PIOBUF_NBUFS);
encp->enc_piobuf_limit = MEDFORD_PIOBUF_NBUFS;
encp->enc_piobuf_size = MEDFORD_PIOBUF_SIZE;
encp->enc_piobuf_min_alloc_size = MEDFORD_MIN_PIO_ALLOC_SIZE;
/*
- * Get the current privilege mask. Note that this may be modified
- * dynamically, so this value is informational only. DO NOT use
- * the privilege mask to check for sufficient privileges, as that
- * can result in time-of-check/time-of-use bugs.
- */
- if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0)
- goto fail12;
- encp->enc_privilege_mask = mask;
-
- /* Get interrupt vector limits */
- if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) {
- if (EFX_PCI_FUNCTION_IS_PF(encp))
- goto fail13;
-
- /* Ignore error (cannot query vector limits from a VF). */
- base = 0;
- nvec = 1024;
- }
- encp->enc_intr_vec_base = base;
- encp->enc_intr_limit = nvec;
-
- /*
- * Maximum number of bytes into the frame the TCP header can start for
- * firmware assisted TSO to work.
- */
- encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;
-
- /*
* Medford stores a single global copy of VPD, not per-PF as on
* Huntington.
*/
@@ -345,32 +139,12 @@ medford_board_cfg(
rc = medford_nic_get_required_pcie_bandwidth(enp, &bandwidth);
if (rc != 0)
- goto fail14;
+ goto fail4;
encp->enc_required_pcie_bandwidth_mbps = bandwidth;
encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;
return (0);
-fail14:
- EFSYS_PROBE(fail14);
-fail13:
- EFSYS_PROBE(fail13);
-fail12:
- EFSYS_PROBE(fail12);
-fail11:
- EFSYS_PROBE(fail11);
-fail10:
- EFSYS_PROBE(fail10);
-fail9:
- EFSYS_PROBE(fail9);
-fail8:
- EFSYS_PROBE(fail8);
-fail7:
- EFSYS_PROBE(fail7);
-fail6:
- EFSYS_PROBE(fail6);
-fail5:
- EFSYS_PROBE(fail5);
fail4:
EFSYS_PROBE(fail4);
fail3:
diff --git a/drivers/net/sfc/base/meson.build b/drivers/net/sfc/base/meson.build
index f1e49735..da2bf44d 100644
--- a/drivers/net/sfc/base/meson.build
+++ b/drivers/net/sfc/base/meson.build
@@ -34,6 +34,7 @@ sources = [
'siena_vpd.c',
'ef10_ev.c',
'ef10_filter.c',
+ 'ef10_image.c',
'ef10_intr.c',
'ef10_mac.c',
'ef10_mcdi.c',
@@ -44,7 +45,8 @@ sources = [
'ef10_tx.c',
'ef10_vpd.c',
'hunt_nic.c',
- 'medford_nic.c'
+ 'medford_nic.c',
+ 'medford2_nic.c'
]
extra_flags = [
diff --git a/drivers/net/sfc/base/siena_flash.h b/drivers/net/sfc/base/siena_flash.h
index 91a9fe05..74bb9496 100644
--- a/drivers/net/sfc/base/siena_flash.h
+++ b/drivers/net/sfc/base/siena_flash.h
@@ -103,7 +103,14 @@ typedef struct siena_mc_boot_hdr_s { /* GENERATED BY scripts/genfwdef */
/* the key, or 0xffff if unsigned. (Otherwise set to 0) */
efx_byte_t mumfw_subtype; /* MUM & SUC images: subtype. (Otherwise set to 0) */
efx_byte_t reserved_b[3]; /* (set to 0) */
- efx_dword_t reserved_c[6]; /* (set to 0) */
+ efx_dword_t security_level; /* This number increases every time a serious security flaw */
+ /* is fixed. A secure NIC may not downgrade to any image */
+ /* with a lower security level than the current image. */
+ /* Note: The number in this header should only be used for */
+ /* determining the level of new images, not to determine */
+ /* the level of the current image as this header is not */
+ /* protected by a CMAC. */
+ efx_dword_t reserved_c[5]; /* (set to 0) */
} siena_mc_boot_hdr_t;
#define SIENA_MC_BOOT_HDR_PADDING \
diff --git a/drivers/net/sfc/base/siena_mac.c b/drivers/net/sfc/base/siena_mac.c
index 904e03ed..f8857cdd 100644
--- a/drivers/net/sfc/base/siena_mac.c
+++ b/drivers/net/sfc/base/siena_mac.c
@@ -245,16 +245,28 @@ siena_mac_stats_update(
__inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
__inout_opt uint32_t *generationp)
{
- efx_qword_t value;
+ const efx_nic_cfg_t *encp = &enp->en_nic_cfg;
efx_qword_t generation_start;
efx_qword_t generation_end;
+ efx_qword_t value;
+ efx_rc_t rc;
- _NOTE(ARGUNUSED(enp))
+ if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS) {
+ /* MAC stats count too small */
+ rc = ENOSPC;
+ goto fail1;
+ }
+ if (EFSYS_MEM_SIZE(esmp) <
+ (encp->enc_mac_stats_nstats * sizeof (efx_qword_t))) {
+ /* DMA buffer too small */
+ rc = ENOSPC;
+ goto fail2;
+ }
/* Read END first so we don't race with the MC */
- EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE);
- SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_END,
- &generation_end);
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFSYS_MEM_SIZE(esmp));
+ SIENA_MAC_STAT_READ(esmp, (encp->enc_mac_stats_nstats - 1),
+ &generation_end);
EFSYS_MEM_READ_BARRIER();
/* TX */
@@ -422,7 +434,7 @@ siena_mac_stats_update(
SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_NODESC_DROPS, &value);
EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_NODESC_DROP_CNT]), &value);
- EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE);
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFSYS_MEM_SIZE(esmp));
EFSYS_MEM_READ_BARRIER();
SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_START,
&generation_start);
@@ -437,6 +449,13 @@ siena_mac_stats_update(
*generationp = EFX_QWORD_FIELD(generation_start, EFX_DWORD_0);
return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
}
#endif /* EFSYS_OPT_MAC_STATS */
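siena_mac_stats_update() reads the generation-end marker before the statistics and the generation-start marker after them, so a consumer can detect that the MC rewrote the DMA buffer mid-read. A generic sketch of that double-read pattern, with hypothetical helpers (read_generation_end, copy_stats, read_generation_start) standing in for the real accessors:

    /* Sketch: retry until the generation counters match (helpers are hypothetical) */
    do {
        gen_end = read_generation_end(buf);
        rte_rmb();                      /* read barrier, as EFSYS_MEM_READ_BARRIER() */
        copy_stats(buf, stats);
        rte_rmb();
        gen_start = read_generation_start(buf);
    } while (gen_start != gen_end);     /* MC updated the buffer meanwhile, read again */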
diff --git a/drivers/net/sfc/base/siena_mcdi.c b/drivers/net/sfc/base/siena_mcdi.c
index ef844591..d727c187 100644
--- a/drivers/net/sfc/base/siena_mcdi.c
+++ b/drivers/net/sfc/base/siena_mcdi.c
@@ -124,17 +124,21 @@ siena_mcdi_read_response(
{
efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
unsigned int pdur;
- unsigned int pos;
+ unsigned int pos = 0;
efx_dword_t data;
+ size_t remaining = length;
EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2);
pdur = SIENA_MCDI_PDU(emip);
- for (pos = 0; pos < length; pos += sizeof (efx_dword_t)) {
+ while (remaining > 0) {
+ size_t chunk = MIN(remaining, sizeof (data));
+
EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM,
pdur + ((offset + pos) >> 2), &data, B_FALSE);
- memcpy((uint8_t *)bufferp + pos, &data,
- MIN(sizeof (data), length - pos));
+ memcpy((uint8_t *)bufferp + pos, &data, chunk);
+ pos += chunk;
+ remaining -= chunk;
}
}
diff --git a/drivers/net/sfc/base/siena_nic.c b/drivers/net/sfc/base/siena_nic.c
index f223c9be..31eef80b 100644
--- a/drivers/net/sfc/base/siena_nic.c
+++ b/drivers/net/sfc/base/siena_nic.c
@@ -66,6 +66,10 @@ siena_board_cfg(
uint32_t nevq, nrxq, ntxq;
efx_rc_t rc;
+ /* Siena has a fixed 8Kbyte VI window size */
+ EFX_STATIC_ASSERT(1U << EFX_VI_WINDOW_SHIFT_8K == 8192);
+ encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;
+
/* External port identifier using one-based port numbering */
encp->enc_external_port = (uint8_t)enp->en_mcdi.em_emip.emi_port;
@@ -114,6 +118,18 @@ siena_board_cfg(
/* There is one RSS context per function */
encp->enc_rx_scale_max_exclusive_contexts = 1;
+ encp->enc_rx_scale_hash_alg_mask |= (1U << EFX_RX_HASHALG_LFSR);
+ encp->enc_rx_scale_hash_alg_mask |= (1U << EFX_RX_HASHALG_TOEPLITZ);
+
+ /*
+ * It is always possible to use port numbers
+ * as the input data for hash computation.
+ */
+ encp->enc_rx_scale_l4_hash_supported = B_TRUE;
+
+ /* There is no support for additional RSS modes */
+ encp->enc_rx_scale_additional_modes_supported = B_FALSE;
+
encp->enc_tx_dma_desc_size_max = EFX_MASK32(FSF_AZ_TX_KER_BYTE_COUNT);
/* Fragments must not span 4k boundaries. */
encp->enc_tx_dma_desc_boundary = 4096;
@@ -145,6 +161,8 @@ siena_board_cfg(
encp->enc_allow_set_mac_with_installed_filters = B_TRUE;
encp->enc_rx_packed_stream_supported = B_FALSE;
encp->enc_rx_var_packed_stream_supported = B_FALSE;
+ encp->enc_rx_es_super_buffer_supported = B_FALSE;
+ encp->enc_fw_subvariant_no_tx_csum_supported = B_FALSE;
/* Siena supports two 10G ports, and 8 lanes of PCIe Gen2 */
encp->enc_required_pcie_bandwidth_mbps = 2 * 10000;
@@ -152,6 +170,12 @@ siena_board_cfg(
encp->enc_nvram_update_verify_result_supported = B_FALSE;
+ encp->enc_mac_stats_nstats = MC_CMD_MAC_NSTATS;
+
+ encp->enc_filter_action_flag_supported = B_FALSE;
+ encp->enc_filter_action_mark_supported = B_FALSE;
+ encp->enc_filter_action_mark_max = 0;
+
return (0);
fail2:
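siena_board_cfg() now advertises the supported RSS hash algorithms as a bit mask (enc_rx_scale_hash_alg_mask). A caller would typically test the mask before selecting an algorithm; a hedged sketch using the enum values from this patch, with the control flow itself hypothetical:

    /* Sketch: prefer Toeplitz if the NIC reports it */
    if (encp->enc_rx_scale_hash_alg_mask & (1U << EFX_RX_HASHALG_TOEPLITZ))
        alg = EFX_RX_HASHALG_TOEPLITZ;
    else if (encp->enc_rx_scale_hash_alg_mask & (1U << EFX_RX_HASHALG_LFSR))
        alg = EFX_RX_HASHALG_LFSR;
    else
        return (ENOTSUP);   /* no usable hash algorithm */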
diff --git a/drivers/net/sfc/base/siena_nvram.c b/drivers/net/sfc/base/siena_nvram.c
index e72bba0b..8cdd2df7 100644
--- a/drivers/net/sfc/base/siena_nvram.c
+++ b/drivers/net/sfc/base/siena_nvram.c
@@ -304,15 +304,20 @@ siena_nvram_get_dynamic_cfg(
if ((rc = siena_nvram_partn_size(enp, partn, &size)) != 0)
goto fail1;
+ if (size < SIENA_NVRAM_CHUNK) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
EFSYS_KMEM_ALLOC(enp->en_esip, size, dcfg);
if (dcfg == NULL) {
rc = ENOMEM;
- goto fail2;
+ goto fail3;
}
if ((rc = siena_nvram_partn_read(enp, partn, 0,
(caddr_t)dcfg, SIENA_NVRAM_CHUNK)) != 0)
- goto fail3;
+ goto fail4;
/* Verify the magic */
if (EFX_DWORD_FIELD(dcfg->magic, EFX_DWORD_0)
@@ -347,7 +352,7 @@ siena_nvram_get_dynamic_cfg(
if ((rc = siena_nvram_partn_read(enp, partn, SIENA_NVRAM_CHUNK,
(caddr_t)dcfg + SIENA_NVRAM_CHUNK,
region - SIENA_NVRAM_CHUNK)) != 0)
- goto fail4;
+ goto fail5;
}
/* Verify checksum */
@@ -389,13 +394,15 @@ done:
return (0);
+fail5:
+ EFSYS_PROBE(fail5);
fail4:
EFSYS_PROBE(fail4);
-fail3:
- EFSYS_PROBE(fail3);
EFSYS_KMEM_FREE(enp->en_esip, size, dcfg);
+fail3:
+ EFSYS_PROBE(fail3);
fail2:
EFSYS_PROBE(fail2);
fail1:
diff --git a/drivers/net/sfc/base/siena_phy.c b/drivers/net/sfc/base/siena_phy.c
index d638646b..4b2190d3 100644
--- a/drivers/net/sfc/base/siena_phy.c
+++ b/drivers/net/sfc/base/siena_phy.c
@@ -534,6 +534,11 @@ siena_phy_stats_update(
MC_CMD_PHY_STATS_OUT_DMA_LEN)];
efx_rc_t rc;
+ if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_PHY_STATS_SIZE)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
(void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_PHY_STATS;
req.emr_in_buf = payload;
@@ -550,7 +555,7 @@ siena_phy_stats_update(
if (req.emr_rc != 0) {
rc = req.emr_rc;
- goto fail1;
+ goto fail2;
}
EFSYS_ASSERT3U(req.emr_out_length, ==, MC_CMD_PHY_STATS_OUT_DMA_LEN);
@@ -559,6 +564,8 @@ siena_phy_stats_update(
return (0);
+fail2:
+ EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
diff --git a/drivers/net/sfc/base/siena_vpd.c b/drivers/net/sfc/base/siena_vpd.c
index f188eb58..ebb12abf 100644
--- a/drivers/net/sfc/base/siena_vpd.c
+++ b/drivers/net/sfc/base/siena_vpd.c
@@ -36,21 +36,26 @@ siena_vpd_get_static(
if ((rc = siena_nvram_partn_size(enp, partn, &size)) != 0)
goto fail1;
+ if (size < SIENA_NVRAM_CHUNK) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
EFSYS_KMEM_ALLOC(enp->en_esip, size, scfg);
if (scfg == NULL) {
rc = ENOMEM;
- goto fail2;
+ goto fail3;
}
if ((rc = siena_nvram_partn_read(enp, partn, 0,
(caddr_t)scfg, SIENA_NVRAM_CHUNK)) != 0)
- goto fail3;
+ goto fail4;
/* Verify the magic number */
if (EFX_DWORD_FIELD(scfg->magic, EFX_DWORD_0) !=
SIENA_MC_STATIC_CONFIG_MAGIC) {
rc = EINVAL;
- goto fail4;
+ goto fail5;
}
/* All future versions of the structure must be backwards compatible */
@@ -64,7 +69,7 @@ siena_vpd_get_static(
if (hdr_length > size || vpd_offset > size || vpd_length > size ||
vpd_length + vpd_offset > size) {
rc = EINVAL;
- goto fail5;
+ goto fail6;
}
/* Read the remainder of scfg + static vpd */
@@ -73,7 +78,7 @@ siena_vpd_get_static(
if ((rc = siena_nvram_partn_read(enp, partn, SIENA_NVRAM_CHUNK,
(caddr_t)scfg + SIENA_NVRAM_CHUNK,
region - SIENA_NVRAM_CHUNK)) != 0)
- goto fail6;
+ goto fail7;
}
/* Verify checksum */
@@ -82,7 +87,7 @@ siena_vpd_get_static(
cksum += ((uint8_t *)scfg)[pos];
if (cksum != 0) {
rc = EINVAL;
- goto fail7;
+ goto fail8;
}
if (vpd_length == 0)
@@ -92,7 +97,7 @@ siena_vpd_get_static(
EFSYS_KMEM_ALLOC(enp->en_esip, vpd_length, svpd);
if (svpd == NULL) {
rc = ENOMEM;
- goto fail8;
+ goto fail9;
}
memcpy(svpd, (caddr_t)scfg + vpd_offset, vpd_length);
}
@@ -104,6 +109,8 @@ siena_vpd_get_static(
return (0);
+fail9:
+ EFSYS_PROBE(fail9);
fail8:
EFSYS_PROBE(fail8);
fail7:
@@ -114,11 +121,11 @@ fail5:
EFSYS_PROBE(fail5);
fail4:
EFSYS_PROBE(fail4);
-fail3:
- EFSYS_PROBE(fail3);
EFSYS_KMEM_FREE(enp->en_esip, size, scfg);
+fail3:
+ EFSYS_PROBE(fail3);
fail2:
EFSYS_PROBE(fail2);
fail1:
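The fail-label renumbering in siena_vpd_get_static() (and siena_nvram_get_dynamic_cfg() above) follows from inserting the size check before the allocation: EFSYS_KMEM_FREE must sit above the label taken for failures that happen before the buffer exists. The pattern, reduced to a hedged sketch (read_partition is a hypothetical stand-in for the NVRAM read):

    /* Sketch of the goto-fail ordering used here */
    if (size < SIENA_NVRAM_CHUNK) { rc = EINVAL; goto fail2; }   /* no buffer yet */
    EFSYS_KMEM_ALLOC(esip, size, buf);
    if (buf == NULL) { rc = ENOMEM; goto fail3; }
    if ((rc = read_partition(buf)) != 0)                          /* hypothetical helper */
        goto fail4;
    ...
    fail4:
        EFSYS_PROBE(fail4);
        EFSYS_KMEM_FREE(esip, size, buf);   /* only reached once buf was allocated */
    fail3:
        EFSYS_PROBE(fail3);
    fail2:
        EFSYS_PROBE(fail2);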
diff --git a/drivers/net/sfc/efsys.h b/drivers/net/sfc/efsys.h
index c7a54c3b..b9d2df58 100644
--- a/drivers/net/sfc/efsys.h
+++ b/drivers/net/sfc/efsys.h
@@ -26,6 +26,7 @@
#include <rte_io.h>
#include "sfc_debug.h"
+#include "sfc_log.h"
#ifdef __cplusplus
extern "C" {
@@ -119,6 +120,8 @@ prefetch_read_once(const volatile void *addr)
#define __out_ecount_opt(_n)
#define __out_bcount(_n)
#define __out_bcount_opt(_n)
+#define __out_bcount_part(_n, _l)
+#define __out_bcount_part_opt(_n, _l)
#define __deref_out
@@ -148,6 +151,8 @@ prefetch_read_once(const volatile void *addr)
#define EFSYS_OPT_HUNTINGTON 1
/* Enable SFN8xxx support */
#define EFSYS_OPT_MEDFORD 1
+/* Enable X2xxx support */
+#define EFSYS_OPT_MEDFORD2 1
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
#define EFSYS_OPT_CHECK_REG 1
#else
@@ -161,7 +166,7 @@ prefetch_read_once(const volatile void *addr)
#define EFSYS_OPT_MAC_STATS 1
-#define EFSYS_OPT_LOOPBACK 0
+#define EFSYS_OPT_LOOPBACK 1
#define EFSYS_OPT_MON_MCDI 0
#define EFSYS_OPT_MON_STATS 0
@@ -174,6 +179,7 @@ prefetch_read_once(const volatile void *addr)
#define EFSYS_OPT_VPD 0
#define EFSYS_OPT_NVRAM 0
#define EFSYS_OPT_BOOTCFG 0
+#define EFSYS_OPT_IMAGE_LAYOUT 0
#define EFSYS_OPT_DIAG 0
#define EFSYS_OPT_RX_SCALE 1
@@ -192,8 +198,12 @@ prefetch_read_once(const volatile void *addr)
#define EFSYS_OPT_RX_PACKED_STREAM 0
+#define EFSYS_OPT_RX_ES_SUPER_BUFFER 1
+
#define EFSYS_OPT_TUNNEL 1
+#define EFSYS_OPT_FW_SUBVARIANT_AWARE 1
+
/* ID */
typedef struct __efsys_identifier_s efsys_identifier_t;
@@ -370,6 +380,9 @@ typedef struct efsys_mem_s {
} while (B_FALSE)
+#define EFSYS_MEM_SIZE(_esmp) \
+ ((_esmp)->esm_mz->len)
+
#define EFSYS_MEM_ADDR(_esmp) \
((_esmp)->esm_addr)
@@ -721,7 +734,7 @@ typedef uint64_t efsys_stat_t;
#define EFSYS_ERR(_esip, _code, _dword0, _dword1) \
do { \
(void)(_esip); \
- RTE_LOG(ERR, PMD, "FATAL ERROR #%u (0x%08x%08x)\n", \
+ SFC_GENERIC_LOG(ERR, "FATAL ERROR #%u (0x%08x%08x)", \
(_code), (_dword0), (_dword1)); \
_NOTE(CONSTANTCONDITION); \
} while (B_FALSE)
diff --git a/drivers/net/sfc/meson.build b/drivers/net/sfc/meson.build
index b60a8f58..3aa14c7b 100644
--- a/drivers/net/sfc/meson.build
+++ b/drivers/net/sfc/meson.build
@@ -30,10 +30,6 @@ extra_flags += [
'-Wbad-function-cast'
]
-# Suppress ICC false positive warning on 'bulk' may be used before its
-# value is set
-extra_flags += '-wd3656'
-
foreach flag: extra_flags
if cc.has_argument(flag)
cflags += flag
@@ -58,6 +54,7 @@ sources = files(
'sfc_flow.c',
'sfc_dp.c',
'sfc_ef10_rx.c',
+ 'sfc_ef10_essb_rx.c',
'sfc_ef10_tx.c'
)
diff --git a/drivers/net/sfc/sfc.c b/drivers/net/sfc/sfc.c
index ac5fdcaa..6690053f 100644
--- a/drivers/net/sfc/sfc.c
+++ b/drivers/net/sfc/sfc.c
@@ -20,6 +20,8 @@
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"
+#include "sfc_kvargs.h"
+#include "sfc_tweak.h"
int
@@ -81,14 +83,23 @@ sfc_phy_cap_from_link_speeds(uint32_t speeds)
phy_caps |=
(1 << EFX_PHY_CAP_1000FDX) |
(1 << EFX_PHY_CAP_10000FDX) |
- (1 << EFX_PHY_CAP_40000FDX);
+ (1 << EFX_PHY_CAP_25000FDX) |
+ (1 << EFX_PHY_CAP_40000FDX) |
+ (1 << EFX_PHY_CAP_50000FDX) |
+ (1 << EFX_PHY_CAP_100000FDX);
}
if (speeds & ETH_LINK_SPEED_1G)
phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
if (speeds & ETH_LINK_SPEED_10G)
phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
+ if (speeds & ETH_LINK_SPEED_25G)
+ phy_caps |= (1 << EFX_PHY_CAP_25000FDX);
if (speeds & ETH_LINK_SPEED_40G)
phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
+ if (speeds & ETH_LINK_SPEED_50G)
+ phy_caps |= (1 << EFX_PHY_CAP_50000FDX);
+ if (speeds & ETH_LINK_SPEED_100G)
+ phy_caps |= (1 << EFX_PHY_CAP_100000FDX);
return phy_caps;
}
@@ -113,10 +124,12 @@ sfc_check_conf(struct sfc_adapter *sa)
rc = EINVAL;
}
+#if !EFSYS_OPT_LOOPBACK
if (conf->lpbk_mode != 0) {
sfc_err(sa, "Loopback not supported");
rc = EINVAL;
}
+#endif
if (conf->dcb_capability_en != 0) {
sfc_err(sa, "Priority-based flow control not supported");
@@ -250,6 +263,58 @@ sfc_set_drv_limits(struct sfc_adapter *sa)
}
static int
+sfc_set_fw_subvariant(struct sfc_adapter *sa)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ uint64_t tx_offloads = sa->eth_dev->data->dev_conf.txmode.offloads;
+ unsigned int txq_index;
+ efx_nic_fw_subvariant_t req_fw_subvariant;
+ efx_nic_fw_subvariant_t cur_fw_subvariant;
+ int rc;
+
+ if (!encp->enc_fw_subvariant_no_tx_csum_supported) {
+ sfc_info(sa, "no-Tx-checksum subvariant not supported");
+ return 0;
+ }
+
+ for (txq_index = 0; txq_index < sa->txq_count; ++txq_index) {
+ struct sfc_txq_info *txq_info = &sa->txq_info[txq_index];
+
+ if (txq_info->txq != NULL)
+ tx_offloads |= txq_info->txq->offloads;
+ }
+
+ if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
+ req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_DEFAULT;
+ else
+ req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM;
+
+ rc = efx_nic_get_fw_subvariant(sa->nic, &cur_fw_subvariant);
+ if (rc != 0) {
+ sfc_err(sa, "failed to get FW subvariant: %d", rc);
+ return rc;
+ }
+ sfc_info(sa, "FW subvariant is %u vs required %u",
+ cur_fw_subvariant, req_fw_subvariant);
+
+ if (cur_fw_subvariant == req_fw_subvariant)
+ return 0;
+
+ rc = efx_nic_set_fw_subvariant(sa->nic, req_fw_subvariant);
+ if (rc != 0) {
+ sfc_err(sa, "failed to set FW subvariant %u: %d",
+ req_fw_subvariant, rc);
+ return rc;
+ }
+ sfc_info(sa, "FW subvariant set to %u", req_fw_subvariant);
+
+ return 0;
+}
+
+static int
sfc_try_start(struct sfc_adapter *sa)
{
const efx_nic_cfg_t *encp;
@@ -260,6 +325,11 @@ sfc_try_start(struct sfc_adapter *sa)
SFC_ASSERT(sfc_adapter_is_locked(sa));
SFC_ASSERT(sa->state == SFC_ADAPTER_STARTING);
+ sfc_log_init(sa, "set FW subvariant");
+ rc = sfc_set_fw_subvariant(sa);
+ if (rc != 0)
+ goto fail_set_fw_subvariant;
+
sfc_log_init(sa, "set resource limits");
rc = sfc_set_drv_limits(sa);
if (rc != 0)
@@ -326,6 +396,7 @@ fail_tunnel_reconfigure:
fail_nic_init:
fail_set_drv_limits:
+fail_set_fw_subvariant:
sfc_log_init(sa, "failed %d", rc);
return rc;
}
@@ -344,7 +415,7 @@ sfc_start(struct sfc_adapter *sa)
case SFC_ADAPTER_CONFIGURED:
break;
case SFC_ADAPTER_STARTED:
- sfc_info(sa, "already started");
+ sfc_notice(sa, "already started");
return 0;
default:
rc = EINVAL;
@@ -383,7 +454,7 @@ sfc_stop(struct sfc_adapter *sa)
case SFC_ADAPTER_STARTED:
break;
case SFC_ADAPTER_CONFIGURED:
- sfc_info(sa, "already stopped");
+ sfc_notice(sa, "already stopped");
return;
default:
sfc_err(sa, "stop in unexpected state %u", sa->state);
@@ -454,7 +525,7 @@ sfc_schedule_restart(struct sfc_adapter *sa)
else if (rc != 0)
sfc_err(sa, "cannot arm restart alarm (rc=%d)", rc);
else
- sfc_info(sa, "restart scheduled");
+ sfc_notice(sa, "restart scheduled");
}
int
@@ -530,27 +601,18 @@ sfc_close(struct sfc_adapter *sa)
}
static int
-sfc_mem_bar_init(struct sfc_adapter *sa)
+sfc_mem_bar_init(struct sfc_adapter *sa, unsigned int membar)
{
struct rte_eth_dev *eth_dev = sa->eth_dev;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
efsys_bar_t *ebp = &sa->mem_bar;
- unsigned int i;
- struct rte_mem_resource *res;
-
- for (i = 0; i < RTE_DIM(pci_dev->mem_resource); i++) {
- res = &pci_dev->mem_resource[i];
- if ((res->len != 0) && (res->phys_addr != 0)) {
- /* Found first memory BAR */
- SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name);
- ebp->esb_rid = i;
- ebp->esb_dev = pci_dev;
- ebp->esb_base = res->addr;
- return 0;
- }
- }
+ struct rte_mem_resource *res = &pci_dev->mem_resource[membar];
- return EFAULT;
+ SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name);
+ ebp->esb_rid = membar;
+ ebp->esb_dev = pci_dev;
+ ebp->esb_base = res->addr;
+ return 0;
}
static void
@@ -562,7 +624,6 @@ sfc_mem_bar_fini(struct sfc_adapter *sa)
memset(ebp, 0, sizeof(*ebp));
}
-#if EFSYS_OPT_RX_SCALE
/*
* A fixed RSS key which has a property of being symmetric
* (symmetrical flows are distributed to the same CPU)
@@ -576,12 +637,11 @@ static const uint8_t default_rss_key[EFX_RSS_KEY_SIZE] = {
0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
};
-#endif
-#if EFSYS_OPT_RX_SCALE
static int
-sfc_set_rss_defaults(struct sfc_adapter *sa)
+sfc_rss_attach(struct sfc_adapter *sa)
{
+ struct sfc_rss *rss = &sa->rss;
int rc;
rc = efx_intr_init(sa->nic, sa->intr.type, NULL);
@@ -596,26 +656,31 @@ sfc_set_rss_defaults(struct sfc_adapter *sa)
if (rc != 0)
goto fail_rx_init;
- rc = efx_rx_scale_default_support_get(sa->nic, &sa->rss_support);
+ rc = efx_rx_scale_default_support_get(sa->nic, &rss->context_type);
if (rc != 0)
goto fail_scale_support_get;
- rc = efx_rx_hash_default_support_get(sa->nic, &sa->hash_support);
+ rc = efx_rx_hash_default_support_get(sa->nic, &rss->hash_support);
if (rc != 0)
goto fail_hash_support_get;
+ rc = sfc_rx_hash_init(sa);
+ if (rc != 0)
+ goto fail_rx_hash_init;
+
efx_rx_fini(sa->nic);
efx_ev_fini(sa->nic);
efx_intr_fini(sa->nic);
- sa->rss_hash_types = sfc_rte_to_efx_hash_type(SFC_RSS_OFFLOADS);
-
- rte_memcpy(sa->rss_key, default_rss_key, sizeof(sa->rss_key));
+ rte_memcpy(rss->key, default_rss_key, sizeof(rss->key));
return 0;
+fail_rx_hash_init:
fail_hash_support_get:
fail_scale_support_get:
+ efx_rx_fini(sa->nic);
+
fail_rx_init:
efx_ev_fini(sa->nic);
@@ -625,13 +690,12 @@ fail_ev_init:
fail_intr_init:
return rc;
}
-#else
-static int
-sfc_set_rss_defaults(__rte_unused struct sfc_adapter *sa)
+
+static void
+sfc_rss_detach(struct sfc_adapter *sa)
{
- return 0;
+ sfc_rx_hash_fini(sa);
}
-#endif
int
sfc_attach(struct sfc_adapter *sa)
@@ -690,9 +754,9 @@ sfc_attach(struct sfc_adapter *sa)
if (rc != 0)
goto fail_port_attach;
- rc = sfc_set_rss_defaults(sa);
+ rc = sfc_rss_attach(sa);
if (rc != 0)
- goto fail_set_rss_defaults;
+ goto fail_rss_attach;
rc = sfc_filter_attach(sa);
if (rc != 0)
@@ -709,7 +773,9 @@ sfc_attach(struct sfc_adapter *sa)
return 0;
fail_filter_attach:
-fail_set_rss_defaults:
+ sfc_rss_detach(sa);
+
+fail_rss_attach:
sfc_port_detach(sa);
fail_port_attach:
@@ -741,6 +807,7 @@ sfc_detach(struct sfc_adapter *sa)
sfc_flow_fini(sa);
sfc_filter_detach(sa);
+ sfc_rss_detach(sa);
sfc_port_detach(sa);
sfc_ev_detach(sa);
sfc_intr_detach(sa);
@@ -749,10 +816,169 @@ sfc_detach(struct sfc_adapter *sa)
sa->state = SFC_ADAPTER_UNINITIALIZED;
}
+static int
+sfc_kvarg_fv_variant_handler(__rte_unused const char *key,
+ const char *value_str, void *opaque)
+{
+ uint32_t *value = opaque;
+
+ if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DONT_CARE) == 0)
+ *value = EFX_FW_VARIANT_DONT_CARE;
+ else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_FULL_FEATURED) == 0)
+ *value = EFX_FW_VARIANT_FULL_FEATURED;
+ else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_LOW_LATENCY) == 0)
+ *value = EFX_FW_VARIANT_LOW_LATENCY;
+ else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_PACKED_STREAM) == 0)
+ *value = EFX_FW_VARIANT_PACKED_STREAM;
+ else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DPDK) == 0)
+ *value = EFX_FW_VARIANT_DPDK;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+sfc_get_fw_variant(struct sfc_adapter *sa, efx_fw_variant_t *efv)
+{
+ efx_nic_fw_info_t enfi;
+ int rc;
+
+ rc = efx_nic_get_fw_version(sa->nic, &enfi);
+ if (rc != 0)
+ return rc;
+ else if (!enfi.enfi_dpcpu_fw_ids_valid)
+ return ENOTSUP;
+
+ /*
+ * Firmware variant can be uniquely identified by the RxDPCPU
+ * firmware id
+ */
+ switch (enfi.enfi_rx_dpcpu_fw_id) {
+ case EFX_RXDP_FULL_FEATURED_FW_ID:
+ *efv = EFX_FW_VARIANT_FULL_FEATURED;
+ break;
+
+ case EFX_RXDP_LOW_LATENCY_FW_ID:
+ *efv = EFX_FW_VARIANT_LOW_LATENCY;
+ break;
+
+ case EFX_RXDP_PACKED_STREAM_FW_ID:
+ *efv = EFX_FW_VARIANT_PACKED_STREAM;
+ break;
+
+ case EFX_RXDP_DPDK_FW_ID:
+ *efv = EFX_FW_VARIANT_DPDK;
+ break;
+
+ default:
+ /*
+ * Other firmware variants are not considered, since they are
+ * not supported in the device parameters
+ */
+ *efv = EFX_FW_VARIANT_DONT_CARE;
+ break;
+ }
+
+ return 0;
+}
+
+static const char *
+sfc_fw_variant2str(efx_fw_variant_t efv)
+{
+ switch (efv) {
+ case EFX_RXDP_FULL_FEATURED_FW_ID:
+ return SFC_KVARG_FW_VARIANT_FULL_FEATURED;
+ case EFX_RXDP_LOW_LATENCY_FW_ID:
+ return SFC_KVARG_FW_VARIANT_LOW_LATENCY;
+ case EFX_RXDP_PACKED_STREAM_FW_ID:
+ return SFC_KVARG_FW_VARIANT_PACKED_STREAM;
+ case EFX_RXDP_DPDK_FW_ID:
+ return SFC_KVARG_FW_VARIANT_DPDK;
+ default:
+ return "unknown";
+ }
+}
+
+static int
+sfc_kvarg_rxd_wait_timeout_ns(struct sfc_adapter *sa)
+{
+ int rc;
+ long value;
+
+ value = SFC_RXD_WAIT_TIMEOUT_NS_DEF;
+
+ rc = sfc_kvargs_process(sa, SFC_KVARG_RXD_WAIT_TIMEOUT_NS,
+ sfc_kvarg_long_handler, &value);
+ if (rc != 0)
+ return rc;
+
+ if (value < 0 ||
+ (unsigned long)value > EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX) {
+ sfc_err(sa, "wrong '" SFC_KVARG_RXD_WAIT_TIMEOUT_NS "' "
+ "was set (%ld);", value);
+ sfc_err(sa, "it must not be less than 0 or greater than %u",
+ EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX);
+ return EINVAL;
+ }
+
+ sa->rxd_wait_timeout_ns = value;
+ return 0;
+}
+
+static int
+sfc_nic_probe(struct sfc_adapter *sa)
+{
+ efx_nic_t *enp = sa->nic;
+ efx_fw_variant_t preferred_efv;
+ efx_fw_variant_t efv;
+ int rc;
+
+ preferred_efv = EFX_FW_VARIANT_DONT_CARE;
+ rc = sfc_kvargs_process(sa, SFC_KVARG_FW_VARIANT,
+ sfc_kvarg_fv_variant_handler,
+ &preferred_efv);
+ if (rc != 0) {
+ sfc_err(sa, "invalid %s parameter value", SFC_KVARG_FW_VARIANT);
+ return rc;
+ }
+
+ rc = sfc_kvarg_rxd_wait_timeout_ns(sa);
+ if (rc != 0)
+ return rc;
+
+ rc = efx_nic_probe(enp, preferred_efv);
+ if (rc == EACCES) {
+ /* Unprivileged functions cannot set FW variant */
+ rc = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE);
+ }
+ if (rc != 0)
+ return rc;
+
+ rc = sfc_get_fw_variant(sa, &efv);
+ if (rc == ENOTSUP) {
+ sfc_warn(sa, "FW variant can not be obtained");
+ return 0;
+ }
+ if (rc != 0)
+ return rc;
+
+ /* Check that firmware variant was changed to the requested one */
+ if (preferred_efv != EFX_FW_VARIANT_DONT_CARE && preferred_efv != efv) {
+ sfc_warn(sa, "FW variant has not changed to the requested %s",
+ sfc_fw_variant2str(preferred_efv));
+ }
+
+ sfc_notice(sa, "running FW variant is %s", sfc_fw_variant2str(efv));
+
+ return 0;
+}
+
int
sfc_probe(struct sfc_adapter *sa)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
+ unsigned int membar;
efx_nic_t *enp;
int rc;
@@ -763,17 +989,17 @@ sfc_probe(struct sfc_adapter *sa)
sa->socket_id = rte_socket_id();
rte_atomic32_init(&sa->restart_required);
- sfc_log_init(sa, "init mem bar");
- rc = sfc_mem_bar_init(sa);
- if (rc != 0)
- goto fail_mem_bar_init;
-
sfc_log_init(sa, "get family");
rc = efx_family(pci_dev->id.vendor_id, pci_dev->id.device_id,
- &sa->family);
+ &sa->family, &membar);
if (rc != 0)
goto fail_family;
- sfc_log_init(sa, "family is %u", sa->family);
+ sfc_log_init(sa, "family is %u, membar is %u", sa->family, membar);
+
+ sfc_log_init(sa, "init mem bar");
+ rc = sfc_mem_bar_init(sa, membar);
+ if (rc != 0)
+ goto fail_mem_bar_init;
sfc_log_init(sa, "create nic");
rte_spinlock_init(&sa->nic_lock);
@@ -788,7 +1014,7 @@ sfc_probe(struct sfc_adapter *sa)
goto fail_mcdi_init;
sfc_log_init(sa, "probe nic");
- rc = efx_nic_probe(enp);
+ rc = sfc_nic_probe(sa);
if (rc != 0)
goto fail_nic_probe;
@@ -804,10 +1030,10 @@ fail_mcdi_init:
efx_nic_destroy(enp);
fail_nic_create:
-fail_family:
sfc_mem_bar_fini(sa);
fail_mem_bar_init:
+fail_family:
sfc_log_init(sa, "failed %d", rc);
return rc;
}
@@ -843,3 +1069,35 @@ sfc_unprobe(struct sfc_adapter *sa)
sfc_flow_fini(sa);
sa->state = SFC_ADAPTER_UNINITIALIZED;
}
+
+uint32_t
+sfc_register_logtype(struct sfc_adapter *sa, const char *lt_prefix_str,
+ uint32_t ll_default)
+{
+ size_t lt_prefix_str_size = strlen(lt_prefix_str);
+ size_t lt_str_size_max;
+ char *lt_str = NULL;
+ int ret;
+
+ if (SIZE_MAX - PCI_PRI_STR_SIZE - 1 > lt_prefix_str_size) {
+ ++lt_prefix_str_size; /* Reserve space for prefix separator */
+ lt_str_size_max = lt_prefix_str_size + PCI_PRI_STR_SIZE + 1;
+ } else {
+ return RTE_LOGTYPE_PMD;
+ }
+
+ lt_str = rte_zmalloc("logtype_str", lt_str_size_max, 0);
+ if (lt_str == NULL)
+ return RTE_LOGTYPE_PMD;
+
+ strncpy(lt_str, lt_prefix_str, lt_prefix_str_size);
+ lt_str[lt_prefix_str_size - 1] = '.';
+ rte_pci_device_name(&sa->pci_addr, lt_str + lt_prefix_str_size,
+ lt_str_size_max - lt_prefix_str_size);
+ lt_str[lt_str_size_max - 1] = '\0';
+
+ ret = rte_log_register_type_and_pick_level(lt_str, ll_default);
+ rte_free(lt_str);
+
+ return (ret < 0) ? RTE_LOGTYPE_PMD : ret;
+}
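sfc_register_logtype() builds a per-device log type name from a prefix and the PCI address, and falls back to RTE_LOGTYPE_PMD if registration is not possible. A hedged usage sketch (the prefix string and log levels are placeholders, not taken from this patch):

    /* Sketch: register a per-adapter log type and use it */
    sa->logtype_main = sfc_register_logtype(sa, "pmd.net.sfc.main", RTE_LOG_NOTICE);
    rte_log(RTE_LOG_INFO, sa->logtype_main, "adapter attached\n");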
diff --git a/drivers/net/sfc/sfc.h b/drivers/net/sfc/sfc.h
index 75575349..51be4403 100644
--- a/drivers/net/sfc/sfc.h
+++ b/drivers/net/sfc/sfc.h
@@ -27,11 +27,6 @@
extern "C" {
#endif
-#if EFSYS_OPT_RX_SCALE
-/** RSS hash offloads mask */
-#define SFC_RSS_OFFLOADS (ETH_RSS_IP | ETH_RSS_TCP)
-#endif
-
/*
* +---------------+
* | UNINITIALIZED |<-----------+
@@ -104,7 +99,7 @@ struct sfc_mcdi {
efsys_mem_t mem;
enum sfc_mcdi_state state;
efx_mcdi_transport_t transport;
- bool logging;
+ uint32_t logtype;
uint32_t proxy_handle;
efx_rc_t proxy_result;
};
@@ -156,6 +151,24 @@ struct sfc_port {
uint32_t mac_stats_mask[EFX_MAC_STATS_MASK_NPAGES];
};
+struct sfc_rss_hf_rte_to_efx {
+ uint64_t rte;
+ efx_rx_hash_type_t efx;
+};
+
+struct sfc_rss {
+ unsigned int channels;
+ efx_rx_scale_context_type_t context_type;
+ efx_rx_hash_support_t hash_support;
+ efx_rx_hash_alg_t hash_alg;
+ unsigned int hf_map_nb_entries;
+ struct sfc_rss_hf_rte_to_efx *hf_map;
+
+ efx_rx_hash_type_t hash_types;
+ unsigned int tbl[EFX_RSS_TBL_SIZE];
+ uint8_t key[EFX_RSS_KEY_SIZE];
+};
+
/* Adapter private data */
struct sfc_adapter {
/*
@@ -170,7 +183,7 @@ struct sfc_adapter {
uint16_t port_id;
struct rte_eth_dev *eth_dev;
struct rte_kvargs *kvargs;
- bool debug_init;
+ uint32_t logtype_main;
int socket_id;
efsys_bar_t mem_bar;
efx_family_t family;
@@ -225,15 +238,9 @@ struct sfc_adapter {
boolean_t tso;
- unsigned int rss_channels;
+ uint32_t rxd_wait_timeout_ns;
-#if EFSYS_OPT_RX_SCALE
- efx_rx_scale_context_type_t rss_support;
- efx_rx_hash_support_t hash_support;
- efx_rx_hash_type_t rss_hash_types;
- unsigned int rss_tbl[EFX_RSS_TBL_SIZE];
- uint8_t rss_key[EFX_RSS_KEY_SIZE];
-#endif
+ struct sfc_rss rss;
/*
* Shared memory copy of the Rx datapath name to be used by
@@ -302,6 +309,10 @@ int sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
size_t len, int socket_id, efsys_mem_t *esmp);
void sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp);
+uint32_t sfc_register_logtype(struct sfc_adapter *sa,
+ const char *lt_prefix_str,
+ uint32_t ll_default);
+
int sfc_probe(struct sfc_adapter *sa);
void sfc_unprobe(struct sfc_adapter *sa);
int sfc_attach(struct sfc_adapter *sa);
diff --git a/drivers/net/sfc/sfc_dp.c b/drivers/net/sfc/sfc_dp.c
index 9a5ca20b..b121dc09 100644
--- a/drivers/net/sfc/sfc_dp.c
+++ b/drivers/net/sfc/sfc_dp.c
@@ -14,6 +14,7 @@
#include <rte_log.h>
#include "sfc_dp.h"
+#include "sfc_log.h"
void
sfc_dp_queue_init(struct sfc_dp_queue *dpq, uint16_t port_id, uint16_t queue_id,
@@ -63,8 +64,8 @@ int
sfc_dp_register(struct sfc_dp_list *head, struct sfc_dp *entry)
{
if (sfc_dp_find_by_name(head, entry->type, entry->name) != NULL) {
- rte_log(RTE_LOG_ERR, RTE_LOGTYPE_PMD,
- "sfc %s dapapath '%s' already registered\n",
+ SFC_GENERIC_LOG(ERR,
+ "sfc %s datapath '%s' already registered",
entry->type == SFC_DP_RX ? "Rx" :
entry->type == SFC_DP_TX ? "Tx" :
"unknown",
diff --git a/drivers/net/sfc/sfc_dp.h b/drivers/net/sfc/sfc_dp.h
index b142532d..3da65abe 100644
--- a/drivers/net/sfc/sfc_dp.h
+++ b/drivers/net/sfc/sfc_dp.h
@@ -15,6 +15,8 @@
#include <rte_pci.h>
+#include "sfc_log.h"
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -58,10 +60,10 @@ void sfc_dp_queue_init(struct sfc_dp_queue *dpq,
const struct sfc_dp_queue *_dpq = (dpq); \
const struct rte_pci_addr *_addr = &(_dpq)->pci_addr; \
\
- RTE_LOG(level, PMD, \
+ SFC_GENERIC_LOG(level, \
RTE_FMT("%s " PCI_PRI_FMT \
" #%" PRIu16 ".%" PRIu16 ": " \
- RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ RTE_FMT_HEAD(__VA_ARGS__ ,), \
dp_name, \
_addr->domain, _addr->bus, \
_addr->devid, _addr->function, \
@@ -77,7 +79,8 @@ struct sfc_dp {
enum sfc_dp_type type;
/* Mask of required hardware/firmware capabilities */
unsigned int hw_fw_caps;
-#define SFC_DP_HW_FW_CAP_EF10 0x1
+#define SFC_DP_HW_FW_CAP_EF10 0x1
+#define SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER 0x2
};
/** List of datapath variants */
diff --git a/drivers/net/sfc/sfc_dp_rx.h b/drivers/net/sfc/sfc_dp_rx.h
index be725dcb..83faad16 100644
--- a/drivers/net/sfc/sfc_dp_rx.h
+++ b/drivers/net/sfc/sfc_dp_rx.h
@@ -78,6 +78,8 @@ struct sfc_dp_rx_qcreate_info {
* doorbell
*/
volatile void *mem_bar;
+ /** VI window size shift */
+ unsigned int vi_window_shift;
};
/**
@@ -88,10 +90,23 @@ struct sfc_dp_rx_qcreate_info {
typedef void (sfc_dp_rx_get_dev_info_t)(struct rte_eth_dev_info *dev_info);
/**
+ * Test if an Rx datapath supports specific mempool ops.
+ *
+ * @param pool The name of the pool operations to test.
+ *
+ * @return Check status.
+ * @retval 0 Best mempool ops choice.
+ * @retval 1 Mempool ops are supported.
+ * @retval -ENOTSUP Mempool ops not supported.
+ */
+typedef int (sfc_dp_rx_pool_ops_supported_t)(const char *pool);
+
+/**
* Get size of receive and event queue rings by the number of Rx
- * descriptors.
+ * descriptors and mempool configuration.
*
* @param nb_rx_desc Number of Rx descriptors
+ * @param mb_pool mbuf pool with Rx buffers
* @param rxq_entries Location for number of Rx ring entries
* @param evq_entries Location for number of event ring entries
* @param rxq_max_fill_level Location for maximum Rx ring fill level
@@ -99,6 +114,7 @@ typedef void (sfc_dp_rx_get_dev_info_t)(struct rte_eth_dev_info *dev_info);
* @return 0 or positive errno.
*/
typedef int (sfc_dp_rx_qsize_up_rings_t)(uint16_t nb_rx_desc,
+ struct rte_mempool *mb_pool,
unsigned int *rxq_entries,
unsigned int *evq_entries,
unsigned int *rxq_max_fill_level);
@@ -146,6 +162,12 @@ typedef void (sfc_dp_rx_qstop_t)(struct sfc_dp_rxq *dp_rxq,
typedef bool (sfc_dp_rx_qrx_ev_t)(struct sfc_dp_rxq *dp_rxq, unsigned int id);
/**
+ * Packed stream receive event handler used during queue flush only.
+ */
+typedef bool (sfc_dp_rx_qrx_ps_ev_t)(struct sfc_dp_rxq *dp_rxq,
+ unsigned int id);
+
+/**
* Receive queue purge function called after queue flush.
*
 * Should be used to free unused receive buffers.
@@ -171,13 +193,17 @@ struct sfc_dp_rx {
#define SFC_DP_RX_FEAT_SCATTER 0x1
#define SFC_DP_RX_FEAT_MULTI_PROCESS 0x2
#define SFC_DP_RX_FEAT_TUNNELS 0x4
+#define SFC_DP_RX_FEAT_FLOW_FLAG 0x8
+#define SFC_DP_RX_FEAT_FLOW_MARK 0x10
sfc_dp_rx_get_dev_info_t *get_dev_info;
+ sfc_dp_rx_pool_ops_supported_t *pool_ops_supported;
sfc_dp_rx_qsize_up_rings_t *qsize_up_rings;
sfc_dp_rx_qcreate_t *qcreate;
sfc_dp_rx_qdestroy_t *qdestroy;
sfc_dp_rx_qstart_t *qstart;
sfc_dp_rx_qstop_t *qstop;
sfc_dp_rx_qrx_ev_t *qrx_ev;
+ sfc_dp_rx_qrx_ps_ev_t *qrx_ps_ev;
sfc_dp_rx_qpurge_t *qpurge;
sfc_dp_rx_supported_ptypes_get_t *supported_ptypes_get;
sfc_dp_rx_qdesc_npending_t *qdesc_npending;
@@ -203,6 +229,7 @@ sfc_dp_find_rx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps)
extern struct sfc_dp_rx sfc_efx_rx;
extern struct sfc_dp_rx sfc_ef10_rx;
+extern struct sfc_dp_rx sfc_ef10_essb_rx;
#ifdef __cplusplus
}
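The new pool_ops_supported callback lets a datapath veto or prefer particular mempool ops. A hedged sketch of a possible implementation for a datapath that needs contiguous-block ("bucket") mempools; the function name and the ops-name check are assumptions for illustration, not code from this patch:

    /* Sketch: accept only the "bucket" mempool ops (assumed requirement) */
    static int
    sfc_example_rx_pool_ops_supported(const char *pool)
    {
        if (strcmp(pool, "bucket") == 0)
            return 0;           /* best choice for this datapath */
        return -ENOTSUP;        /* anything else is not supported */
    }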
diff --git a/drivers/net/sfc/sfc_dp_tx.h b/drivers/net/sfc/sfc_dp_tx.h
index 0c1aad90..a075612c 100644
--- a/drivers/net/sfc/sfc_dp_tx.h
+++ b/drivers/net/sfc/sfc_dp_tx.h
@@ -57,6 +57,8 @@ struct sfc_dp_tx_qcreate_info {
unsigned int hw_index;
/** Virtual address of the memory-mapped BAR to push Tx doorbell */
volatile void *mem_bar;
+ /** VI window size shift */
+ unsigned int vi_window_shift;
};
/**
diff --git a/drivers/net/sfc/sfc_ef10.h b/drivers/net/sfc/sfc_ef10.h
index ace6a1dd..a73e0bde 100644
--- a/drivers/net/sfc/sfc_ef10.h
+++ b/drivers/net/sfc/sfc_ef10.h
@@ -79,6 +79,40 @@ sfc_ef10_ev_present(const efx_qword_t ev)
~EFX_QWORD_FIELD(ev, EFX_DWORD_1);
}
+
+/**
+ * Alignment requirement for value written to RX WPTR:
+ * the WPTR must be aligned to an 8 descriptor boundary.
+ */
+#define SFC_EF10_RX_WPTR_ALIGN 8u
+
+static inline void
+sfc_ef10_rx_qpush(volatile void *doorbell, unsigned int added,
+ unsigned int ptr_mask)
+{
+ efx_dword_t dword;
+
+ /* Hardware has alignment restriction for WPTR */
+ RTE_BUILD_BUG_ON(SFC_RX_REFILL_BULK % SFC_EF10_RX_WPTR_ALIGN != 0);
+ SFC_ASSERT(RTE_ALIGN(added, SFC_EF10_RX_WPTR_ALIGN) == added);
+
+ EFX_POPULATE_DWORD_1(dword, ERF_DZ_RX_DESC_WPTR, added & ptr_mask);
+
+ /* DMA sync to device is not required */
+
+ /*
+ * rte_write32() has rte_io_wmb() which guarantees that the STORE
+ * operations (i.e. Rx and event descriptor updates) that precede
+ * the rte_io_wmb() call are visible to NIC before the STORE
+ * operations that follow it (i.e. doorbell write).
+ */
+ rte_write32(dword.ed_u32[0], doorbell);
+}
+
+
+const uint32_t * sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps);
+
+
#ifdef __cplusplus
}
#endif
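sfc_ef10_rx_qpush() requires the pushed write pointer to be a multiple of SFC_EF10_RX_WPTR_ALIGN (8 descriptors). A hedged usage sketch from a refill path's point of view (field names are illustrative):

    /* Sketch: ring the doorbell only after refilling a multiple of 8 descriptors */
    if (likely(added != rxq->added)) {
        sfc_ef10_rx_qpush(rxq->doorbell, added, rxq->ptr_mask);
        rxq->added = added;
    }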
diff --git a/drivers/net/sfc/sfc_ef10_essb_rx.c b/drivers/net/sfc/sfc_ef10_essb_rx.c
new file mode 100644
index 00000000..5f5af602
--- /dev/null
+++ b/drivers/net/sfc/sfc_ef10_essb_rx.c
@@ -0,0 +1,700 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2017-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+/* EF10 equal stride packed stream receive native datapath implementation */
+
+#include <stdbool.h>
+
+#include <rte_byteorder.h>
+#include <rte_mbuf_ptype.h>
+#include <rte_mbuf.h>
+#include <rte_io.h>
+
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_regs_ef10.h"
+
+#include "sfc_tweak.h"
+#include "sfc_dp_rx.h"
+#include "sfc_kvargs.h"
+#include "sfc_ef10.h"
+
+/* Tunnels are not supported */
+#define SFC_EF10_RX_EV_ENCAP_SUPPORT 0
+#include "sfc_ef10_rx_ev.h"
+
+#define sfc_ef10_essb_rx_err(dpq, ...) \
+ SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10_ESSB, ERR, dpq, __VA_ARGS__)
+
+#define sfc_ef10_essb_rx_info(dpq, ...) \
+ SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10_ESSB, INFO, dpq, __VA_ARGS__)
+
+/*
+ * Fake length for RXQ descriptors in equal stride super-buffer mode
+ * to make hardware happy.
+ */
+#define SFC_EF10_ESSB_RX_FAKE_BUF_SIZE 32
+
+/**
+ * Minimum number of Rx buffers the datapath allows to use.
+ *
+ * Each HW Rx descriptor refers to many Rx buffers. The number of buffers
+ * per HW Rx descriptor is equal to the size of the contiguous block
+ * provided by the Rx buffers memory pool. The contiguous block size
+ * depends on CONFIG_RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB and the rte_mbuf
+ * data size specified on memory pool creation. A typical rte_mbuf
+ * data size of about 2k gives a bit less than 32 buffers per
+ * contiguous block with the default 64k bucket size.
+ * Since HW Rx descriptors are pushed in multiples of 8 (see
+ * SFC_EF10_RX_WPTR_ALIGN), this makes about 256 the required minimum.
+ * The advertised minimum is doubled to allow for at least 2 refill blocks.
+ */
+#define SFC_EF10_ESSB_RX_DESCS_MIN 512
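/*
 * Editorial worked example behind the constant above, using the typical
 * figures from the comment (64k bucket, ~2k mbuf data size):
 *   buffers per contiguous block ~= 64k / 2k                   = ~32
 *   buffers per refill push      ~= 32 * SFC_EF10_RX_WPTR_ALIGN = ~256
 *   advertised minimum            = 2 refill pushes             = 512
 */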
+
+/**
+ * Alignment requirement for the number of Rx buffers.
+ *
+ * There are no extra requirements on alignment since the actual number
+ * of pushed Rx buffers is a multiple of the contiguous block size,
+ * which is unknown beforehand.
+ */
+#define SFC_EF10_ESSB_RX_DESCS_ALIGN 1
+
+/**
+ * Maximum number of descriptors/buffers in the Rx ring.
+ * It should guarantee that the corresponding event queue never overfills.
+ */
+#define SFC_EF10_ESSB_RXQ_LIMIT(_nevs) \
+ ((_nevs) - 1 /* head must not step on tail */ - \
+ (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
+ 1 /* Rx error */ - 1 /* flush */)
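/*
 * Editorial worked example: assuming SFC_EF10_EV_PER_CACHE_LINE is 8
 * (64-byte cache line, 8-byte events), a 2048-entry event queue allows
 *   SFC_EF10_ESSB_RXQ_LIMIT(2048) = 2048 - 1 - 7 - 1 - 1 = 2038
 * outstanding Rx buffers before the event queue could overfill.
 */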
+
+struct sfc_ef10_essb_rx_sw_desc {
+ struct rte_mbuf *first_mbuf;
+};
+
+struct sfc_ef10_essb_rxq {
+ /* Used on data path */
+ unsigned int flags;
+#define SFC_EF10_ESSB_RXQ_STARTED 0x1
+#define SFC_EF10_ESSB_RXQ_NOT_RUNNING 0x2
+#define SFC_EF10_ESSB_RXQ_EXCEPTION 0x4
+ unsigned int rxq_ptr_mask;
+ unsigned int block_size;
+ unsigned int buf_stride;
+ unsigned int bufs_ptr;
+ unsigned int completed;
+ unsigned int pending_id;
+ unsigned int bufs_pending;
+ unsigned int left_in_completed;
+ unsigned int left_in_pending;
+ unsigned int evq_read_ptr;
+ unsigned int evq_ptr_mask;
+ efx_qword_t *evq_hw_ring;
+ struct sfc_ef10_essb_rx_sw_desc *sw_ring;
+ uint16_t port_id;
+
+ /* Used on refill */
+ unsigned int added;
+ unsigned int max_fill_level;
+ unsigned int refill_threshold;
+ struct rte_mempool *refill_mb_pool;
+ efx_qword_t *rxq_hw_ring;
+ volatile void *doorbell;
+
+ /* Datapath receive queue anchor */
+ struct sfc_dp_rxq dp;
+};
+
+static inline struct sfc_ef10_essb_rxq *
+sfc_ef10_essb_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
+{
+ return container_of(dp_rxq, struct sfc_ef10_essb_rxq, dp);
+}
+
+static struct rte_mbuf *
+sfc_ef10_essb_next_mbuf(const struct sfc_ef10_essb_rxq *rxq,
+ struct rte_mbuf *mbuf)
+{
+ return (struct rte_mbuf *)((uintptr_t)mbuf + rxq->buf_stride);
+}
+
+static struct rte_mbuf *
+sfc_ef10_essb_mbuf_by_index(const struct sfc_ef10_essb_rxq *rxq,
+ struct rte_mbuf *mbuf, unsigned int idx)
+{
+ return (struct rte_mbuf *)((uintptr_t)mbuf + idx * rxq->buf_stride);
+}
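/*
 * Editorial note: mbufs inside one contiguous block are not chained;
 * they sit exactly buf_stride bytes apart, where buf_stride is the
 * mempool's header_size + elt_size + trailer_size (see qcreate below).
 * So the i-th buffer of a block is reached by plain pointer arithmetic
 * from the first mbuf, e.g.:
 *
 *   struct rte_mbuf *m_i =
 *       sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf, i);
 */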
+
+static struct rte_mbuf *
+sfc_ef10_essb_maybe_next_completed(struct sfc_ef10_essb_rxq *rxq)
+{
+ const struct sfc_ef10_essb_rx_sw_desc *rxd;
+
+ if (rxq->left_in_completed != 0) {
+ rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask];
+ return sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
+ rxq->block_size - rxq->left_in_completed);
+ } else {
+ rxq->completed++;
+ rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask];
+ rxq->left_in_completed = rxq->block_size;
+ return rxd->first_mbuf;
+ }
+}
+
+static void
+sfc_ef10_essb_rx_qrefill(struct sfc_ef10_essb_rxq *rxq)
+{
+ const unsigned int rxq_ptr_mask = rxq->rxq_ptr_mask;
+ unsigned int free_space;
+ unsigned int bulks;
+ void *mbuf_blocks[SFC_EF10_RX_WPTR_ALIGN];
+ unsigned int added = rxq->added;
+
+ free_space = rxq->max_fill_level - (added - rxq->completed);
+
+ if (free_space < rxq->refill_threshold)
+ return;
+
+ bulks = free_space / RTE_DIM(mbuf_blocks);
+ /* refill_threshold guarantees that bulks is positive */
+ SFC_ASSERT(bulks > 0);
+
+ do {
+ unsigned int id;
+ unsigned int i;
+
+ if (unlikely(rte_mempool_get_contig_blocks(rxq->refill_mb_pool,
+ mbuf_blocks, RTE_DIM(mbuf_blocks)) < 0)) {
+ struct rte_eth_dev_data *dev_data =
+ rte_eth_devices[rxq->port_id].data;
+
+ /*
+			 * It is hardly a safe way to increment the counter
+ * from different contexts, but all PMDs do it.
+ */
+ dev_data->rx_mbuf_alloc_failed += RTE_DIM(mbuf_blocks);
+ /* Return if we have posted nothing yet */
+ if (added == rxq->added)
+ return;
+ /* Push posted */
+ break;
+ }
+
+ for (i = 0, id = added & rxq_ptr_mask;
+ i < RTE_DIM(mbuf_blocks);
+ ++i, ++id) {
+ struct rte_mbuf *m = mbuf_blocks[i];
+ struct sfc_ef10_essb_rx_sw_desc *rxd;
+
+ SFC_ASSERT((id & ~rxq_ptr_mask) == 0);
+ rxd = &rxq->sw_ring[id];
+ rxd->first_mbuf = m;
+
+ /* RX_KER_BYTE_CNT is ignored by firmware */
+ EFX_POPULATE_QWORD_2(rxq->rxq_hw_ring[id],
+ ESF_DZ_RX_KER_BYTE_CNT,
+ SFC_EF10_ESSB_RX_FAKE_BUF_SIZE,
+ ESF_DZ_RX_KER_BUF_ADDR,
+ rte_mbuf_data_iova_default(m));
+ }
+
+ added += RTE_DIM(mbuf_blocks);
+
+ } while (--bulks > 0);
+
+ SFC_ASSERT(rxq->added != added);
+ rxq->added = added;
+ sfc_ef10_rx_qpush(rxq->doorbell, added, rxq_ptr_mask);
+}
+
+static bool
+sfc_ef10_essb_rx_event_get(struct sfc_ef10_essb_rxq *rxq, efx_qword_t *rx_ev)
+{
+ *rx_ev = rxq->evq_hw_ring[rxq->evq_read_ptr & rxq->evq_ptr_mask];
+
+ if (!sfc_ef10_ev_present(*rx_ev))
+ return false;
+
+ if (unlikely(EFX_QWORD_FIELD(*rx_ev, FSF_AZ_EV_CODE) !=
+ FSE_AZ_EV_CODE_RX_EV)) {
+ /*
+ * Do not move read_ptr to keep the event for exception
+ * handling
+ */
+ rxq->flags |= SFC_EF10_ESSB_RXQ_EXCEPTION;
+ sfc_ef10_essb_rx_err(&rxq->dp.dpq,
+ "RxQ exception at EvQ read ptr %#x",
+ rxq->evq_read_ptr);
+ return false;
+ }
+
+ rxq->evq_read_ptr++;
+ return true;
+}
+
+static void
+sfc_ef10_essb_rx_process_ev(struct sfc_ef10_essb_rxq *rxq, efx_qword_t rx_ev)
+{
+ unsigned int ready;
+
+ ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) -
+ rxq->bufs_ptr) &
+ EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
+
+ rxq->bufs_ptr += ready;
+ rxq->bufs_pending += ready;
+
+ SFC_ASSERT(ready > 0);
+ do {
+ const struct sfc_ef10_essb_rx_sw_desc *rxd;
+ struct rte_mbuf *m;
+ unsigned int todo_bufs;
+ struct rte_mbuf *m0;
+
+ rxd = &rxq->sw_ring[rxq->pending_id];
+ m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
+ rxq->block_size - rxq->left_in_pending);
+
+ if (ready < rxq->left_in_pending) {
+ todo_bufs = ready;
+ ready = 0;
+ rxq->left_in_pending -= todo_bufs;
+ } else {
+ todo_bufs = rxq->left_in_pending;
+ ready -= todo_bufs;
+ rxq->left_in_pending = rxq->block_size;
+ if (rxq->pending_id != rxq->rxq_ptr_mask)
+ rxq->pending_id++;
+ else
+ rxq->pending_id = 0;
+ }
+
+ SFC_ASSERT(todo_bufs > 0);
+ --todo_bufs;
+
+ sfc_ef10_rx_ev_to_offloads(rx_ev, m, ~0ull);
+
+ /* Prefetch pseudo-header */
+ rte_prefetch0((uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM);
+
+ m0 = m;
+ while (todo_bufs-- > 0) {
+ m = sfc_ef10_essb_next_mbuf(rxq, m);
+ m->ol_flags = m0->ol_flags;
+ m->packet_type = m0->packet_type;
+ /* Prefetch pseudo-header */
+ rte_prefetch0((uint8_t *)m->buf_addr +
+ RTE_PKTMBUF_HEADROOM);
+ }
+ } while (ready > 0);
+}
+
+static unsigned int
+sfc_ef10_essb_rx_get_pending(struct sfc_ef10_essb_rxq *rxq,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ unsigned int n_rx_pkts = 0;
+ unsigned int todo_bufs;
+ struct rte_mbuf *m;
+
+ while ((todo_bufs = RTE_MIN(nb_pkts - n_rx_pkts,
+ rxq->bufs_pending)) > 0) {
+ m = sfc_ef10_essb_maybe_next_completed(rxq);
+
+ todo_bufs = RTE_MIN(todo_bufs, rxq->left_in_completed);
+
+ rxq->bufs_pending -= todo_bufs;
+ rxq->left_in_completed -= todo_bufs;
+
+ SFC_ASSERT(todo_bufs > 0);
+ todo_bufs--;
+
+ do {
+ const efx_qword_t *qwordp;
+ uint16_t pkt_len;
+
+ rx_pkts[n_rx_pkts++] = m;
+
+ /* Parse pseudo-header */
+ qwordp = (const efx_qword_t *)
+ ((uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM);
+ pkt_len =
+ EFX_QWORD_FIELD(*qwordp,
+ ES_EZ_ESSB_RX_PREFIX_DATA_LEN);
+
+ m->data_off = RTE_PKTMBUF_HEADROOM +
+ ES_EZ_ESSB_RX_PREFIX_LEN;
+ m->port = rxq->port_id;
+
+ rte_pktmbuf_pkt_len(m) = pkt_len;
+ rte_pktmbuf_data_len(m) = pkt_len;
+
+ m->ol_flags |=
+ (PKT_RX_RSS_HASH *
+ !!EFX_TEST_QWORD_BIT(*qwordp,
+ ES_EZ_ESSB_RX_PREFIX_HASH_VALID_LBN)) |
+ (PKT_RX_FDIR_ID *
+ !!EFX_TEST_QWORD_BIT(*qwordp,
+ ES_EZ_ESSB_RX_PREFIX_MARK_VALID_LBN)) |
+ (PKT_RX_FDIR *
+ !!EFX_TEST_QWORD_BIT(*qwordp,
+ ES_EZ_ESSB_RX_PREFIX_MATCH_FLAG_LBN));
+
+ /* EFX_QWORD_FIELD converts little-endian to CPU */
+ m->hash.rss =
+ EFX_QWORD_FIELD(*qwordp,
+ ES_EZ_ESSB_RX_PREFIX_HASH);
+ m->hash.fdir.hi =
+ EFX_QWORD_FIELD(*qwordp,
+ ES_EZ_ESSB_RX_PREFIX_MARK);
+
+ m = sfc_ef10_essb_next_mbuf(rxq, m);
+ } while (todo_bufs-- > 0);
+ }
+
+ return n_rx_pkts;
+}
+
+
+static uint16_t
+sfc_ef10_essb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(rx_queue);
+ const unsigned int evq_old_read_ptr = rxq->evq_read_ptr;
+ uint16_t n_rx_pkts;
+ efx_qword_t rx_ev;
+
+ if (unlikely(rxq->flags & (SFC_EF10_ESSB_RXQ_NOT_RUNNING |
+ SFC_EF10_ESSB_RXQ_EXCEPTION)))
+ return 0;
+
+ n_rx_pkts = sfc_ef10_essb_rx_get_pending(rxq, rx_pkts, nb_pkts);
+
+ while (n_rx_pkts != nb_pkts &&
+ sfc_ef10_essb_rx_event_get(rxq, &rx_ev)) {
+ /*
+		 * DROP_EVENT is internal to the NIC; software should
+		 * never see it and, therefore, may ignore it.
+ */
+
+ sfc_ef10_essb_rx_process_ev(rxq, rx_ev);
+ n_rx_pkts += sfc_ef10_essb_rx_get_pending(rxq,
+ rx_pkts + n_rx_pkts,
+ nb_pkts - n_rx_pkts);
+ }
+
+ sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->evq_ptr_mask,
+ evq_old_read_ptr, rxq->evq_read_ptr);
+
+ /* It is not a problem if we refill in the case of exception */
+ sfc_ef10_essb_rx_qrefill(rxq);
+
+ return n_rx_pkts;
+}
+
+static sfc_dp_rx_qdesc_npending_t sfc_ef10_essb_rx_qdesc_npending;
+static unsigned int
+sfc_ef10_essb_rx_qdesc_npending(__rte_unused struct sfc_dp_rxq *dp_rxq)
+{
+ /*
+	 * A correct implementation requires EvQ polling and event
+	 * processing.
+ */
+ return -ENOTSUP;
+}
+
+static sfc_dp_rx_qdesc_status_t sfc_ef10_essb_rx_qdesc_status;
+static int
+sfc_ef10_essb_rx_qdesc_status(__rte_unused struct sfc_dp_rxq *dp_rxq,
+ __rte_unused uint16_t offset)
+{
+ return -ENOTSUP;
+}
+
+static sfc_dp_rx_get_dev_info_t sfc_ef10_essb_rx_get_dev_info;
+static void
+sfc_ef10_essb_rx_get_dev_info(struct rte_eth_dev_info *dev_info)
+{
+ /*
+	 * The number of descriptors just defines the maximum number of
+	 * pushed descriptors (fill level).
+ */
+ dev_info->rx_desc_lim.nb_min = SFC_EF10_ESSB_RX_DESCS_MIN;
+ dev_info->rx_desc_lim.nb_align = SFC_EF10_ESSB_RX_DESCS_ALIGN;
+}
+
+static sfc_dp_rx_pool_ops_supported_t sfc_ef10_essb_rx_pool_ops_supported;
+static int
+sfc_ef10_essb_rx_pool_ops_supported(const char *pool)
+{
+ SFC_ASSERT(pool != NULL);
+
+ if (strcmp(pool, "bucket") == 0)
+ return 0;
+
+ return -ENOTSUP;
+}
+
+static sfc_dp_rx_qsize_up_rings_t sfc_ef10_essb_rx_qsize_up_rings;
+static int
+sfc_ef10_essb_rx_qsize_up_rings(uint16_t nb_rx_desc,
+ struct rte_mempool *mb_pool,
+ unsigned int *rxq_entries,
+ unsigned int *evq_entries,
+ unsigned int *rxq_max_fill_level)
+{
+ int rc;
+ struct rte_mempool_info mp_info;
+ unsigned int nb_hw_rx_desc;
+ unsigned int max_events;
+
+ rc = rte_mempool_ops_get_info(mb_pool, &mp_info);
+ if (rc != 0)
+ return -rc;
+ if (mp_info.contig_block_size == 0)
+ return EINVAL;
+
+ /*
+	 * Calculate the required number of hardware Rx descriptors, each
+	 * carrying a contiguous block of Rx buffers.
+ * It cannot be less than Rx write pointer alignment plus 1
+ * in order to avoid cases when the ring is guaranteed to be
+ * empty.
+ */
+ nb_hw_rx_desc = RTE_MAX(SFC_DIV_ROUND_UP(nb_rx_desc,
+ mp_info.contig_block_size),
+ SFC_EF10_RX_WPTR_ALIGN + 1);
+ if (nb_hw_rx_desc <= EFX_RXQ_MINNDESCS) {
+ *rxq_entries = EFX_RXQ_MINNDESCS;
+ } else {
+ *rxq_entries = rte_align32pow2(nb_hw_rx_desc);
+ if (*rxq_entries > EFX_RXQ_MAXNDESCS)
+ return EINVAL;
+ }
+
+ max_events = RTE_ALIGN_FLOOR(nb_hw_rx_desc, SFC_EF10_RX_WPTR_ALIGN) *
+ mp_info.contig_block_size +
+ (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ +
+ 1 /* Rx error */ + 1 /* flush */ + 1 /* head-tail space */;
+
+ *evq_entries = rte_align32pow2(max_events);
+ *evq_entries = RTE_MAX(*evq_entries, (unsigned int)EFX_EVQ_MINNEVS);
+ *evq_entries = RTE_MIN(*evq_entries, (unsigned int)EFX_EVQ_MAXNEVS);
+
+ /*
+	 * Even the maximum event queue size may be insufficient to handle
+	 * so many Rx descriptors. If so, limit the Rx queue fill level.
+ */
+ *rxq_max_fill_level = RTE_MIN(nb_rx_desc,
+ SFC_EF10_ESSB_RXQ_LIMIT(*evq_entries));
+ return 0;
+}
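/*
 * Editorial worked example (typical figures only): nb_rx_desc = 2048 and
 * contig_block_size = 32 give
 *   nb_hw_rx_desc = max(2048 / 32, SFC_EF10_RX_WPTR_ALIGN + 1) = 64
 * hardware descriptors; assuming 64 does not exceed EFX_RXQ_MINNDESCS,
 * rxq_entries is simply set to that minimum. The event queue must then
 * accommodate 64 * 32 = 2048 buffer completions plus the fixed overhead,
 * which rounds up to 4096 entries, subject to the EVQ min/max limits.
 */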
+
+static sfc_dp_rx_qcreate_t sfc_ef10_essb_rx_qcreate;
+static int
+sfc_ef10_essb_rx_qcreate(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr, int socket_id,
+ const struct sfc_dp_rx_qcreate_info *info,
+ struct sfc_dp_rxq **dp_rxqp)
+{
+ struct rte_mempool * const mp = info->refill_mb_pool;
+ struct rte_mempool_info mp_info;
+ struct sfc_ef10_essb_rxq *rxq;
+ int rc;
+
+ rc = rte_mempool_ops_get_info(mp, &mp_info);
+ if (rc != 0) {
+ /* Positive errno is used in the driver */
+ rc = -rc;
+ goto fail_get_contig_block_size;
+ }
+
+ /* Check if the mempool provides block dequeue */
+ rc = EINVAL;
+ if (mp_info.contig_block_size == 0)
+ goto fail_no_block_dequeue;
+
+ rc = ENOMEM;
+ rxq = rte_zmalloc_socket("sfc-ef10-rxq", sizeof(*rxq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq == NULL)
+ goto fail_rxq_alloc;
+
+ sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);
+
+ rc = ENOMEM;
+ rxq->sw_ring = rte_calloc_socket("sfc-ef10-rxq-sw_ring",
+ info->rxq_entries,
+ sizeof(*rxq->sw_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq->sw_ring == NULL)
+ goto fail_desc_alloc;
+
+ rxq->block_size = mp_info.contig_block_size;
+ rxq->buf_stride = mp->header_size + mp->elt_size + mp->trailer_size;
+ rxq->rxq_ptr_mask = info->rxq_entries - 1;
+ rxq->evq_ptr_mask = info->evq_entries - 1;
+ rxq->evq_hw_ring = info->evq_hw_ring;
+ rxq->port_id = port_id;
+
+ rxq->max_fill_level = info->max_fill_level / mp_info.contig_block_size;
+ rxq->refill_threshold =
+ RTE_MAX(info->refill_threshold / mp_info.contig_block_size,
+ SFC_EF10_RX_WPTR_ALIGN);
+ rxq->refill_mb_pool = mp;
+ rxq->rxq_hw_ring = info->rxq_hw_ring;
+
+ rxq->doorbell = (volatile uint8_t *)info->mem_bar +
+ ER_DZ_RX_DESC_UPD_REG_OFST +
+ (info->hw_index << info->vi_window_shift);
+
+ sfc_ef10_essb_rx_info(&rxq->dp.dpq,
+ "block size is %u, buf stride is %u",
+ rxq->block_size, rxq->buf_stride);
+ sfc_ef10_essb_rx_info(&rxq->dp.dpq,
+ "max fill level is %u descs (%u bufs), "
+			      "refill threshold %u descs (%u bufs)",
+ rxq->max_fill_level,
+ rxq->max_fill_level * rxq->block_size,
+ rxq->refill_threshold,
+ rxq->refill_threshold * rxq->block_size);
+
+ *dp_rxqp = &rxq->dp;
+ return 0;
+
+fail_desc_alloc:
+ rte_free(rxq);
+
+fail_rxq_alloc:
+fail_no_block_dequeue:
+fail_get_contig_block_size:
+ return rc;
+}
+
+static sfc_dp_rx_qdestroy_t sfc_ef10_essb_rx_qdestroy;
+static void
+sfc_ef10_essb_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+
+ rte_free(rxq->sw_ring);
+ rte_free(rxq);
+}
+
+static sfc_dp_rx_qstart_t sfc_ef10_essb_rx_qstart;
+static int
+sfc_ef10_essb_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr)
+{
+ struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+
+ rxq->evq_read_ptr = evq_read_ptr;
+
+ /* Initialize before refill */
+ rxq->completed = rxq->pending_id = rxq->added = 0;
+ rxq->left_in_completed = rxq->left_in_pending = rxq->block_size;
+ rxq->bufs_ptr = UINT_MAX;
+ rxq->bufs_pending = 0;
+
+ sfc_ef10_essb_rx_qrefill(rxq);
+
+ rxq->flags |= SFC_EF10_ESSB_RXQ_STARTED;
+ rxq->flags &=
+ ~(SFC_EF10_ESSB_RXQ_NOT_RUNNING | SFC_EF10_ESSB_RXQ_EXCEPTION);
+
+ return 0;
+}
+
+static sfc_dp_rx_qstop_t sfc_ef10_essb_rx_qstop;
+static void
+sfc_ef10_essb_rx_qstop(struct sfc_dp_rxq *dp_rxq, unsigned int *evq_read_ptr)
+{
+ struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+
+ rxq->flags |= SFC_EF10_ESSB_RXQ_NOT_RUNNING;
+
+ *evq_read_ptr = rxq->evq_read_ptr;
+}
+
+static sfc_dp_rx_qrx_ev_t sfc_ef10_essb_rx_qrx_ev;
+static bool
+sfc_ef10_essb_rx_qrx_ev(struct sfc_dp_rxq *dp_rxq, __rte_unused unsigned int id)
+{
+ __rte_unused struct sfc_ef10_essb_rxq *rxq;
+
+ rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+ SFC_ASSERT(rxq->flags & SFC_EF10_ESSB_RXQ_NOT_RUNNING);
+
+ /*
+ * It is safe to ignore Rx event since we free all mbufs on
+ * queue purge anyway.
+ */
+
+ return false;
+}
+
+static sfc_dp_rx_qpurge_t sfc_ef10_essb_rx_qpurge;
+static void
+sfc_ef10_essb_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+ unsigned int i, j;
+ const struct sfc_ef10_essb_rx_sw_desc *rxd;
+ struct rte_mbuf *m;
+
+ if (rxq->completed != rxq->added && rxq->left_in_completed > 0) {
+ rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask];
+ m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
+ rxq->block_size - rxq->left_in_completed);
+ do {
+ rxq->left_in_completed--;
+ rte_mempool_put(rxq->refill_mb_pool, m);
+ m = sfc_ef10_essb_next_mbuf(rxq, m);
+ } while (rxq->left_in_completed > 0);
+ rxq->completed++;
+ }
+
+ for (i = rxq->completed; i != rxq->added; ++i) {
+ rxd = &rxq->sw_ring[i & rxq->rxq_ptr_mask];
+ m = rxd->first_mbuf;
+ for (j = 0; j < rxq->block_size; ++j) {
+ rte_mempool_put(rxq->refill_mb_pool, m);
+ m = sfc_ef10_essb_next_mbuf(rxq, m);
+ }
+ }
+
+ rxq->flags &= ~SFC_EF10_ESSB_RXQ_STARTED;
+}
+
+struct sfc_dp_rx sfc_ef10_essb_rx = {
+ .dp = {
+ .name = SFC_KVARG_DATAPATH_EF10_ESSB,
+ .type = SFC_DP_RX,
+ .hw_fw_caps = SFC_DP_HW_FW_CAP_EF10 |
+ SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER,
+ },
+ .features = SFC_DP_RX_FEAT_FLOW_FLAG |
+ SFC_DP_RX_FEAT_FLOW_MARK,
+ .get_dev_info = sfc_ef10_essb_rx_get_dev_info,
+ .pool_ops_supported = sfc_ef10_essb_rx_pool_ops_supported,
+ .qsize_up_rings = sfc_ef10_essb_rx_qsize_up_rings,
+ .qcreate = sfc_ef10_essb_rx_qcreate,
+ .qdestroy = sfc_ef10_essb_rx_qdestroy,
+ .qstart = sfc_ef10_essb_rx_qstart,
+ .qstop = sfc_ef10_essb_rx_qstop,
+ .qrx_ev = sfc_ef10_essb_rx_qrx_ev,
+ .qpurge = sfc_ef10_essb_rx_qpurge,
+ .supported_ptypes_get = sfc_ef10_supported_ptypes_get,
+ .qdesc_npending = sfc_ef10_essb_rx_qdesc_npending,
+ .qdesc_status = sfc_ef10_essb_rx_qdesc_status,
+ .pkt_burst = sfc_ef10_essb_recv_pkts,
+};
diff --git a/drivers/net/sfc/sfc_ef10_rx.c b/drivers/net/sfc/sfc_ef10_rx.c
index 0b3e8fbb..42b35b9b 100644
--- a/drivers/net/sfc/sfc_ef10_rx.c
+++ b/drivers/net/sfc/sfc_ef10_rx.c
@@ -26,16 +26,13 @@
#include "sfc_kvargs.h"
#include "sfc_ef10.h"
+#define SFC_EF10_RX_EV_ENCAP_SUPPORT 1
+#include "sfc_ef10_rx_ev.h"
+
#define sfc_ef10_rx_err(dpq, ...) \
SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__)
/**
- * Alignment requirement for value written to RX WPTR:
- * the WPTR must be aligned to an 8 descriptor boundary.
- */
-#define SFC_EF10_RX_WPTR_ALIGN 8
-
-/**
* Maximum number of descriptors/buffers in the Rx ring.
* It should guarantee that corresponding event queue never overfill.
* EF10 native datapath uses event queue of the same size as Rx queue.
@@ -88,29 +85,6 @@ sfc_ef10_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
}
static void
-sfc_ef10_rx_qpush(struct sfc_ef10_rxq *rxq)
-{
- efx_dword_t dword;
-
- /* Hardware has alignment restriction for WPTR */
- RTE_BUILD_BUG_ON(SFC_RX_REFILL_BULK % SFC_EF10_RX_WPTR_ALIGN != 0);
- SFC_ASSERT(RTE_ALIGN(rxq->added, SFC_EF10_RX_WPTR_ALIGN) == rxq->added);
-
- EFX_POPULATE_DWORD_1(dword, ERF_DZ_RX_DESC_WPTR,
- rxq->added & rxq->ptr_mask);
-
- /* DMA sync to device is not required */
-
- /*
- * rte_write32() has rte_io_wmb() which guarantees that the STORE
- * operations (i.e. Rx and event descriptor updates) that precede
- * the rte_io_wmb() call are visible to NIC before the STORE
- * operations that follow it (i.e. doorbell write).
- */
- rte_write32(dword.ed_u32[0], rxq->doorbell);
-}
-
-static void
sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq)
{
const unsigned int ptr_mask = rxq->ptr_mask;
@@ -120,6 +94,8 @@ sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq)
void *objs[SFC_RX_REFILL_BULK];
unsigned int added = rxq->added;
+ RTE_BUILD_BUG_ON(SFC_RX_REFILL_BULK % SFC_EF10_RX_WPTR_ALIGN != 0);
+
free_space = rxq->max_fill_level - (added - rxq->completed);
if (free_space < rxq->refill_threshold)
@@ -178,7 +154,7 @@ sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq)
SFC_ASSERT(rxq->added != added);
rxq->added = added;
- sfc_ef10_rx_qpush(rxq);
+ sfc_ef10_rx_qpush(rxq->doorbell, added, ptr_mask);
}
static void
@@ -225,137 +201,6 @@ sfc_ef10_rx_prepared(struct sfc_ef10_rxq *rxq, struct rte_mbuf **rx_pkts,
return n_rx_pkts;
}
-static void
-sfc_ef10_rx_ev_to_offloads(struct sfc_ef10_rxq *rxq, const efx_qword_t rx_ev,
- struct rte_mbuf *m)
-{
- uint32_t tun_ptype = 0;
- /* Which event bit is mapped to PKT_RX_IP_CKSUM_* */
- int8_t ip_csum_err_bit;
- /* Which event bit is mapped to PKT_RX_L4_CKSUM_* */
- int8_t l4_csum_err_bit;
- uint32_t l2_ptype = 0;
- uint32_t l3_ptype = 0;
- uint32_t l4_ptype = 0;
- uint64_t ol_flags = 0;
-
- if (unlikely(EFX_TEST_QWORD_BIT(rx_ev, ESF_DZ_RX_PARSE_INCOMPLETE_LBN)))
- goto done;
-
- switch (EFX_QWORD_FIELD(rx_ev, ESF_EZ_RX_ENCAP_HDR)) {
- default:
- /* Unexpected encapsulation tag class */
- SFC_ASSERT(false);
- /* FALLTHROUGH */
- case ESE_EZ_ENCAP_HDR_NONE:
- break;
- case ESE_EZ_ENCAP_HDR_VXLAN:
- /*
- * It is definitely UDP, but we have no information
- * about IPv4 vs IPv6 and VLAN tagging.
- */
- tun_ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP;
- break;
- case ESE_EZ_ENCAP_HDR_GRE:
- /*
- * We have no information about IPv4 vs IPv6 and VLAN tagging.
- */
- tun_ptype = RTE_PTYPE_TUNNEL_NVGRE;
- break;
- }
-
- if (tun_ptype == 0) {
- ip_csum_err_bit = ESF_DZ_RX_IPCKSUM_ERR_LBN;
- l4_csum_err_bit = ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN;
- } else {
- ip_csum_err_bit = ESF_EZ_RX_IP_INNER_CHKSUM_ERR_LBN;
- l4_csum_err_bit = ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_LBN;
- if (unlikely(EFX_TEST_QWORD_BIT(rx_ev,
- ESF_DZ_RX_IPCKSUM_ERR_LBN)))
- ol_flags |= PKT_RX_EIP_CKSUM_BAD;
- }
-
- switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_ETH_TAG_CLASS)) {
- case ESE_DZ_ETH_TAG_CLASS_NONE:
- l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER :
- RTE_PTYPE_INNER_L2_ETHER;
- break;
- case ESE_DZ_ETH_TAG_CLASS_VLAN1:
- l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER_VLAN :
- RTE_PTYPE_INNER_L2_ETHER_VLAN;
- break;
- case ESE_DZ_ETH_TAG_CLASS_VLAN2:
- l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER_QINQ :
- RTE_PTYPE_INNER_L2_ETHER_QINQ;
- break;
- default:
- /* Unexpected Eth tag class */
- SFC_ASSERT(false);
- }
-
- switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_L3_CLASS)) {
- case ESE_DZ_L3_CLASS_IP4_FRAG:
- l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_FRAG :
- RTE_PTYPE_INNER_L4_FRAG;
- /* FALLTHROUGH */
- case ESE_DZ_L3_CLASS_IP4:
- l3_ptype = (tun_ptype == 0) ? RTE_PTYPE_L3_IPV4_EXT_UNKNOWN :
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
- ol_flags |= PKT_RX_RSS_HASH |
- ((EFX_TEST_QWORD_BIT(rx_ev, ip_csum_err_bit)) ?
- PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD);
- break;
- case ESE_DZ_L3_CLASS_IP6_FRAG:
- l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_FRAG :
- RTE_PTYPE_INNER_L4_FRAG;
- /* FALLTHROUGH */
- case ESE_DZ_L3_CLASS_IP6:
- l3_ptype = (tun_ptype == 0) ? RTE_PTYPE_L3_IPV6_EXT_UNKNOWN :
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
- ol_flags |= PKT_RX_RSS_HASH;
- break;
- case ESE_DZ_L3_CLASS_ARP:
- /* Override Layer 2 packet type */
- /* There is no ARP classification for inner packets */
- if (tun_ptype == 0)
- l2_ptype = RTE_PTYPE_L2_ETHER_ARP;
- break;
- default:
- /* Unexpected Layer 3 class */
- SFC_ASSERT(false);
- }
-
- switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_L4_CLASS)) {
- case ESE_DZ_L4_CLASS_TCP:
- l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_TCP :
- RTE_PTYPE_INNER_L4_TCP;
- ol_flags |=
- (EFX_TEST_QWORD_BIT(rx_ev, l4_csum_err_bit)) ?
- PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;
- break;
- case ESE_DZ_L4_CLASS_UDP:
- l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_UDP :
- RTE_PTYPE_INNER_L4_UDP;
- ol_flags |=
- (EFX_TEST_QWORD_BIT(rx_ev, l4_csum_err_bit)) ?
- PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;
- break;
- case ESE_DZ_L4_CLASS_UNKNOWN:
- break;
- default:
- /* Unexpected Layer 4 class */
- SFC_ASSERT(false);
- }
-
- /* Remove RSS hash offload flag if RSS is not enabled */
- if (~rxq->flags & SFC_EF10_RXQ_RSS_HASH)
- ol_flags &= ~PKT_RX_RSS_HASH;
-
-done:
- m->ol_flags = ol_flags;
- m->packet_type = tun_ptype | l2_ptype | l3_ptype | l4_ptype;
-}
-
static uint16_t
sfc_ef10_rx_pseudo_hdr_get_len(const uint8_t *pseudo_hdr)
{
@@ -414,7 +259,10 @@ sfc_ef10_rx_process_event(struct sfc_ef10_rxq *rxq, efx_qword_t rx_ev,
m->rearm_data[0] = rxq->rearm_data;
/* Classify packet based on Rx event */
- sfc_ef10_rx_ev_to_offloads(rxq, rx_ev, m);
+ /* Mask RSS hash offload flag if RSS is not enabled */
+ sfc_ef10_rx_ev_to_offloads(rx_ev, m,
+ (rxq->flags & SFC_EF10_RXQ_RSS_HASH) ?
+ ~0ull : ~PKT_RX_RSS_HASH);
/* data_off already moved past pseudo header */
pseudo_hdr = (uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM;
@@ -538,7 +386,7 @@ sfc_ef10_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
return n_rx_pkts;
}
-static const uint32_t *
+const uint32_t *
sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps)
{
static const uint32_t ef10_native_ptypes[] = {
@@ -587,8 +435,8 @@ sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps)
1u << EFX_TUNNEL_PROTOCOL_NVGRE):
return ef10_overlay_ptypes;
default:
- RTE_LOG(ERR, PMD,
- "Unexpected set of supported tunnel encapsulations: %#x\n",
+ SFC_GENERIC_LOG(ERR,
+ "Unexpected set of supported tunnel encapsulations: %#x",
tunnel_encaps);
/* FALLTHROUGH */
case 0:
@@ -632,6 +480,7 @@ sfc_ef10_rx_get_dev_info(struct rte_eth_dev_info *dev_info)
static sfc_dp_rx_qsize_up_rings_t sfc_ef10_rx_qsize_up_rings;
static int
sfc_ef10_rx_qsize_up_rings(uint16_t nb_rx_desc,
+ __rte_unused struct rte_mempool *mb_pool,
unsigned int *rxq_entries,
unsigned int *evq_entries,
unsigned int *rxq_max_fill_level)
@@ -716,7 +565,7 @@ sfc_ef10_rx_qcreate(uint16_t port_id, uint16_t queue_id,
rxq->rxq_hw_ring = info->rxq_hw_ring;
rxq->doorbell = (volatile uint8_t *)info->mem_bar +
ER_DZ_RX_DESC_UPD_REG_OFST +
- info->hw_index * ER_DZ_RX_DESC_UPD_REG_STEP;
+ (info->hw_index << info->vi_window_shift);
*dp_rxqp = &rxq->dp;
return 0;
diff --git a/drivers/net/sfc/sfc_ef10_rx_ev.h b/drivers/net/sfc/sfc_ef10_rx_ev.h
new file mode 100644
index 00000000..615bd29b
--- /dev/null
+++ b/drivers/net/sfc/sfc_ef10_rx_ev.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#ifndef _SFC_EF10_RX_EV_H
+#define _SFC_EF10_RX_EV_H
+
+#include <rte_mbuf.h>
+
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_regs_ef10.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static inline void
+sfc_ef10_rx_ev_to_offloads(const efx_qword_t rx_ev, struct rte_mbuf *m,
+ uint64_t ol_mask)
+{
+ uint32_t tun_ptype = 0;
+ /* Which event bit is mapped to PKT_RX_IP_CKSUM_* */
+ int8_t ip_csum_err_bit;
+ /* Which event bit is mapped to PKT_RX_L4_CKSUM_* */
+ int8_t l4_csum_err_bit;
+ uint32_t l2_ptype = 0;
+ uint32_t l3_ptype = 0;
+ uint32_t l4_ptype = 0;
+ uint64_t ol_flags = 0;
+
+ if (unlikely(rx_ev.eq_u64[0] &
+ rte_cpu_to_le_64((1ull << ESF_DZ_RX_ECC_ERR_LBN) |
+ (1ull << ESF_DZ_RX_ECRC_ERR_LBN) |
+ (1ull << ESF_DZ_RX_PARSE_INCOMPLETE_LBN))))
+ goto done;
+
+#if SFC_EF10_RX_EV_ENCAP_SUPPORT
+ switch (EFX_QWORD_FIELD(rx_ev, ESF_EZ_RX_ENCAP_HDR)) {
+ default:
+ /* Unexpected encapsulation tag class */
+ SFC_ASSERT(false);
+ /* FALLTHROUGH */
+ case ESE_EZ_ENCAP_HDR_NONE:
+ break;
+ case ESE_EZ_ENCAP_HDR_VXLAN:
+ /*
+ * It is definitely UDP, but we have no information
+ * about IPv4 vs IPv6 and VLAN tagging.
+ */
+ tun_ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP;
+ break;
+ case ESE_EZ_ENCAP_HDR_GRE:
+ /*
+ * We have no information about IPv4 vs IPv6 and VLAN tagging.
+ */
+ tun_ptype = RTE_PTYPE_TUNNEL_NVGRE;
+ break;
+ }
+#endif
+
+ if (tun_ptype == 0) {
+ ip_csum_err_bit = ESF_DZ_RX_IPCKSUM_ERR_LBN;
+ l4_csum_err_bit = ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN;
+ } else {
+ ip_csum_err_bit = ESF_EZ_RX_IP_INNER_CHKSUM_ERR_LBN;
+ l4_csum_err_bit = ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_LBN;
+ if (unlikely(EFX_TEST_QWORD_BIT(rx_ev,
+ ESF_DZ_RX_IPCKSUM_ERR_LBN)))
+ ol_flags |= PKT_RX_EIP_CKSUM_BAD;
+ }
+
+ switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_ETH_TAG_CLASS)) {
+ case ESE_DZ_ETH_TAG_CLASS_NONE:
+ l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER :
+ RTE_PTYPE_INNER_L2_ETHER;
+ break;
+ case ESE_DZ_ETH_TAG_CLASS_VLAN1:
+ l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER_VLAN :
+ RTE_PTYPE_INNER_L2_ETHER_VLAN;
+ break;
+ case ESE_DZ_ETH_TAG_CLASS_VLAN2:
+ l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER_QINQ :
+ RTE_PTYPE_INNER_L2_ETHER_QINQ;
+ break;
+ default:
+ /* Unexpected Eth tag class */
+ SFC_ASSERT(false);
+ }
+
+ switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_L3_CLASS)) {
+ case ESE_DZ_L3_CLASS_IP4_FRAG:
+ l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_FRAG :
+ RTE_PTYPE_INNER_L4_FRAG;
+ /* FALLTHROUGH */
+ case ESE_DZ_L3_CLASS_IP4:
+ l3_ptype = (tun_ptype == 0) ? RTE_PTYPE_L3_IPV4_EXT_UNKNOWN :
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+ ol_flags |= PKT_RX_RSS_HASH |
+ ((EFX_TEST_QWORD_BIT(rx_ev, ip_csum_err_bit)) ?
+ PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD);
+ break;
+ case ESE_DZ_L3_CLASS_IP6_FRAG:
+ l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_FRAG :
+ RTE_PTYPE_INNER_L4_FRAG;
+ /* FALLTHROUGH */
+ case ESE_DZ_L3_CLASS_IP6:
+ l3_ptype = (tun_ptype == 0) ? RTE_PTYPE_L3_IPV6_EXT_UNKNOWN :
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+ ol_flags |= PKT_RX_RSS_HASH;
+ break;
+ case ESE_DZ_L3_CLASS_ARP:
+ /* Override Layer 2 packet type */
+ /* There is no ARP classification for inner packets */
+ if (tun_ptype == 0)
+ l2_ptype = RTE_PTYPE_L2_ETHER_ARP;
+ break;
+ default:
+ /* Unexpected Layer 3 class */
+ SFC_ASSERT(false);
+ }
+
+ /*
+ * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is only
+ * 2 bits wide on Medford2. Check it is safe to use the Medford2 field
+ * and values for all EF10 controllers.
+ */
+ RTE_BUILD_BUG_ON(ESF_FZ_RX_L4_CLASS_LBN != ESF_DE_RX_L4_CLASS_LBN);
+ switch (EFX_QWORD_FIELD(rx_ev, ESF_FZ_RX_L4_CLASS)) {
+ case ESE_FZ_L4_CLASS_TCP:
+ RTE_BUILD_BUG_ON(ESE_FZ_L4_CLASS_TCP != ESE_DE_L4_CLASS_TCP);
+ l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_TCP :
+ RTE_PTYPE_INNER_L4_TCP;
+ ol_flags |=
+ (EFX_TEST_QWORD_BIT(rx_ev, l4_csum_err_bit)) ?
+ PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;
+ break;
+ case ESE_FZ_L4_CLASS_UDP:
+ RTE_BUILD_BUG_ON(ESE_FZ_L4_CLASS_UDP != ESE_DE_L4_CLASS_UDP);
+ l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_UDP :
+ RTE_PTYPE_INNER_L4_UDP;
+ ol_flags |=
+ (EFX_TEST_QWORD_BIT(rx_ev, l4_csum_err_bit)) ?
+ PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;
+ break;
+ case ESE_FZ_L4_CLASS_UNKNOWN:
+ RTE_BUILD_BUG_ON(ESE_FZ_L4_CLASS_UNKNOWN !=
+ ESE_DE_L4_CLASS_UNKNOWN);
+ break;
+ default:
+ /* Unexpected Layer 4 class */
+ SFC_ASSERT(false);
+ }
+
+done:
+ m->ol_flags = ol_flags & ol_mask;
+ m->packet_type = tun_ptype | l2_ptype | l3_ptype | l4_ptype;
+}
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_EF10_RX_EV_H */
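/*
 * Editorial usage sketch for sfc_ef10_rx_ev_to_offloads(): the ol_mask
 * argument lets a caller strip offload flags it cannot honour without
 * branching inside the parser. In this patch the native EF10 datapath
 * masks PKT_RX_RSS_HASH when RSS is disabled, while the ESSB datapath
 * passes ~0ull and sets its own flags from the pseudo-header. The
 * wrapper name below is hypothetical.
 */
static inline void
sfc_example_classify(const efx_qword_t rx_ev, struct rte_mbuf *m,
		     int rss_enabled)
{
	sfc_ef10_rx_ev_to_offloads(rx_ev, m,
				   rss_enabled ? ~0ull :
				   ~(uint64_t)PKT_RX_RSS_HASH);
}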
diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c
index 12387972..d0daa3b3 100644
--- a/drivers/net/sfc/sfc_ef10_tx.c
+++ b/drivers/net/sfc/sfc_ef10_tx.c
@@ -531,7 +531,7 @@ sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id,
txq->txq_hw_ring = info->txq_hw_ring;
txq->doorbell = (volatile uint8_t *)info->mem_bar +
ER_DZ_TX_DESC_UPD_REG_OFST +
- info->hw_index * ER_DZ_TX_DESC_UPD_REG_STEP;
+ (info->hw_index << info->vi_window_shift);
txq->evq_hw_ring = info->evq_hw_ring;
*dp_txqp = &txq->dp;
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index 89a45290..1b6499f8 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -13,6 +13,7 @@
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_errno.h>
+#include <rte_string_fns.h>
#include "efx.h"
@@ -27,6 +28,8 @@
#include "sfc_dp.h"
#include "sfc_dp_rx.h"
+uint32_t sfc_logtype_driver;
+
static struct sfc_dp_list sfc_dp_head =
TAILQ_HEAD_INITIALIZER(sfc_dp_head);
@@ -82,12 +85,11 @@ static void
sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct sfc_adapter *sa = dev->data->dev_private;
- const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ struct sfc_rss *rss = &sa->rss;
uint64_t txq_offloads_def = 0;
sfc_log_init(sa, "entry");
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;
/* Autonegotiation may be disabled */
@@ -96,8 +98,14 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->speed_capa |= ETH_LINK_SPEED_1G;
if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_10000FDX)
dev_info->speed_capa |= ETH_LINK_SPEED_10G;
+ if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_25000FDX)
+ dev_info->speed_capa |= ETH_LINK_SPEED_25G;
if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_40000FDX)
dev_info->speed_capa |= ETH_LINK_SPEED_40G;
+ if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_50000FDX)
+ dev_info->speed_capa |= ETH_LINK_SPEED_50G;
+ if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_100000FDX)
+ dev_info->speed_capa |= ETH_LINK_SPEED_100G;
dev_info->max_rx_queues = sa->rxq_max;
dev_info->max_tx_queues = sa->txq_max;
@@ -130,27 +138,17 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->default_txconf.offloads |= txq_offloads_def;
- dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP;
- if ((~sa->dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) ||
- !encp->enc_hw_tx_insert_vlan_enabled)
- dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
-
- if (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)
- dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
-
- if (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL)
- dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOMULTMEMP;
+ if (rss->context_type != EFX_RX_SCALE_UNAVAILABLE) {
+ uint64_t rte_hf = 0;
+ unsigned int i;
- if (~sa->dp_tx->features & SFC_DP_TX_FEAT_REFCNT)
- dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOREFCOUNT;
+ for (i = 0; i < rss->hf_map_nb_entries; ++i)
+ rte_hf |= rss->hf_map[i].rte;
-#if EFSYS_OPT_RX_SCALE
- if (sa->rss_support != EFX_RX_SCALE_UNAVAILABLE) {
dev_info->reta_size = EFX_RSS_TBL_SIZE;
dev_info->hash_key_size = EFX_RSS_KEY_SIZE;
- dev_info->flow_type_rss_offloads = SFC_RSS_OFFLOADS;
+ dev_info->flow_type_rss_offloads = rte_hf;
}
-#endif
/* Initialize to hardware limits */
dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS;
@@ -236,22 +234,13 @@ static int
sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
struct sfc_adapter *sa = dev->data->dev_private;
- struct rte_eth_link *dev_link = &dev->data->dev_link;
- struct rte_eth_link old_link;
struct rte_eth_link current_link;
+ int ret;
sfc_log_init(sa, "entry");
-retry:
- EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
- *(int64_t *)&old_link = rte_atomic64_read((rte_atomic64_t *)dev_link);
-
if (sa->state != SFC_ADAPTER_STARTED) {
sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &current_link);
- if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link,
- *(uint64_t *)&old_link,
- *(uint64_t *)&current_link))
- goto retry;
} else if (wait_to_complete) {
efx_link_mode_t link_mode;
@@ -259,21 +248,17 @@ retry:
link_mode = EFX_LINK_UNKNOWN;
sfc_port_link_mode_to_info(link_mode, &current_link);
- if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link,
- *(uint64_t *)&old_link,
- *(uint64_t *)&current_link))
- goto retry;
} else {
sfc_ev_mgmt_qpoll(sa);
- *(int64_t *)&current_link =
- rte_atomic64_read((rte_atomic64_t *)dev_link);
+ rte_eth_linkstatus_get(dev, &current_link);
}
- if (old_link.link_status != current_link.link_status)
- sfc_info(sa, "Link status is %s",
- current_link.link_status ? "UP" : "DOWN");
+ ret = rte_eth_linkstatus_set(dev, &current_link);
+ if (ret == 0)
+ sfc_notice(sa, "Link status is %s",
+ current_link.link_status ? "UP" : "DOWN");
- return old_link.link_status == current_link.link_status ? 0 : -1;
+ return ret;
}
static void
@@ -664,7 +649,7 @@ sfc_xstats_get_names(struct rte_eth_dev *dev,
for (i = 0; i < EFX_MAC_NSTATS; ++i) {
if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
if (xstats_names != NULL && nstats < xstats_count)
- strncpy(xstats_names[nstats].name,
+ strlcpy(xstats_names[nstats].name,
efx_mac_stat_name(sa->nic, i),
sizeof(xstats_names[0].name));
nstats++;
@@ -742,9 +727,8 @@ sfc_xstats_get_names_by_id(struct rte_eth_dev *dev,
if ((ids == NULL) || (ids[nb_written] == nb_supported)) {
char *name = xstats_names[nb_written++].name;
- strncpy(name, efx_mac_stat_name(sa->nic, i),
+ strlcpy(name, efx_mac_stat_name(sa->nic, i),
sizeof(xstats_names[0].name));
- name[sizeof(xstats_names[0].name) - 1] = '\0';
}
++nb_supported;
@@ -920,13 +904,14 @@ fail_inval:
SFC_ASSERT(rc > 0);
return -rc;
}
-static void
+static int
sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
struct sfc_adapter *sa = dev->data->dev_private;
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
struct sfc_port *port = &sa->port;
- int rc;
+ struct ether_addr *old_addr = &dev->data->mac_addrs[0];
+ int rc = 0;
sfc_adapter_lock(sa);
@@ -936,15 +921,22 @@ sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
*/
ether_addr_copy(mac_addr, &port->default_mac_addr);
+ /*
+ * Neither of the two following checks can return
+ * an error. The new MAC address is preserved in
+ * the device private data and can be activated
+ * on the next port start if the user prevents
+ * isolated mode from being enabled.
+ */
if (port->isolated) {
- sfc_err(sa, "isolated mode is active on the port");
- sfc_err(sa, "will not set MAC address");
+ sfc_warn(sa, "isolated mode is active on the port");
+ sfc_warn(sa, "will not set MAC address");
goto unlock;
}
if (sa->state != SFC_ADAPTER_STARTED) {
- sfc_info(sa, "the port is not started");
- sfc_info(sa, "the new MAC address will be set on port start");
+ sfc_notice(sa, "the port is not started");
+ sfc_notice(sa, "the new MAC address will be set on port start");
goto unlock;
}
@@ -962,8 +954,12 @@ sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
* we also need to update unicast filters
*/
rc = sfc_set_rx_mode(sa);
- if (rc != 0)
+ if (rc != 0) {
sfc_err(sa, "cannot set filter (rc = %u)", rc);
+ /* Rollback the old address */
+ (void)efx_mac_addr_set(sa->nic, old_addr->addr_bytes);
+ (void)sfc_set_rx_mode(sa);
+ }
} else {
sfc_warn(sa, "cannot set MAC address with filters installed");
sfc_warn(sa, "adapter will be restarted to pick the new MAC");
@@ -982,14 +978,13 @@ sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
}
unlock:
- /*
- * In the case of failure sa->port->default_mac_addr does not
- * need rollback since no error code is returned, and the upper
- * API will anyway update the external MAC address storage.
- * To be consistent with that new value it is better to keep
- * the device private value the same.
- */
+ if (rc != 0)
+ ether_addr_copy(old_addr, &port->default_mac_addr);
+
sfc_adapter_unlock(sa);
+
+ SFC_ASSERT(rc >= 0);
+ return -rc;
}
@@ -1352,18 +1347,18 @@ sfc_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_DEL_PORT);
}
-#if EFSYS_OPT_RX_SCALE
static int
sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_rss *rss = &sa->rss;
struct sfc_port *port = &sa->port;
- if ((sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) || port->isolated)
+ if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE || port->isolated)
return -ENOTSUP;
- if (sa->rss_channels == 0)
+ if (rss->channels == 0)
return -EINVAL;
sfc_adapter_lock(sa);
@@ -1374,10 +1369,10 @@ sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
* flags which corresponds to the active EFX configuration stored
* locally in 'sfc_adapter' and kept up-to-date
*/
- rss_conf->rss_hf = sfc_efx_to_rte_hash_type(sa->rss_hash_types);
+ rss_conf->rss_hf = sfc_rx_hf_efx_to_rte(sa, rss->hash_types);
rss_conf->rss_key_len = EFX_RSS_KEY_SIZE;
if (rss_conf->rss_key != NULL)
- rte_memcpy(rss_conf->rss_key, sa->rss_key, EFX_RSS_KEY_SIZE);
+ rte_memcpy(rss_conf->rss_key, rss->key, EFX_RSS_KEY_SIZE);
sfc_adapter_unlock(sa);
@@ -1389,6 +1384,7 @@ sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_rss *rss = &sa->rss;
struct sfc_port *port = &sa->port;
unsigned int efx_hash_types;
int rc = 0;
@@ -1396,35 +1392,31 @@ sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
if (port->isolated)
return -ENOTSUP;
- if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) {
+ if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
sfc_err(sa, "RSS is not available");
return -ENOTSUP;
}
- if (sa->rss_channels == 0) {
+ if (rss->channels == 0) {
sfc_err(sa, "RSS is not configured");
return -EINVAL;
}
if ((rss_conf->rss_key != NULL) &&
- (rss_conf->rss_key_len != sizeof(sa->rss_key))) {
+ (rss_conf->rss_key_len != sizeof(rss->key))) {
sfc_err(sa, "RSS key size is wrong (should be %lu)",
- sizeof(sa->rss_key));
- return -EINVAL;
- }
-
- if ((rss_conf->rss_hf & ~SFC_RSS_OFFLOADS) != 0) {
- sfc_err(sa, "unsupported hash functions requested");
+ sizeof(rss->key));
return -EINVAL;
}
sfc_adapter_lock(sa);
- efx_hash_types = sfc_rte_to_efx_hash_type(rss_conf->rss_hf);
+ rc = sfc_rx_hf_rte_to_efx(sa, rss_conf->rss_hf, &efx_hash_types);
+ if (rc != 0)
+ goto fail_rx_hf_rte_to_efx;
rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
- EFX_RX_HASHALG_TOEPLITZ,
- efx_hash_types, B_TRUE);
+ rss->hash_alg, efx_hash_types, B_TRUE);
if (rc != 0)
goto fail_scale_mode_set;
@@ -1433,15 +1425,15 @@ sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
rc = efx_rx_scale_key_set(sa->nic,
EFX_RSS_CONTEXT_DEFAULT,
rss_conf->rss_key,
- sizeof(sa->rss_key));
+ sizeof(rss->key));
if (rc != 0)
goto fail_scale_key_set;
}
- rte_memcpy(sa->rss_key, rss_conf->rss_key, sizeof(sa->rss_key));
+ rte_memcpy(rss->key, rss_conf->rss_key, sizeof(rss->key));
}
- sa->rss_hash_types = efx_hash_types;
+ rss->hash_types = efx_hash_types;
sfc_adapter_unlock(sa);
@@ -1450,10 +1442,11 @@ sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
fail_scale_key_set:
if (efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
EFX_RX_HASHALG_TOEPLITZ,
- sa->rss_hash_types, B_TRUE) != 0)
+ rss->hash_types, B_TRUE) != 0)
sfc_err(sa, "failed to restore RSS mode");
fail_scale_mode_set:
+fail_rx_hf_rte_to_efx:
sfc_adapter_unlock(sa);
return -rc;
}
@@ -1464,13 +1457,14 @@ sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
uint16_t reta_size)
{
struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_rss *rss = &sa->rss;
struct sfc_port *port = &sa->port;
int entry;
- if ((sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) || port->isolated)
+ if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE || port->isolated)
return -ENOTSUP;
- if (sa->rss_channels == 0)
+ if (rss->channels == 0)
return -EINVAL;
if (reta_size != EFX_RSS_TBL_SIZE)
@@ -1483,7 +1477,7 @@ sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
int grp_idx = entry % RTE_RETA_GROUP_SIZE;
if ((reta_conf[grp].mask >> grp_idx) & 1)
- reta_conf[grp].reta[grp_idx] = sa->rss_tbl[entry];
+ reta_conf[grp].reta[grp_idx] = rss->tbl[entry];
}
sfc_adapter_unlock(sa);
@@ -1497,6 +1491,7 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
uint16_t reta_size)
{
struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_rss *rss = &sa->rss;
struct sfc_port *port = &sa->port;
unsigned int *rss_tbl_new;
uint16_t entry;
@@ -1506,12 +1501,12 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
if (port->isolated)
return -ENOTSUP;
- if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) {
+ if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
sfc_err(sa, "RSS is not available");
return -ENOTSUP;
}
- if (sa->rss_channels == 0) {
+ if (rss->channels == 0) {
sfc_err(sa, "RSS is not configured");
return -EINVAL;
}
@@ -1522,13 +1517,13 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
return -EINVAL;
}
- rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(sa->rss_tbl), 0);
+ rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(rss->tbl), 0);
if (rss_tbl_new == NULL)
return -ENOMEM;
sfc_adapter_lock(sa);
- rte_memcpy(rss_tbl_new, sa->rss_tbl, sizeof(sa->rss_tbl));
+ rte_memcpy(rss_tbl_new, rss->tbl, sizeof(rss->tbl));
for (entry = 0; entry < reta_size; entry++) {
int grp_idx = entry % RTE_RETA_GROUP_SIZE;
@@ -1537,7 +1532,7 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE];
if (grp->mask & (1ull << grp_idx)) {
- if (grp->reta[grp_idx] >= sa->rss_channels) {
+ if (grp->reta[grp_idx] >= rss->channels) {
rc = EINVAL;
goto bad_reta_entry;
}
@@ -1552,7 +1547,7 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
goto fail_scale_tbl_set;
}
- rte_memcpy(sa->rss_tbl, rss_tbl_new, sizeof(sa->rss_tbl));
+ rte_memcpy(rss->tbl, rss_tbl_new, sizeof(rss->tbl));
fail_scale_tbl_set:
bad_reta_entry:
@@ -1563,7 +1558,6 @@ bad_reta_entry:
SFC_ASSERT(rc >= 0);
return -rc;
}
-#endif
static int
sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
@@ -1621,6 +1615,21 @@ sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
return -rc;
}
+static int
+sfc_pool_ops_supported(struct rte_eth_dev *dev, const char *pool)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ /*
+ * If Rx datapath does not provide callback to check mempool,
+ * all pools are supported.
+ */
+ if (sa->dp_rx->pool_ops_supported == NULL)
+ return 1;
+
+ return sa->dp_rx->pool_ops_supported(pool);
+}
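/*
 * Editorial illustration: with the ESSB Rx datapath selected, the callback
 * above resolves to sfc_ef10_essb_rx_pool_ops_supported(), so only the
 * "bucket" mempool handler is reported as supported; datapaths that do not
 * provide the callback accept every handler.
 *
 *   sfc_pool_ops_supported(dev, "bucket");      -> 0 (supported)
 *   sfc_pool_ops_supported(dev, "ring_mp_mc");  -> -ENOTSUP
 *   (no pool_ops_supported callback)            -> 1 (all supported)
 */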
+
static const struct eth_dev_ops sfc_eth_dev_ops = {
.dev_configure = sfc_dev_configure,
.dev_start = sfc_dev_start,
@@ -1658,12 +1667,10 @@ static const struct eth_dev_ops sfc_eth_dev_ops = {
.mac_addr_set = sfc_mac_addr_set,
.udp_tunnel_port_add = sfc_dev_udp_tunnel_port_add,
.udp_tunnel_port_del = sfc_dev_udp_tunnel_port_del,
-#if EFSYS_OPT_RX_SCALE
.reta_update = sfc_dev_rss_reta_update,
.reta_query = sfc_dev_rss_reta_query,
.rss_hash_update = sfc_dev_rss_hash_update,
.rss_hash_conf_get = sfc_dev_rss_hash_conf_get,
-#endif
.filter_ctrl = sfc_dev_filter_ctrl,
.set_mc_addr_list = sfc_set_mc_addr_list,
.rxq_info_get = sfc_rx_queue_info_get,
@@ -1671,6 +1678,7 @@ static const struct eth_dev_ops sfc_eth_dev_ops = {
.fw_version_get = sfc_fw_version_get,
.xstats_get_by_id = sfc_xstats_get_by_id,
.xstats_get_names_by_id = sfc_xstats_get_names_by_id,
+ .pool_ops_supported = sfc_pool_ops_supported,
};
/**
@@ -1700,6 +1708,7 @@ static int
sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
{
struct sfc_adapter *sa = dev->data->dev_private;
+ const efx_nic_cfg_t *encp;
unsigned int avail_caps = 0;
const char *rx_name = NULL;
const char *tx_name = NULL;
@@ -1708,12 +1717,17 @@ sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
switch (sa->family) {
case EFX_FAMILY_HUNTINGTON:
case EFX_FAMILY_MEDFORD:
+ case EFX_FAMILY_MEDFORD2:
avail_caps |= SFC_DP_HW_FW_CAP_EF10;
break;
default:
break;
}
+ encp = efx_nic_cfg_get(sa->nic);
+ if (encp->enc_rx_es_super_buffer_supported)
+ avail_caps |= SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER;
+
rc = sfc_kvargs_process(sa, SFC_KVARG_RX_DATAPATH,
sfc_kvarg_string_handler, &rx_name);
if (rc != 0)
@@ -1749,7 +1763,7 @@ sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
goto fail_dp_rx_name;
}
- sfc_info(sa, "use %s Rx datapath", sa->dp_rx_name);
+ sfc_notice(sa, "use %s Rx datapath", sa->dp_rx_name);
dev->rx_pkt_burst = sa->dp_rx->pkt_burst;
@@ -1788,7 +1802,7 @@ sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
goto fail_dp_tx_name;
}
- sfc_info(sa, "use %s Tx datapath", sa->dp_tx_name);
+ sfc_notice(sa, "use %s Tx datapath", sa->dp_tx_name);
dev->tx_pkt_burst = sa->dp_tx->pkt_burst;
@@ -1903,6 +1917,7 @@ sfc_register_dp(void)
/* Register once */
if (TAILQ_EMPTY(&sfc_dp_head)) {
/* Prefer EF10 datapath */
+ sfc_dp_register(&sfc_dp_head, &sfc_ef10_essb_rx.dp);
sfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp);
sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp);
@@ -1935,15 +1950,13 @@ sfc_eth_dev_init(struct rte_eth_dev *dev)
/* Copy PCI device info to the dev->data */
rte_eth_copy_pci_info(dev, pci_dev);
+ sa->logtype_main = sfc_register_logtype(sa, SFC_LOGTYPE_MAIN_STR,
+ RTE_LOG_NOTICE);
+
rc = sfc_kvargs_parse(sa);
if (rc != 0)
goto fail_kvargs_parse;
- rc = sfc_kvargs_process(sa, SFC_KVARG_DEBUG_INIT,
- sfc_kvarg_bool_handler, &sa->debug_init);
- if (rc != 0)
- goto fail_kvarg_debug_init;
-
sfc_log_init(sa, "entry");
dev->data->mac_addrs = rte_zmalloc("sfc", ETHER_ADDR_LEN, 0);
@@ -1997,7 +2010,6 @@ fail_probe:
dev->data->mac_addrs = NULL;
fail_mac_addrs:
-fail_kvarg_debug_init:
sfc_kvargs_cleanup(sa);
fail_kvargs_parse:
@@ -2048,6 +2060,8 @@ static const struct rte_pci_id pci_id_sfc_efx_map[] = {
{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT_VF) },
{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) },
{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD_VF) },
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2) },
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2_VF) },
{ .vendor_id = 0 /* sentinel */ }
};
@@ -2079,6 +2093,17 @@ RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
SFC_KVARG_RX_DATAPATH "=" SFC_KVARG_VALUES_RX_DATAPATH " "
SFC_KVARG_TX_DATAPATH "=" SFC_KVARG_VALUES_TX_DATAPATH " "
SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "
- SFC_KVARG_STATS_UPDATE_PERIOD_MS "=<long> "
- SFC_KVARG_MCDI_LOGGING "=" SFC_KVARG_VALUES_BOOL " "
- SFC_KVARG_DEBUG_INIT "=" SFC_KVARG_VALUES_BOOL);
+ SFC_KVARG_FW_VARIANT "=" SFC_KVARG_VALUES_FW_VARIANT " "
+ SFC_KVARG_RXD_WAIT_TIMEOUT_NS "=<long> "
+ SFC_KVARG_STATS_UPDATE_PERIOD_MS "=<long>");
+
+RTE_INIT(sfc_driver_register_logtype);
+static void
+sfc_driver_register_logtype(void)
+{
+ int ret;
+
+ ret = rte_log_register_type_and_pick_level(SFC_LOGTYPE_PREFIX "driver",
+ RTE_LOG_NOTICE);
+ sfc_logtype_driver = (ret < 0) ? RTE_LOGTYPE_PMD : ret;
+}
diff --git a/drivers/net/sfc/sfc_ev.c b/drivers/net/sfc/sfc_ev.c
index 7abe61ae..f93d30e5 100644
--- a/drivers/net/sfc/sfc_ev.c
+++ b/drivers/net/sfc/sfc_ev.c
@@ -163,6 +163,35 @@ sfc_ev_dp_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
}
static boolean_t
+sfc_ev_nop_rx_ps(void *arg, uint32_t label, uint32_t id,
+ uint32_t pkt_count, uint16_t flags)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa,
+ "EVQ %u unexpected packed stream Rx event label=%u id=%#x pkt_count=%u flags=%#x",
+ evq->evq_index, label, id, pkt_count, flags);
+ return B_TRUE;
+}
+
+/* It is not actually used on the datapath, but is required on RxQ flush */
+static boolean_t
+sfc_ev_dp_rx_ps(void *arg, __rte_unused uint32_t label, uint32_t id,
+ __rte_unused uint32_t pkt_count, __rte_unused uint16_t flags)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_dp_rxq *dp_rxq;
+
+ dp_rxq = evq->dp_rxq;
+ SFC_ASSERT(dp_rxq != NULL);
+
+ if (evq->sa->dp_rx->qrx_ps_ev != NULL)
+ return evq->sa->dp_rx->qrx_ps_ev(dp_rxq, id);
+ else
+ return B_FALSE;
+}
+
+static boolean_t
sfc_ev_nop_tx(void *arg, uint32_t label, uint32_t id)
{
struct sfc_evq *evq = arg;
@@ -382,27 +411,11 @@ sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
{
struct sfc_evq *evq = arg;
struct sfc_adapter *sa = evq->sa;
- struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link;
struct rte_eth_link new_link;
- uint64_t new_link_u64;
- uint64_t old_link_u64;
-
- EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
sfc_port_link_mode_to_info(link_mode, &new_link);
-
- new_link_u64 = *(uint64_t *)&new_link;
- do {
- old_link_u64 = rte_atomic64_read((rte_atomic64_t *)dev_link);
- if (old_link_u64 == new_link_u64)
- break;
-
- if (rte_atomic64_cmpset((volatile uint64_t *)dev_link,
- old_link_u64, new_link_u64)) {
- evq->sa->port.lsc_seq++;
- break;
- }
- } while (B_TRUE);
+ if (rte_eth_linkstatus_set(sa->eth_dev, &new_link))
+ evq->sa->port.lsc_seq++;
return B_FALSE;
}
@@ -410,6 +423,7 @@ sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
static const efx_ev_callbacks_t sfc_ev_callbacks = {
.eec_initialized = sfc_ev_initialized,
.eec_rx = sfc_ev_nop_rx,
+ .eec_rx_ps = sfc_ev_nop_rx_ps,
.eec_tx = sfc_ev_nop_tx,
.eec_exception = sfc_ev_exception,
.eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
@@ -425,6 +439,7 @@ static const efx_ev_callbacks_t sfc_ev_callbacks = {
static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = {
.eec_initialized = sfc_ev_initialized,
.eec_rx = sfc_ev_efx_rx,
+ .eec_rx_ps = sfc_ev_nop_rx_ps,
.eec_tx = sfc_ev_nop_tx,
.eec_exception = sfc_ev_exception,
.eec_rxq_flush_done = sfc_ev_rxq_flush_done,
@@ -440,6 +455,7 @@ static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = {
static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
.eec_initialized = sfc_ev_initialized,
.eec_rx = sfc_ev_dp_rx,
+ .eec_rx_ps = sfc_ev_dp_rx_ps,
.eec_tx = sfc_ev_nop_tx,
.eec_exception = sfc_ev_exception,
.eec_rxq_flush_done = sfc_ev_rxq_flush_done,
@@ -455,6 +471,7 @@ static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = {
.eec_initialized = sfc_ev_initialized,
.eec_rx = sfc_ev_nop_rx,
+ .eec_rx_ps = sfc_ev_nop_rx_ps,
.eec_tx = sfc_ev_tx,
.eec_exception = sfc_ev_exception,
.eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
@@ -470,6 +487,7 @@ static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = {
static const efx_ev_callbacks_t sfc_ev_callbacks_dp_tx = {
.eec_initialized = sfc_ev_initialized,
.eec_rx = sfc_ev_nop_rx,
+ .eec_rx_ps = sfc_ev_nop_rx_ps,
.eec_tx = sfc_ev_dp_tx,
.eec_exception = sfc_ev_exception,
.eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
@@ -837,7 +855,7 @@ static int
sfc_kvarg_perf_profile_handler(__rte_unused const char *key,
const char *value_str, void *opaque)
{
- uint64_t *value = opaque;
+ uint32_t *value = opaque;
if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0)
*value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index 93cdf8f4..5613d59a 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -7,6 +7,7 @@
* for Solarflare) and Solarflare Communications, Inc.
*/
+#include <rte_byteorder.h>
#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev_driver.h>
@@ -22,13 +23,17 @@
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"
+#include "sfc_dp_rx.h"
/*
* At now flow API is implemented in such a manner that each
- * flow rule is converted to a hardware filter.
+ * flow rule is converted to one or more hardware filters.
* All elements of flow rule (attributes, pattern items, actions)
* correspond to one or more fields in the efx_filter_spec_s structure
* that is responsible for the hardware filter.
+ * If some required field is unset in the flow rule, then a handful
+ * of filter copies will be created to cover all possible values
+ * of such a field.
*/
enum sfc_flow_item_layers {
@@ -57,6 +62,37 @@ static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
+static sfc_flow_item_parse sfc_flow_parse_vxlan;
+static sfc_flow_item_parse sfc_flow_parse_geneve;
+static sfc_flow_item_parse sfc_flow_parse_nvgre;
+
+typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
+ unsigned int filters_count_for_one_val,
+ struct rte_flow_error *error);
+
+typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
+ efx_filter_spec_t *spec,
+ struct sfc_filter *filter);
+
+struct sfc_flow_copy_flag {
+ /* EFX filter specification match flag */
+ efx_filter_match_flags_t flag;
+ /* Number of values of corresponding field */
+ unsigned int vals_count;
+ /* Function to set values in specifications */
+ sfc_flow_spec_set_vals *set_vals;
+ /*
+ * Function to check that the specification is suitable
+ * for adding this match flag
+ */
+ sfc_flow_spec_check *spec_check;
+};
+
+static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
+static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
+static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
+static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
+static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
@@ -85,7 +121,6 @@ sfc_flow_parse_init(const struct rte_flow_item *item,
const uint8_t *spec;
const uint8_t *mask;
const uint8_t *last;
- uint8_t match;
uint8_t supp;
unsigned int i;
@@ -115,13 +150,13 @@ sfc_flow_parse_init(const struct rte_flow_item *item,
return -rte_errno;
}
- mask = (const uint8_t *)def_mask;
+ mask = def_mask;
} else {
- mask = (const uint8_t *)item->mask;
+ mask = item->mask;
}
- spec = (const uint8_t *)item->spec;
- last = (const uint8_t *)item->last;
+ spec = item->spec;
+ last = item->last;
if (spec == NULL)
goto exit;
@@ -146,12 +181,11 @@ sfc_flow_parse_init(const struct rte_flow_item *item,
return -rte_errno;
}
- /* Check that mask and spec not asks for more match than supp_mask */
+ /* Check that mask does not ask for more match than supp_mask */
for (i = 0; i < size; i++) {
- match = spec[i] | mask[i];
supp = ((const uint8_t *)supp_mask)[i];
- if ((match | supp) != supp) {
+ if (~supp & mask[i]) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"Item's field is not supported");
@@ -184,11 +218,11 @@ sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
* Convert Ethernet item to EFX filter specification.
*
* @param item[in]
- * Item specification. Only source and destination addresses and
- * Ethernet type fields are supported. In addition to full and
- * empty masks of destination address, individual/group mask is
- * also supported. If the mask is NULL, default mask will be used.
- * Ranging is not supported.
+ * Item specification. Outer frame specification may only comprise
+ * source/destination addresses and Ethertype field.
+ * Inner frame specification may contain destination address only.
+ * Individual/group masks are supported, as well as empty and full masks.
+ * If the mask is NULL, default mask will be used. Ranging is not supported.
* @param efx_spec[in, out]
* EFX filter specification to update.
* @param[out] error
@@ -207,15 +241,32 @@ sfc_flow_parse_eth(const struct rte_flow_item *item,
.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
.type = 0xffff,
};
+ const struct rte_flow_item_eth ifrm_supp_mask = {
+ .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ };
const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
0x01, 0x00, 0x00, 0x00, 0x00, 0x00
};
+ const struct rte_flow_item_eth *supp_mask_p;
+ const struct rte_flow_item_eth *def_mask_p;
+ uint8_t *loc_mac = NULL;
+ boolean_t is_ifrm = (efx_spec->efs_encap_type !=
+ EFX_TUNNEL_PROTOCOL_NONE);
+
+ if (is_ifrm) {
+ supp_mask_p = &ifrm_supp_mask;
+ def_mask_p = &ifrm_supp_mask;
+ loc_mac = efx_spec->efs_ifrm_loc_mac;
+ } else {
+ supp_mask_p = &supp_mask;
+ def_mask_p = &rte_flow_item_eth_mask;
+ loc_mac = efx_spec->efs_loc_mac;
+ }
rc = sfc_flow_parse_init(item,
(const void **)&spec,
(const void **)&mask,
- &supp_mask,
- &rte_flow_item_eth_mask,
+ supp_mask_p, def_mask_p,
sizeof(struct rte_flow_item_eth),
error);
if (rc != 0)
@@ -226,21 +277,30 @@ sfc_flow_parse_eth(const struct rte_flow_item *item,
return 0;
if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
- efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
- rte_memcpy(efx_spec->efs_loc_mac, spec->dst.addr_bytes,
+ efx_spec->efs_match_flags |= is_ifrm ?
+ EFX_FILTER_MATCH_IFRM_LOC_MAC :
+ EFX_FILTER_MATCH_LOC_MAC;
+ rte_memcpy(loc_mac, spec->dst.addr_bytes,
EFX_MAC_ADDR_LEN);
} else if (memcmp(mask->dst.addr_bytes, ig_mask,
EFX_MAC_ADDR_LEN) == 0) {
if (is_unicast_ether_addr(&spec->dst))
- efx_spec->efs_match_flags |=
+ efx_spec->efs_match_flags |= is_ifrm ?
+ EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
else
- efx_spec->efs_match_flags |=
+ efx_spec->efs_match_flags |= is_ifrm ?
+ EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
} else if (!is_zero_ether_addr(&mask->dst)) {
goto fail_bad_mask;
}
+ /*
+ * ifrm_supp_mask ensures that the source address and
+ * ethertype masks are equal to zero in the inner frame,
+ * so these fields are filled in only for the outer frame
+ */
if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
@@ -291,6 +351,7 @@ sfc_flow_parse_vlan(const struct rte_flow_item *item,
const struct rte_flow_item_vlan *mask = NULL;
const struct rte_flow_item_vlan supp_mask = {
.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
+ .inner_type = RTE_BE16(0xffff),
};
rc = sfc_flow_parse_init(item,
@@ -333,6 +394,22 @@ sfc_flow_parse_vlan(const struct rte_flow_item *item,
return -rte_errno;
}
+ if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "VLAN TPID matching is not supported");
+ return -rte_errno;
+ }
+ if (mask->inner_type == supp_mask.inner_type) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+ efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
+ } else if (mask->inner_type) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Bad mask for VLAN inner_type");
+ return -rte_errno;
+ }
+
return 0;
}
@@ -696,6 +773,253 @@ fail_bad_mask:
return -rte_errno;
}
+/*
+ * Filters for encapsulated packets match based on the EtherType and IP
+ * protocol in the outer frame.
+ */
+static int
+sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ uint8_t ip_proto,
+ struct rte_flow_error *error)
+{
+ if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+ efx_spec->efs_ip_proto = ip_proto;
+ } else if (efx_spec->efs_ip_proto != ip_proto) {
+ switch (ip_proto) {
+ case EFX_IPPROTO_UDP:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Outer IP header protocol must be UDP "
+ "in VxLAN/GENEVE pattern");
+ return -rte_errno;
+
+ case EFX_IPPROTO_GRE:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Outer IP header protocol must be GRE "
+ "in NVGRE pattern");
+ return -rte_errno;
+
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Only VxLAN/GENEVE/NVGRE tunneling patterns "
+ "are supported");
+ return -rte_errno;
+ }
+ }
+
+ if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
+ efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
+ efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Outer frame EtherType in pattern with tunneling "
+ "must be IPv4 or IPv6");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
+ const uint8_t *vni_or_vsid_val,
+ const uint8_t *vni_or_vsid_mask,
+ const struct rte_flow_item *item,
+ struct rte_flow_error *error)
+{
+ const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
+ 0xff, 0xff, 0xff
+ };
+
+ if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
+ EFX_VNI_OR_VSID_LEN) == 0) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
+ rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
+ EFX_VNI_OR_VSID_LEN);
+ } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Unsupported VNI/VSID mask");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/**
+ * Convert VXLAN item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only VXLAN network identifier field is supported.
+ * If the mask is NULL, default mask will be used.
+ * Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_vxlan(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_vxlan *spec = NULL;
+ const struct rte_flow_item_vxlan *mask = NULL;
+ const struct rte_flow_item_vxlan supp_mask = {
+ .vni = { 0xff, 0xff, 0xff }
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_vxlan_mask,
+ sizeof(struct rte_flow_item_vxlan),
+ error);
+ if (rc != 0)
+ return rc;
+
+ rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
+ EFX_IPPROTO_UDP, error);
+ if (rc != 0)
+ return rc;
+
+ efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
+
+ if (spec == NULL)
+ return 0;
+
+ rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
+ mask->vni, item, error);
+
+ return rc;
+}
+
+/**
+ * Convert GENEVE item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only Virtual Network Identifier and protocol type
+ * fields are supported. However, the protocol type may only be Ethernet (0x6558).
+ * If the mask is NULL, default mask will be used.
+ * Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_geneve(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_geneve *spec = NULL;
+ const struct rte_flow_item_geneve *mask = NULL;
+ const struct rte_flow_item_geneve supp_mask = {
+ .protocol = RTE_BE16(0xffff),
+ .vni = { 0xff, 0xff, 0xff }
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_geneve_mask,
+ sizeof(struct rte_flow_item_geneve),
+ error);
+ if (rc != 0)
+ return rc;
+
+ rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
+ EFX_IPPROTO_UDP, error);
+ if (rc != 0)
+ return rc;
+
+ efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
+
+ if (spec == NULL)
+ return 0;
+
+ if (mask->protocol == supp_mask.protocol) {
+ if (spec->protocol != rte_cpu_to_be_16(ETHER_TYPE_TEB)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "GENEVE encap. protocol must be Ethernet "
+ "(0x6558) in the GENEVE pattern item");
+ return -rte_errno;
+ }
+ } else if (mask->protocol != 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Unsupported mask for GENEVE encap. protocol");
+ return -rte_errno;
+ }
+
+ rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
+ mask->vni, item, error);
+
+ return rc;
+}
+
+/**
+ * Convert NVGRE item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only virtual subnet ID field is supported.
+ * If the mask is NULL, default mask will be used.
+ * Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_nvgre(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_nvgre *spec = NULL;
+ const struct rte_flow_item_nvgre *mask = NULL;
+ const struct rte_flow_item_nvgre supp_mask = {
+ .tni = { 0xff, 0xff, 0xff }
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_nvgre_mask,
+ sizeof(struct rte_flow_item_nvgre),
+ error);
+ if (rc != 0)
+ return rc;
+
+ rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
+ EFX_IPPROTO_GRE, error);
+ if (rc != 0)
+ return rc;
+
+ efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
+
+ if (spec == NULL)
+ return 0;
+
+ rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
+ mask->tni, item, error);
+
+ return rc;
+}
+
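
The parsers above make it possible to match tunneled traffic through rte_flow. The following is a minimal illustrative sketch of such a rule, assuming the generic rte_flow API from <rte_flow.h>; the port id, VNI value and queue index are hypothetical and not part of the patch:

static struct rte_flow *
example_vxlan_flow(uint16_t port_id, struct rte_flow_error *error)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	/* Hypothetical VNI 42; a full mask selects EFX_FILTER_MATCH_VNI_OR_VSID */
	const struct rte_flow_item_vxlan vxlan_spec = {
		.vni = { 0x00, 0x00, 0x2a },
	};
	const struct rte_flow_item_vxlan vxlan_mask = {
		.vni = { 0xff, 0xff, 0xff },
	};
	const struct rte_flow_action_queue queue = { .index = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* outer frame */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
		  .spec = &vxlan_spec, .mask = &vxlan_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* inner frame */
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}

Note that only VOID and ETH items are accepted in the inner frame by sfc_flow_parse_pattern(), and exactly one tunnel item is allowed per rule.
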
static const struct sfc_flow_item sfc_flow_items[] = {
{
.type = RTE_FLOW_ITEM_TYPE_VOID,
@@ -739,6 +1063,24 @@ static const struct sfc_flow_item sfc_flow_items[] = {
.layer = SFC_FLOW_ITEM_L4,
.parse = sfc_flow_parse_udp,
},
+ {
+ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
+ .prev_layer = SFC_FLOW_ITEM_L4,
+ .layer = SFC_FLOW_ITEM_START_LAYER,
+ .parse = sfc_flow_parse_vxlan,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_GENEVE,
+ .prev_layer = SFC_FLOW_ITEM_L4,
+ .layer = SFC_FLOW_ITEM_START_LAYER,
+ .parse = sfc_flow_parse_geneve,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_NVGRE,
+ .prev_layer = SFC_FLOW_ITEM_L3,
+ .layer = SFC_FLOW_ITEM_START_LAYER,
+ .parse = sfc_flow_parse_nvgre,
+ },
};
/*
@@ -773,6 +1115,12 @@ sfc_flow_parse_attr(const struct rte_flow_attr *attr,
"Egress is not supported");
return -rte_errno;
}
+ if (attr->transfer != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
+ "Transfer is not supported");
+ return -rte_errno;
+ }
if (attr->ingress == 0) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
@@ -780,8 +1128,8 @@ sfc_flow_parse_attr(const struct rte_flow_attr *attr,
return -rte_errno;
}
- flow->spec.efs_flags |= EFX_FILTER_FLAG_RX;
- flow->spec.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
+ flow->spec.template.efs_flags |= EFX_FILTER_FLAG_RX;
+ flow->spec.template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
return 0;
}
@@ -806,6 +1154,7 @@ sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
{
int rc;
unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
+ boolean_t is_ifrm = B_FALSE;
const struct sfc_flow_item *item;
if (pattern == NULL) {
@@ -837,7 +1186,41 @@ sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
return -rte_errno;
}
- rc = item->parse(pattern, &flow->spec, error);
+ /*
+ * Allow only VOID and ETH pattern items in the inner frame.
+ * Also check that there is only one tunneling protocol.
+ */
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ case RTE_FLOW_ITEM_TYPE_GENEVE:
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ if (is_ifrm) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "More than one tunneling protocol");
+ return -rte_errno;
+ }
+ is_ifrm = B_TRUE;
+ break;
+
+ default:
+ if (is_ifrm) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "There is an unsupported pattern item "
+ "in the inner frame");
+ return -rte_errno;
+ }
+ break;
+ }
+
+ rc = item->parse(pattern, &flow->spec.template, error);
if (rc != 0)
return rc;
@@ -859,28 +1242,27 @@ sfc_flow_parse_queue(struct sfc_adapter *sa,
return -EINVAL;
rxq = sa->rxq_info[queue->index].rxq;
- flow->spec.efs_dmaq_id = (uint16_t)rxq->hw_index;
+ flow->spec.template.efs_dmaq_id = (uint16_t)rxq->hw_index;
return 0;
}
-#if EFSYS_OPT_RX_SCALE
static int
sfc_flow_parse_rss(struct sfc_adapter *sa,
- const struct rte_flow_action_rss *rss,
+ const struct rte_flow_action_rss *action_rss,
struct rte_flow *flow)
{
+ struct sfc_rss *rss = &sa->rss;
unsigned int rxq_sw_index;
struct sfc_rxq *rxq;
unsigned int rxq_hw_index_min;
unsigned int rxq_hw_index_max;
- const struct rte_eth_rss_conf *rss_conf = rss->rss_conf;
- uint64_t rss_hf;
- uint8_t *rss_key = NULL;
+ efx_rx_hash_type_t efx_hash_types;
+ const uint8_t *rss_key;
struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf;
unsigned int i;
- if (rss->num == 0)
+ if (action_rss->queue_num == 0)
return -EINVAL;
rxq_sw_index = sa->rxq_count - 1;
@@ -888,8 +1270,8 @@ sfc_flow_parse_rss(struct sfc_adapter *sa,
rxq_hw_index_min = rxq->hw_index;
rxq_hw_index_max = 0;
- for (i = 0; i < rss->num; ++i) {
- rxq_sw_index = rss->queue[i];
+ for (i = 0; i < action_rss->queue_num; ++i) {
+ rxq_sw_index = action_rss->queue[i];
if (rxq_sw_index >= sa->rxq_count)
return -EINVAL;
@@ -903,28 +1285,62 @@ sfc_flow_parse_rss(struct sfc_adapter *sa,
rxq_hw_index_max = rxq->hw_index;
}
- rss_hf = (rss_conf != NULL) ? rss_conf->rss_hf : SFC_RSS_OFFLOADS;
- if ((rss_hf & ~SFC_RSS_OFFLOADS) != 0)
+ switch (action_rss->func) {
+ case RTE_ETH_HASH_FUNCTION_DEFAULT:
+ case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (action_rss->level)
return -EINVAL;
- if (rss_conf != NULL) {
- if (rss_conf->rss_key_len != sizeof(sa->rss_key))
+ /*
+ * Dummy RSS action with only one queue and no specific settings
+ * for hash types and key does not require a dedicated RSS context
+ * and may be simplified to a single queue action.
+ */
+ if (action_rss->queue_num == 1 && action_rss->types == 0 &&
+ action_rss->key_len == 0) {
+ flow->spec.template.efs_dmaq_id = rxq_hw_index_min;
+ return 0;
+ }
+
+ if (action_rss->types) {
+ int rc;
+
+ rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
+ &efx_hash_types);
+ if (rc != 0)
+ return -rc;
+ } else {
+ unsigned int i;
+
+ efx_hash_types = 0;
+ for (i = 0; i < rss->hf_map_nb_entries; ++i)
+ efx_hash_types |= rss->hf_map[i].efx;
+ }
+
+ if (action_rss->key_len) {
+ if (action_rss->key_len != sizeof(rss->key))
return -EINVAL;
- rss_key = rss_conf->rss_key;
+ rss_key = action_rss->key;
} else {
- rss_key = sa->rss_key;
+ rss_key = rss->key;
}
flow->rss = B_TRUE;
sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
- sfc_rss_conf->rss_hash_types = sfc_rte_to_efx_hash_type(rss_hf);
- rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(sa->rss_key));
+ sfc_rss_conf->rss_hash_types = efx_hash_types;
+ rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));
for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
- unsigned int rxq_sw_index = rss->queue[i % rss->num];
+ unsigned int nb_queues = action_rss->queue_num;
+ unsigned int rxq_sw_index = action_rss->queue[i % nb_queues];
struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq;
sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
@@ -932,47 +1348,101 @@ sfc_flow_parse_rss(struct sfc_adapter *sa,
return 0;
}
-#endif /* EFSYS_OPT_RX_SCALE */
+
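
With the reworked action parsing, RSS is configured through the struct rte_flow_action_rss fields used above rather than through rte_eth_rss_conf. The following sketch shows an action configuration this parser accepts; the queue list and hash types are illustrative only:

static const uint16_t example_rss_queues[] = { 0, 1, 2, 3 };
static const struct rte_flow_action_rss example_rss_conf = {
	.func = RTE_ETH_HASH_FUNCTION_TOEPLITZ, /* or _DEFAULT */
	.level = 0,			/* non-zero levels are rejected above */
	.types = ETH_RSS_IP,		/* 0 would select all supported hash types */
	.key_len = 0,			/* 0 selects the device RSS key */
	.key = NULL,
	.queue_num = RTE_DIM(example_rss_queues),
	.queue = example_rss_queues,
};

A single-queue configuration with no key and no hash types is collapsed into a plain queue assignment and does not allocate an RSS context.
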
+static int
+sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
+ unsigned int filters_count)
+{
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < filters_count; i++) {
+ int rc;
+
+ rc = efx_filter_remove(sa->nic, &spec->filters[i]);
+ if (ret == 0 && rc != 0) {
+ sfc_err(sa, "failed to remove filter specification "
+ "(rc = %d)", rc);
+ ret = rc;
+ }
+ }
+
+ return ret;
+}
+
+static int
+sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
+{
+ unsigned int i;
+ int rc = 0;
+
+ for (i = 0; i < spec->count; i++) {
+ rc = efx_filter_insert(sa->nic, &spec->filters[i]);
+ if (rc != 0) {
+ sfc_flow_spec_flush(sa, spec, i);
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static int
+sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
+{
+ return sfc_flow_spec_flush(sa, spec, spec->count);
+}
static int
sfc_flow_filter_insert(struct sfc_adapter *sa,
struct rte_flow *flow)
{
- efx_filter_spec_t *spec = &flow->spec;
-
-#if EFSYS_OPT_RX_SCALE
- struct sfc_flow_rss *rss = &flow->rss_conf;
+ struct sfc_rss *rss = &sa->rss;
+ struct sfc_flow_rss *flow_rss = &flow->rss_conf;
+ uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
+ unsigned int i;
int rc = 0;
if (flow->rss) {
- unsigned int rss_spread = MIN(rss->rxq_hw_index_max -
- rss->rxq_hw_index_min + 1,
+ unsigned int rss_spread = MIN(flow_rss->rxq_hw_index_max -
+ flow_rss->rxq_hw_index_min + 1,
EFX_MAXRSS);
rc = efx_rx_scale_context_alloc(sa->nic,
EFX_RX_SCALE_EXCLUSIVE,
rss_spread,
- &spec->efs_rss_context);
+ &efs_rss_context);
if (rc != 0)
goto fail_scale_context_alloc;
- rc = efx_rx_scale_mode_set(sa->nic, spec->efs_rss_context,
- EFX_RX_HASHALG_TOEPLITZ,
- rss->rss_hash_types, B_TRUE);
+ rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
+ rss->hash_alg,
+ flow_rss->rss_hash_types, B_TRUE);
if (rc != 0)
goto fail_scale_mode_set;
- rc = efx_rx_scale_key_set(sa->nic, spec->efs_rss_context,
- rss->rss_key,
- sizeof(sa->rss_key));
+ rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
+ flow_rss->rss_key,
+ sizeof(rss->key));
if (rc != 0)
goto fail_scale_key_set;
- spec->efs_dmaq_id = rss->rxq_hw_index_min;
- spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
+ /*
+ * At this point, fully elaborated filter specifications
+ * have been produced from the template. To make sure that
+ * RSS behaviour is consistent between them, set the same
+ * RSS context value everywhere.
+ */
+ for (i = 0; i < flow->spec.count; i++) {
+ efx_filter_spec_t *spec = &flow->spec.filters[i];
+
+ spec->efs_rss_context = efs_rss_context;
+ spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
+ spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
+ }
}
- rc = efx_filter_insert(sa->nic, spec);
+ rc = sfc_flow_spec_insert(sa, &flow->spec);
if (rc != 0)
goto fail_filter_insert;
@@ -985,8 +1455,9 @@ sfc_flow_filter_insert(struct sfc_adapter *sa,
* the HW knows all the information needed to verify
* the table entries, and the operation will succeed
*/
- rc = efx_rx_scale_tbl_set(sa->nic, spec->efs_rss_context,
- rss->rss_tbl, RTE_DIM(rss->rss_tbl));
+ rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
+ flow_rss->rss_tbl,
+ RTE_DIM(flow_rss->rss_tbl));
if (rc != 0)
goto fail_scale_tbl_set;
}
@@ -994,48 +1465,72 @@ sfc_flow_filter_insert(struct sfc_adapter *sa,
return 0;
fail_scale_tbl_set:
- efx_filter_remove(sa->nic, spec);
+ sfc_flow_spec_remove(sa, &flow->spec);
fail_filter_insert:
fail_scale_key_set:
fail_scale_mode_set:
- if (flow->rss)
- efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
+ if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT)
+ efx_rx_scale_context_free(sa->nic, efs_rss_context);
fail_scale_context_alloc:
return rc;
-#else /* !EFSYS_OPT_RX_SCALE */
- return efx_filter_insert(sa->nic, spec);
-#endif /* EFSYS_OPT_RX_SCALE */
}
static int
sfc_flow_filter_remove(struct sfc_adapter *sa,
struct rte_flow *flow)
{
- efx_filter_spec_t *spec = &flow->spec;
int rc = 0;
- rc = efx_filter_remove(sa->nic, spec);
+ rc = sfc_flow_spec_remove(sa, &flow->spec);
if (rc != 0)
return rc;
-#if EFSYS_OPT_RX_SCALE
- if (flow->rss)
+ if (flow->rss) {
+ /*
+ * All specifications for a given flow rule have the same RSS
+ * context, so the RSS context value is taken from the first
+ * filter specification.
+ */
+ efx_filter_spec_t *spec = &flow->spec.filters[0];
+
rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
-#endif /* EFSYS_OPT_RX_SCALE */
+ }
return rc;
}
static int
+sfc_flow_parse_mark(struct sfc_adapter *sa,
+ const struct rte_flow_action_mark *mark,
+ struct rte_flow *flow)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+
+ if (mark == NULL || mark->id > encp->enc_filter_action_mark_max)
+ return EINVAL;
+
+ flow->spec.template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
+ flow->spec.template.efs_mark = mark->id;
+
+ return 0;
+}
+
+static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
const struct rte_flow_action actions[],
struct rte_flow *flow,
struct rte_flow_error *error)
{
int rc;
- boolean_t is_specified = B_FALSE;
+ const unsigned int dp_rx_features = sa->dp_rx->features;
+ uint32_t actions_set = 0;
+ const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
+ (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
+ (1UL << RTE_FLOW_ACTION_TYPE_DROP);
+ const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
+ (1UL << RTE_FLOW_ACTION_TYPE_FLAG);
if (actions == NULL) {
rte_flow_error_set(error, EINVAL,
@@ -1044,12 +1539,22 @@ sfc_flow_parse_actions(struct sfc_adapter *sa,
return -rte_errno;
}
+#define SFC_BUILD_SET_OVERFLOW(_action, _set) \
+ RTE_BUILD_BUG_ON(_action >= sizeof(_set) * CHAR_BIT)
+
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
switch (actions->type) {
case RTE_FLOW_ACTION_TYPE_VOID:
+ SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
+ actions_set);
break;
case RTE_FLOW_ACTION_TYPE_QUEUE:
+ SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
+ actions_set);
+ if ((actions_set & fate_actions_mask) != 0)
+ goto fail_fate_actions;
+
rc = sfc_flow_parse_queue(sa, actions->conf, flow);
if (rc != 0) {
rte_flow_error_set(error, EINVAL,
@@ -1057,23 +1562,71 @@ sfc_flow_parse_actions(struct sfc_adapter *sa,
"Bad QUEUE action");
return -rte_errno;
}
-
- is_specified = B_TRUE;
break;
-#if EFSYS_OPT_RX_SCALE
case RTE_FLOW_ACTION_TYPE_RSS:
+ SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
+ actions_set);
+ if ((actions_set & fate_actions_mask) != 0)
+ goto fail_fate_actions;
+
rc = sfc_flow_parse_rss(sa, actions->conf, flow);
if (rc != 0) {
- rte_flow_error_set(error, rc,
+ rte_flow_error_set(error, -rc,
RTE_FLOW_ERROR_TYPE_ACTION, actions,
"Bad RSS action");
return -rte_errno;
}
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
+ actions_set);
+ if ((actions_set & fate_actions_mask) != 0)
+ goto fail_fate_actions;
+
+ flow->spec.template.efs_dmaq_id =
+ EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
+ actions_set);
+ if ((actions_set & mark_actions_mask) != 0)
+ goto fail_actions_overlap;
+
+ if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "FLAG action is not supported on the current Rx datapath");
+ return -rte_errno;
+ }
- is_specified = B_TRUE;
+ flow->spec.template.efs_flags |=
+ EFX_FILTER_FLAG_ACTION_FLAG;
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
+ actions_set);
+ if ((actions_set & mark_actions_mask) != 0)
+ goto fail_actions_overlap;
+
+ if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "MARK action is not supported on the current Rx datapath");
+ return -rte_errno;
+ }
+
+ rc = sfc_flow_parse_mark(sa, actions->conf, flow);
+ if (rc != 0) {
+ rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Bad MARK action");
+ return -rte_errno;
+ }
break;
-#endif /* EFSYS_OPT_RX_SCALE */
default:
rte_flow_error_set(error, ENOTSUP,
@@ -1081,12 +1634,529 @@ sfc_flow_parse_actions(struct sfc_adapter *sa,
"Action is not supported");
return -rte_errno;
}
+
+ actions_set |= (1UL << actions->type);
+ }
+#undef SFC_BUILD_SET_OVERFLOW
+
+ /* When fate is unknown, drop traffic. */
+ if ((actions_set & fate_actions_mask) == 0) {
+ flow->spec.template.efs_dmaq_id =
+ EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
}
- if (!is_specified) {
+ return 0;
+
+fail_fate_actions:
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Cannot combine several fate-deciding actions, "
+ "choose between QUEUE, RSS or DROP");
+ return -rte_errno;
+
+fail_actions_overlap:
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Overlapping actions are not supported");
+ return -rte_errno;
+}
+
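Fate-deciding actions (QUEUE, RSS, DROP) are mutually exclusive, as are the mark-type actions (MARK, FLAG), but one of each kind may be combined. An illustrative actions array, with a hypothetical mark id and queue index:

static const struct rte_flow_action_mark example_mark = { .id = 42 };
static const struct rte_flow_action_queue example_queue = { .index = 0 };
static const struct rte_flow_action example_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &example_mark },
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &example_queue },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};

Adding DROP or RSS to this array would trip the fate_actions_mask check, and MARK is only accepted when the active Rx datapath reports SFC_DP_RX_FEAT_FLOW_MARK.
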
+/**
+ * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
+ * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same
+ * specifications after copying.
+ *
+ * @param spec[in, out]
+ * SFC flow specification to update.
+ * @param filters_count_for_one_val[in]
+ * How many specifications should share the same value of the match flag;
+ * this equals the number of specifications before copying.
+ * @param error[out]
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
+ unsigned int filters_count_for_one_val,
+ struct rte_flow_error *error)
+{
+ unsigned int i;
+ static const efx_filter_match_flags_t vals[] = {
+ EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
+ EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
+ };
+
+ if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION_NUM, actions,
- "Action is unspecified");
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Number of specifications is incorrect while copying "
+ "by unknown destination flags");
+ return -rte_errno;
+ }
+
+ for (i = 0; i < spec->count; i++) {
+ /* The check above ensures that divisor can't be zero here */
+ spec->filters[i].efs_match_flags |=
+ vals[i / filters_count_for_one_val];
+ }
+
+ return 0;
+}
+
+/**
+ * Check that the following conditions are met:
+ * - the list of supported filters has a filter
+ * with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
+ * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
+ * be inserted.
+ *
+ * @param match[in]
+ * The match flags of filter.
+ * @param spec[in]
+ * Specification to be supplemented.
+ * @param filter[in]
+ * SFC filter with list of supported filters.
+ */
+static boolean_t
+sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
+ __rte_unused efx_filter_spec_t *spec,
+ struct sfc_filter *filter)
+{
+ unsigned int i;
+ efx_filter_match_flags_t match_mcast_dst;
+
+ match_mcast_dst =
+ (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
+ EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
+ for (i = 0; i < filter->supported_match_num; i++) {
+ if (match_mcast_dst == filter->supported_match[i])
+ return B_TRUE;
+ }
+
+ return B_FALSE;
+}
+
+/**
+ * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and
+ * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same
+ * specifications after copying.
+ *
+ * @param spec[in, out]
+ * SFC flow specification to update.
+ * @param filters_count_for_one_val[in]
+ * How many specifications should share the same EtherType value;
+ * this equals the number of specifications before copying.
+ * @param error[out]
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
+ unsigned int filters_count_for_one_val,
+ struct rte_flow_error *error)
+{
+ unsigned int i;
+ static const uint16_t vals[] = {
+ EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
+ };
+
+ if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Number of specifications is incorrect "
+ "while copying by Ethertype");
+ return -rte_errno;
+ }
+
+ for (i = 0; i < spec->count; i++) {
+ spec->filters[i].efs_match_flags |=
+ EFX_FILTER_MATCH_ETHER_TYPE;
+
+ /*
+ * The check above ensures that
+ * filters_count_for_one_val is not 0
+ */
+ spec->filters[i].efs_ether_type =
+ vals[i / filters_count_for_one_val];
+ }
+
+ return 0;
+}
+
+/**
+ * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
+ * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
+ * specifications after copying.
+ *
+ * @param spec[in, out]
+ * SFC flow specification to update.
+ * @param filters_count_for_one_val[in]
+ * How many specifications should share the same value of the match flag;
+ * this equals the number of specifications before copying.
+ * @param error[out]
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
+ unsigned int filters_count_for_one_val,
+ struct rte_flow_error *error)
+{
+ unsigned int i;
+ static const efx_filter_match_flags_t vals[] = {
+ EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
+ EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
+ };
+
+ if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Number of specifications is incorrect while copying "
+ "by inner frame unknown destination flags");
+ return -rte_errno;
+ }
+
+ for (i = 0; i < spec->count; i++) {
+ /* The check above ensures that divisor can't be zero here */
+ spec->filters[i].efs_match_flags |=
+ vals[i / filters_count_for_one_val];
+ }
+
+ return 0;
+}
+
+/**
+ * Check that the following conditions are met:
+ * - the specification corresponds to a filter for encapsulated traffic
+ * - the list of supported filters has a filter
+ * with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
+ * EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
+ * be inserted.
+ *
+ * @param match[in]
+ * The match flags of filter.
+ * @param spec[in]
+ * Specification to be supplemented.
+ * @param filter[in]
+ * SFC filter with list of supported filters.
+ */
+static boolean_t
+sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
+ efx_filter_spec_t *spec,
+ struct sfc_filter *filter)
+{
+ unsigned int i;
+ efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
+ efx_filter_match_flags_t match_mcast_dst;
+
+ if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
+ return B_FALSE;
+
+ match_mcast_dst =
+ (match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
+ EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
+ for (i = 0; i < filter->supported_match_num; i++) {
+ if (match_mcast_dst == filter->supported_match[i])
+ return B_TRUE;
+ }
+
+ return B_FALSE;
+}
+
+/*
+ * Match flags that can be automatically added to filters.
+ * Selecting the last minimum when searching for the copy flag ensures that the
+ * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than
+ * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter
+ * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported
+ * filters.
+ */
+static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
+ {
+ .flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
+ .vals_count = 2,
+ .set_vals = sfc_flow_set_unknown_dst_flags,
+ .spec_check = sfc_flow_check_unknown_dst_flags,
+ },
+ {
+ .flag = EFX_FILTER_MATCH_ETHER_TYPE,
+ .vals_count = 2,
+ .set_vals = sfc_flow_set_ethertypes,
+ .spec_check = NULL,
+ },
+ {
+ .flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
+ .vals_count = 2,
+ .set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
+ .spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
+ },
+};
+
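The worst case implied by this table is one copy flag of each kind applied to a single template. A small standalone sketch of the resulting multiplier, spelling out the arithmetic only:

static unsigned int
example_worst_case_multiplier(void)
{
	unsigned int multiplier = 1;
	unsigned int i;

	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++)
		multiplier *= sfc_flow_copy_flags[i].vals_count;

	/* 2 * 2 * 2 == 8, which matches SF_FLOW_SPEC_NB_FILTERS_MAX */
	return multiplier;
}
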
+/* Get item from array sfc_flow_copy_flags */
+static const struct sfc_flow_copy_flag *
+sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
+{
+ unsigned int i;
+
+ for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
+ if (sfc_flow_copy_flags[i].flag == flag)
+ return &sfc_flow_copy_flags[i];
+ }
+
+ return NULL;
+}
+
+/**
+ * Make copies of the specifications, set match flag and values
+ * of the field that corresponds to it.
+ *
+ * @param spec[in, out]
+ * SFC flow specification to update.
+ * @param flag[in]
+ * The match flag to add.
+ * @param error[out]
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
+ efx_filter_match_flags_t flag,
+ struct rte_flow_error *error)
+{
+ unsigned int i;
+ unsigned int new_filters_count;
+ unsigned int filters_count_for_one_val;
+ const struct sfc_flow_copy_flag *copy_flag;
+ int rc;
+
+ copy_flag = sfc_flow_get_copy_flag(flag);
+ if (copy_flag == NULL) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Unsupported spec field for copying");
+ return -rte_errno;
+ }
+
+ new_filters_count = spec->count * copy_flag->vals_count;
+ if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Too much EFX specifications in the flow rule");
+ return -rte_errno;
+ }
+
+ /* Copy filters specifications */
+ for (i = spec->count; i < new_filters_count; i++)
+ spec->filters[i] = spec->filters[i - spec->count];
+
+ filters_count_for_one_val = spec->count;
+ spec->count = new_filters_count;
+
+ rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
+ if (rc != 0)
+ return rc;
+
+ return 0;
+}
+
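The copying scheme is easier to see with concrete numbers: starting from two specifications and a flag with two possible values, the array grows to four entries and vals[i / filters_count_for_one_val] assigns the first value to entries 0-1 and the second to entries 2-3. A standalone sketch of the same indexing, with plain integers standing in for filter specifications:

#include <stdio.h>

int
main(void)
{
	int filters[8] = { 10, 20 };		/* two original specs */
	const int vals[2] = { 100, 200 };	/* two values of the new field */
	unsigned int count = 2;
	unsigned int one_val = count;		/* specs per value */
	unsigned int new_count = count * 2;
	unsigned int i;

	for (i = count; i < new_count; i++)	/* duplicate the originals */
		filters[i] = filters[i - count];
	for (i = 0; i < new_count; i++)		/* spread the values */
		printf("filter %u: %d + %d\n", i, filters[i], vals[i / one_val]);

	return 0;
}
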
+/**
+ * Check that the given set of match flags missing in the original filter spec
+ * could be covered by adding spec copies which specify the corresponding
+ * flags and packet field values to match.
+ *
+ * @param miss_flags[in]
+ * Match flags present in a supported filter but missing from the specification.
+ * @param spec[in]
+ * Specification to be supplemented.
+ * @param filter[in]
+ * SFC filter.
+ *
+ * @return
+ * Number of specifications after copying, or 0 if the flags cannot be added.
+ */
+static unsigned int
+sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
+ efx_filter_spec_t *spec,
+ struct sfc_filter *filter)
+{
+ unsigned int i;
+ efx_filter_match_flags_t copy_flags = 0;
+ efx_filter_match_flags_t flag;
+ efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
+ sfc_flow_spec_check *check;
+ unsigned int multiplier = 1;
+
+ for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
+ flag = sfc_flow_copy_flags[i].flag;
+ check = sfc_flow_copy_flags[i].spec_check;
+ if ((flag & miss_flags) == flag) {
+ if (check != NULL && (!check(match, spec, filter)))
+ continue;
+
+ copy_flags |= flag;
+ multiplier *= sfc_flow_copy_flags[i].vals_count;
+ }
+ }
+
+ if (copy_flags == miss_flags)
+ return multiplier;
+
+ return 0;
+}
+
+/**
+ * Attempt to extend the specification template to the minimal
+ * supported set of match flags. To do this, it is necessary to copy
+ * the specifications, filling them with the values of fields that
+ * correspond to the missing flags.
+ * The necessary and sufficient filter is built from the fewest number
+ * of copies which could be made to cover the minimally required set
+ * of flags.
+ *
+ * @param sa[in]
+ * SFC adapter.
+ * @param spec[in, out]
+ * SFC flow specification to update.
+ * @param error[out]
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
+ struct sfc_flow_spec *spec,
+ struct rte_flow_error *error)
+{
+ struct sfc_filter *filter = &sa->filter;
+ efx_filter_match_flags_t miss_flags;
+ efx_filter_match_flags_t min_miss_flags = 0;
+ efx_filter_match_flags_t match;
+ unsigned int min_multiplier = UINT_MAX;
+ unsigned int multiplier;
+ unsigned int i;
+ int rc;
+
+ match = spec->template.efs_match_flags;
+ for (i = 0; i < filter->supported_match_num; i++) {
+ if ((match & filter->supported_match[i]) == match) {
+ miss_flags = filter->supported_match[i] & (~match);
+ multiplier = sfc_flow_check_missing_flags(miss_flags,
+ &spec->template, filter);
+ if (multiplier > 0) {
+ if (multiplier <= min_multiplier) {
+ min_multiplier = multiplier;
+ min_miss_flags = miss_flags;
+ }
+ }
+ }
+ }
+
+ if (min_multiplier == UINT_MAX) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "The flow rule pattern is unsupported");
+ return -rte_errno;
+ }
+
+ for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
+ efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
+
+ if ((flag & min_miss_flags) == flag) {
+ rc = sfc_flow_spec_add_match_flag(spec, flag, error);
+ if (rc != 0)
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Check that set of match flags is referred to by a filter. Filter is
+ * described by match flags with the ability to add OUTER_VID and INNER_VID
+ * flags.
+ *
+ * @param match_flags[in]
+ * Set of match flags.
+ * @param flags_pattern[in]
+ * Pattern of filter match flags.
+ */
+static boolean_t
+sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
+ efx_filter_match_flags_t flags_pattern)
+{
+ if ((match_flags & flags_pattern) != flags_pattern)
+ return B_FALSE;
+
+ switch (match_flags & ~flags_pattern) {
+ case 0:
+ case EFX_FILTER_MATCH_OUTER_VID:
+ case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
+ return B_TRUE;
+ default:
+ return B_FALSE;
+ }
+}
+
+/**
+ * Check whether the spec maps to a hardware filter which is known to be
+ * ineffective despite being valid.
+ *
+ * @param spec[in]
+ * SFC flow specification.
+ */
+static boolean_t
+sfc_flow_is_match_flags_exception(struct sfc_flow_spec *spec)
+{
+ unsigned int i;
+ uint16_t ether_type;
+ uint8_t ip_proto;
+ efx_filter_match_flags_t match_flags;
+
+ for (i = 0; i < spec->count; i++) {
+ match_flags = spec->filters[i].efs_match_flags;
+
+ if (sfc_flow_is_match_with_vids(match_flags,
+ EFX_FILTER_MATCH_ETHER_TYPE) ||
+ sfc_flow_is_match_with_vids(match_flags,
+ EFX_FILTER_MATCH_ETHER_TYPE |
+ EFX_FILTER_MATCH_LOC_MAC)) {
+ ether_type = spec->filters[i].efs_ether_type;
+ if (ether_type == EFX_ETHER_TYPE_IPV4 ||
+ ether_type == EFX_ETHER_TYPE_IPV6)
+ return B_TRUE;
+ } else if (sfc_flow_is_match_with_vids(match_flags,
+ EFX_FILTER_MATCH_ETHER_TYPE |
+ EFX_FILTER_MATCH_IP_PROTO) ||
+ sfc_flow_is_match_with_vids(match_flags,
+ EFX_FILTER_MATCH_ETHER_TYPE |
+ EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_MAC)) {
+ ip_proto = spec->filters[i].efs_ip_proto;
+ if (ip_proto == EFX_IPPROTO_TCP ||
+ ip_proto == EFX_IPPROTO_UDP)
+ return B_TRUE;
+ }
+ }
+
+ return B_FALSE;
+}
+
+static int
+sfc_flow_validate_match_flags(struct sfc_adapter *sa,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ efx_filter_spec_t *spec_tmpl = &flow->spec.template;
+ efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
+ int rc;
+
+ /* Initialize the first filter spec with template */
+ flow->spec.filters[0] = *spec_tmpl;
+ flow->spec.count = 1;
+
+ if (!sfc_filter_is_match_supported(sa, match_flags)) {
+ rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
+ if (rc != 0)
+ return rc;
+ }
+
+ if (sfc_flow_is_match_flags_exception(&flow->spec)) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "The flow rule pattern is unsupported");
return -rte_errno;
}
@@ -1116,12 +2186,11 @@ sfc_flow_parse(struct rte_eth_dev *dev,
if (rc != 0)
goto fail_bad_value;
- if (!sfc_filter_is_match_supported(sa, flow->spec.efs_match_flags)) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "Flow rule pattern is not supported");
- return -rte_errno;
- }
+ rc = sfc_flow_validate_match_flags(sa, flow, error);
+ if (rc != 0)
+ goto fail_bad_value;
+
+ return 0;
fail_bad_value:
return rc;
diff --git a/drivers/net/sfc/sfc_flow.h b/drivers/net/sfc/sfc_flow.h
index 35472ad3..71ec18cb 100644
--- a/drivers/net/sfc/sfc_flow.h
+++ b/drivers/net/sfc/sfc_flow.h
@@ -19,7 +19,13 @@
extern "C" {
#endif
-#if EFSYS_OPT_RX_SCALE
+/*
+ * The maximum number of fully elaborated hardware filter specifications
+ * which can be produced from a template by means of multiplication, if
+ * missing match flags need to be taken into account
+ */
+#define SF_FLOW_SPEC_NB_FILTERS_MAX 8
+
/* RSS configuration storage */
struct sfc_flow_rss {
unsigned int rxq_hw_index_min;
@@ -28,15 +34,22 @@ struct sfc_flow_rss {
uint8_t rss_key[EFX_RSS_KEY_SIZE];
unsigned int rss_tbl[EFX_RSS_TBL_SIZE];
};
-#endif /* EFSYS_OPT_RX_SCALE */
+
+/* Filter specification storage */
+struct sfc_flow_spec {
+ /* partial specification from flow rule */
+ efx_filter_spec_t template;
+ /* fully elaborated hardware filters specifications */
+ efx_filter_spec_t filters[SF_FLOW_SPEC_NB_FILTERS_MAX];
+ /* number of complete specifications */
+ unsigned int count;
+};
/* PMD-specific definition of the opaque type from rte_flow.h */
struct rte_flow {
- efx_filter_spec_t spec; /* filter specification */
-#if EFSYS_OPT_RX_SCALE
+ struct sfc_flow_spec spec; /* flow spec for hardware filter(s) */
boolean_t rss; /* RSS toggle */
struct sfc_flow_rss rss_conf; /* RSS configuration */
-#endif /* EFSYS_OPT_RX_SCALE */
TAILQ_ENTRY(rte_flow) entries; /* flow list entries */
};
diff --git a/drivers/net/sfc/sfc_intr.c b/drivers/net/sfc/sfc_intr.c
index d6c84927..fbdc7eea 100644
--- a/drivers/net/sfc/sfc_intr.c
+++ b/drivers/net/sfc/sfc_intr.c
@@ -86,7 +86,7 @@ sfc_intr_line_handler(void *cb_arg)
exit:
if (lsc_seq != sa->port.lsc_seq) {
- sfc_info(sa, "link status change event: link %s",
+ sfc_notice(sa, "link status change event: link %s",
sa->eth_dev->data->dev_link.link_status ?
"UP" : "DOWN");
_rte_eth_dev_callback_process(sa->eth_dev,
@@ -130,7 +130,7 @@ sfc_intr_message_handler(void *cb_arg)
exit:
if (lsc_seq != sa->port.lsc_seq) {
- sfc_info(sa, "link status change event");
+ sfc_notice(sa, "link status change event");
_rte_eth_dev_callback_process(sa->eth_dev,
RTE_ETH_EVENT_INTR_LSC,
NULL);
@@ -251,7 +251,7 @@ sfc_intr_configure(struct sfc_adapter *sa)
intr->handler = NULL;
intr->lsc_intr = (sa->eth_dev->data->dev_conf.intr_conf.lsc != 0);
if (!intr->lsc_intr) {
- sfc_info(sa, "LSC tracking using interrupts is disabled");
+ sfc_notice(sa, "LSC tracking using interrupts is disabled");
goto done;
}
diff --git a/drivers/net/sfc/sfc_kvargs.c b/drivers/net/sfc/sfc_kvargs.c
index 8f55e99b..7a89c769 100644
--- a/drivers/net/sfc/sfc_kvargs.c
+++ b/drivers/net/sfc/sfc_kvargs.c
@@ -23,11 +23,11 @@ sfc_kvargs_parse(struct sfc_adapter *sa)
struct rte_devargs *devargs = eth_dev->device->devargs;
const char **params = (const char *[]){
SFC_KVARG_STATS_UPDATE_PERIOD_MS,
- SFC_KVARG_DEBUG_INIT,
- SFC_KVARG_MCDI_LOGGING,
SFC_KVARG_PERF_PROFILE,
SFC_KVARG_RX_DATAPATH,
SFC_KVARG_TX_DATAPATH,
+ SFC_KVARG_FW_VARIANT,
+ SFC_KVARG_RXD_WAIT_TIMEOUT_NS,
NULL,
};
diff --git a/drivers/net/sfc/sfc_kvargs.h b/drivers/net/sfc/sfc_kvargs.h
index e7044ca6..4506667a 100644
--- a/drivers/net/sfc/sfc_kvargs.h
+++ b/drivers/net/sfc/sfc_kvargs.h
@@ -18,10 +18,6 @@ extern "C" {
#define SFC_KVARG_VALUES_BOOL "[1|y|yes|on|0|n|no|off]"
-#define SFC_KVARG_DEBUG_INIT "debug_init"
-
-#define SFC_KVARG_MCDI_LOGGING "mcdi_logging"
-
#define SFC_KVARG_PERF_PROFILE "perf_profile"
#define SFC_KVARG_PERF_PROFILE_AUTO "auto"
@@ -37,11 +33,13 @@ extern "C" {
#define SFC_KVARG_DATAPATH_EFX "efx"
#define SFC_KVARG_DATAPATH_EF10 "ef10"
#define SFC_KVARG_DATAPATH_EF10_SIMPLE "ef10_simple"
+#define SFC_KVARG_DATAPATH_EF10_ESSB "ef10_essb"
#define SFC_KVARG_RX_DATAPATH "rx_datapath"
#define SFC_KVARG_VALUES_RX_DATAPATH \
"[" SFC_KVARG_DATAPATH_EFX "|" \
- SFC_KVARG_DATAPATH_EF10 "]"
+ SFC_KVARG_DATAPATH_EF10 "|" \
+ SFC_KVARG_DATAPATH_EF10_ESSB "]"
#define SFC_KVARG_TX_DATAPATH "tx_datapath"
#define SFC_KVARG_VALUES_TX_DATAPATH \
@@ -49,6 +47,22 @@ extern "C" {
SFC_KVARG_DATAPATH_EF10 "|" \
SFC_KVARG_DATAPATH_EF10_SIMPLE "]"
+#define SFC_KVARG_FW_VARIANT "fw_variant"
+
+#define SFC_KVARG_FW_VARIANT_DONT_CARE "dont-care"
+#define SFC_KVARG_FW_VARIANT_FULL_FEATURED "full-feature"
+#define SFC_KVARG_FW_VARIANT_LOW_LATENCY "ultra-low-latency"
+#define SFC_KVARG_FW_VARIANT_PACKED_STREAM "capture-packed-stream"
+#define SFC_KVARG_FW_VARIANT_DPDK "dpdk"
+#define SFC_KVARG_VALUES_FW_VARIANT \
+ "[" SFC_KVARG_FW_VARIANT_DONT_CARE "|" \
+ SFC_KVARG_FW_VARIANT_FULL_FEATURED "|" \
+ SFC_KVARG_FW_VARIANT_LOW_LATENCY "|" \
+ SFC_KVARG_FW_VARIANT_PACKED_STREAM "|" \
+ SFC_KVARG_FW_VARIANT_DPDK "]"
+
+#define SFC_KVARG_RXD_WAIT_TIMEOUT_NS "rxd_wait_timeout_ns"
+
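Together with the existing datapath selectors, these parameters are passed as device arguments. A hypothetical invocation is sketched below; the PCI address and timeout value are placeholders:

/*
 * Hypothetical EAL device argument usage:
 *   -w 0000:01:00.0,rx_datapath=ef10_essb,fw_variant=dpdk,rxd_wait_timeout_ns=100000
 */
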
struct sfc_adapter;
int sfc_kvargs_parse(struct sfc_adapter *sa);
diff --git a/drivers/net/sfc/sfc_log.h b/drivers/net/sfc/sfc_log.h
index a18191ed..d6f34352 100644
--- a/drivers/net/sfc/sfc_log.h
+++ b/drivers/net/sfc/sfc_log.h
@@ -10,14 +10,35 @@
#ifndef _SFC_LOG_H_
#define _SFC_LOG_H_
+/** Generic driver log type */
+extern uint32_t sfc_logtype_driver;
+
+/** Common log type name prefix */
+#define SFC_LOGTYPE_PREFIX "pmd.net.sfc."
+
+/** Log PMD generic message, add a prefix and a line break */
+#define SFC_GENERIC_LOG(level, ...) \
+ rte_log(RTE_LOG_ ## level, sfc_logtype_driver, \
+ RTE_FMT("PMD: " RTE_FMT_HEAD(__VA_ARGS__ ,) "\n", \
+ RTE_FMT_TAIL(__VA_ARGS__ ,)))
+
+/** Name prefix for the per-device log type used to report basic information */
+#define SFC_LOGTYPE_MAIN_STR SFC_LOGTYPE_PREFIX "main"
+
+/** Device MCDI log type name prefix */
+#define SFC_LOGTYPE_MCDI_STR SFC_LOGTYPE_PREFIX "mcdi"
+
+/** Level value used by MCDI log statements */
+#define SFC_LOG_LEVEL_MCDI RTE_LOG_INFO
+
/* Log PMD message, automatically add prefix and \n */
-#define SFC_LOG(sa, level, ...) \
+#define SFC_LOG(sa, level, type, ...) \
do { \
const struct sfc_adapter *__sa = (sa); \
\
- RTE_LOG(level, PMD, \
- RTE_FMT("sfc_efx " PCI_PRI_FMT " #%" PRIu8 ": " \
- RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ rte_log(level, type, \
+ RTE_FMT("PMD: sfc_efx " PCI_PRI_FMT " #%" PRIu8 \
+ ": " RTE_FMT_HEAD(__VA_ARGS__ ,) "\n", \
__sa->pci_addr.domain, \
__sa->pci_addr.bus, \
__sa->pci_addr.devid, \
@@ -27,27 +48,55 @@
} while (0)
#define sfc_err(sa, ...) \
- SFC_LOG(sa, ERR, __VA_ARGS__)
+ do { \
+ const struct sfc_adapter *_sa = (sa); \
+ \
+ SFC_LOG(_sa, RTE_LOG_ERR, _sa->logtype_main, \
+ __VA_ARGS__); \
+ } while (0)
#define sfc_warn(sa, ...) \
- SFC_LOG(sa, WARNING, __VA_ARGS__)
+ do { \
+ const struct sfc_adapter *_sa = (sa); \
+ \
+ SFC_LOG(_sa, RTE_LOG_WARNING, _sa->logtype_main, \
+ __VA_ARGS__); \
+ } while (0)
#define sfc_notice(sa, ...) \
- SFC_LOG(sa, NOTICE, __VA_ARGS__)
+ do { \
+ const struct sfc_adapter *_sa = (sa); \
+ \
+ SFC_LOG(_sa, RTE_LOG_NOTICE, _sa->logtype_main, \
+ __VA_ARGS__); \
+ } while (0)
#define sfc_info(sa, ...) \
- SFC_LOG(sa, INFO, __VA_ARGS__)
+ do { \
+ const struct sfc_adapter *_sa = (sa); \
+ \
+ SFC_LOG(_sa, RTE_LOG_INFO, _sa->logtype_main, \
+ __VA_ARGS__); \
+ } while (0)
#define sfc_log_init(sa, ...) \
do { \
const struct sfc_adapter *_sa = (sa); \
\
- if (_sa->debug_init) \
- SFC_LOG(_sa, INFO, \
- RTE_FMT("%s(): " \
- RTE_FMT_HEAD(__VA_ARGS__,), \
- __func__, \
- RTE_FMT_TAIL(__VA_ARGS__,))); \
+ SFC_LOG(_sa, RTE_LOG_INFO, _sa->logtype_main, \
+ RTE_FMT("%s(): " \
+ RTE_FMT_HEAD(__VA_ARGS__ ,), \
+ __func__, \
+ RTE_FMT_TAIL(__VA_ARGS__ ,))); \
} while (0)
+#define sfc_log_mcdi(sa, ...) \
+ do { \
+ const struct sfc_adapter *_sa = (sa); \
+ \
+ SFC_LOG(_sa, SFC_LOG_LEVEL_MCDI, _sa->mcdi.logtype, \
+ __VA_ARGS__); \
+ } while (0)
+
+
#endif /* _SFC_LOG_H_ */
diff --git a/drivers/net/sfc/sfc_mcdi.c b/drivers/net/sfc/sfc_mcdi.c
index 9d92b8c5..007506b4 100644
--- a/drivers/net/sfc/sfc_mcdi.c
+++ b/drivers/net/sfc/sfc_mcdi.c
@@ -15,7 +15,6 @@
#include "sfc.h"
#include "sfc_log.h"
-#include "sfc_kvargs.h"
#include "sfc_ev.h"
#define SFC_MCDI_POLL_INTERVAL_MIN_US 10 /* 10us in 1us units */
@@ -176,7 +175,7 @@ sfc_mcdi_do_log(const struct sfc_adapter *sa,
* at the end which is required by netlogdecode.
*/
buffer[position] = '\0';
- sfc_info(sa, "%s \\", buffer);
+ sfc_log_mcdi(sa, "%s \\", buffer);
/* Preserve prefix for the next log message */
position = pfxsize;
}
@@ -198,10 +197,17 @@ sfc_mcdi_logger(void *arg, efx_log_msg_t type,
size_t pfxsize;
size_t start;
- if (!sa->mcdi.logging)
+ /*
+ * Unlike the other cases, MCDI logging requires considerable work to
+ * produce a message. If the dynamic log level prevents the end result
+ * from being printed, that CPU time is wasted.
+ *
+ * To avoid wasting time, the actual level is examined in advance.
+ */
+ if (rte_log_get_level(sa->mcdi.logtype) < (int)SFC_LOG_LEVEL_MCDI)
return;
- /* The format including prefix added by sfc_info() is the format
+ /* The format including prefix added by sfc_log_mcdi() is the format
* consumed by the Solarflare netlogdecode tool.
*/
pfxsize = snprintf(buffer, sizeof(buffer), "MCDI RPC %s:",
@@ -212,7 +218,7 @@ sfc_mcdi_logger(void *arg, efx_log_msg_t type,
start = sfc_mcdi_do_log(sa, buffer, data, data_size, pfxsize, start);
if (start != pfxsize) {
buffer[start] = '\0';
- sfc_info(sa, "%s", buffer);
+ sfc_log_mcdi(sa, "%s", buffer);
}
}
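
Since SFC_LOG_LEVEL_MCDI is RTE_LOG_INFO, the trace becomes visible once the MCDI log type is set to INFO or finer. A minimal sketch using the generic rte_log API; the log type value is assumed to be the one stored in sa->mcdi.logtype:

static void
example_enable_mcdi_trace(uint32_t mcdi_logtype)
{
	/* Print MCDI RPC trace messages logged via sfc_log_mcdi() */
	rte_log_set_level(mcdi_logtype, RTE_LOG_INFO);
}
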
@@ -250,11 +256,8 @@ sfc_mcdi_init(struct sfc_adapter *sa)
if (rc != 0)
goto fail_dma_alloc;
- /* Convert negative error to positive used in the driver */
- rc = sfc_kvargs_process(sa, SFC_KVARG_MCDI_LOGGING,
- sfc_kvarg_bool_handler, &mcdi->logging);
- if (rc != 0)
- goto fail_kvargs_process;
+ mcdi->logtype = sfc_register_logtype(sa, SFC_LOGTYPE_MCDI_STR,
+ RTE_LOG_NOTICE);
emtp = &mcdi->transport;
emtp->emt_context = sa;
@@ -274,8 +277,6 @@ sfc_mcdi_init(struct sfc_adapter *sa)
fail_mcdi_init:
memset(emtp, 0, sizeof(*emtp));
-
-fail_kvargs_process:
sfc_dma_free(sa, &mcdi->mem);
fail_dma_alloc:
diff --git a/drivers/net/sfc/sfc_port.c b/drivers/net/sfc/sfc_port.c
index c423f527..5384dbbd 100644
--- a/drivers/net/sfc/sfc_port.c
+++ b/drivers/net/sfc/sfc_port.c
@@ -121,6 +121,28 @@ sfc_port_init_dev_link(struct sfc_adapter *sa)
return 0;
}
+#if EFSYS_OPT_LOOPBACK
+
+static efx_link_mode_t
+sfc_port_phy_caps_to_max_link_speed(uint32_t phy_caps)
+{
+ if (phy_caps & (1u << EFX_PHY_CAP_100000FDX))
+ return EFX_LINK_100000FDX;
+ if (phy_caps & (1u << EFX_PHY_CAP_50000FDX))
+ return EFX_LINK_50000FDX;
+ if (phy_caps & (1u << EFX_PHY_CAP_40000FDX))
+ return EFX_LINK_40000FDX;
+ if (phy_caps & (1u << EFX_PHY_CAP_25000FDX))
+ return EFX_LINK_25000FDX;
+ if (phy_caps & (1u << EFX_PHY_CAP_10000FDX))
+ return EFX_LINK_10000FDX;
+ if (phy_caps & (1u << EFX_PHY_CAP_1000FDX))
+ return EFX_LINK_1000FDX;
+ return EFX_LINK_UNKNOWN;
+}
+
+#endif
+
int
sfc_port_start(struct sfc_adapter *sa)
{
@@ -143,6 +165,21 @@ sfc_port_start(struct sfc_adapter *sa)
if (rc != 0)
goto fail_port_init;
+#if EFSYS_OPT_LOOPBACK
+ if (sa->eth_dev->data->dev_conf.lpbk_mode != 0) {
+ efx_link_mode_t link_mode;
+
+ link_mode =
+ sfc_port_phy_caps_to_max_link_speed(port->phy_adv_cap);
+ sfc_log_init(sa, "set loopback link_mode=%u type=%u", link_mode,
+ sa->eth_dev->data->dev_conf.lpbk_mode);
+ rc = efx_port_loopback_set(sa->nic, link_mode,
+ sa->eth_dev->data->dev_conf.lpbk_mode);
+ if (rc != 0)
+ goto fail_loopback_set;
+ }
+#endif
+
sfc_log_init(sa, "set flow control to %#x autoneg=%u",
port->flow_ctrl, port->flow_ctrl_autoneg);
rc = efx_mac_fcntl_set(sa->nic, port->flow_ctrl,
@@ -155,6 +192,16 @@ sfc_port_start(struct sfc_adapter *sa)
SFC_ASSERT((port->phy_adv_cap & phy_pause_caps) == 0);
phy_adv_cap = port->phy_adv_cap | (phy_adv_cap & phy_pause_caps);
+ /*
+ * No controls for FEC yet. Use default FEC mode.
+ * I.e. advertise everything supported (*_FEC=1), but do not request
+ * anything explicitly (*_FEC_REQUESTED=0).
+ */
+ phy_adv_cap |= port->phy_adv_cap_mask &
+ (1u << EFX_PHY_CAP_BASER_FEC |
+ 1u << EFX_PHY_CAP_RS_FEC |
+ 1u << EFX_PHY_CAP_25G_BASER_FEC);
+
sfc_log_init(sa, "set phy adv caps to %#x", phy_adv_cap);
rc = efx_phy_adv_cap_set(sa->nic, phy_adv_cap);
if (rc != 0)
@@ -268,6 +315,9 @@ fail_mac_addr_set:
fail_mac_pdu_set:
fail_phy_adv_cap_set:
fail_mac_fcntl_set:
+#if EFSYS_OPT_LOOPBACK
+fail_loopback_set:
+#endif
efx_port_fini(sa->nic);
fail_port_init:
@@ -323,6 +373,8 @@ sfc_port_attach(struct sfc_adapter *sa)
struct sfc_port *port = &sa->port;
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
const struct ether_addr *from;
+ uint32_t mac_nstats;
+ size_t mac_stats_size;
long kvarg_stats_update_period_ms;
int rc;
@@ -358,7 +410,9 @@ sfc_port_attach(struct sfc_adapter *sa)
if (port->mac_stats_buf == NULL)
goto fail_mac_stats_buf_alloc;
- rc = sfc_dma_alloc(sa, "mac_stats", 0, EFX_MAC_STATS_SIZE,
+ mac_nstats = efx_nic_cfg_get(sa->nic)->enc_mac_stats_nstats;
+ mac_stats_size = RTE_ALIGN(mac_nstats * sizeof(uint64_t), EFX_BUF_SIZE);
+ rc = sfc_dma_alloc(sa, "mac_stats", 0, mac_stats_size,
sa->socket_id, &port->mac_stats_dma_mem);
if (rc != 0)
goto fail_mac_stats_dma_alloc;
@@ -470,10 +524,22 @@ sfc_port_link_mode_to_info(efx_link_mode_t link_mode,
link_info->link_speed = ETH_SPEED_NUM_10G;
link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
break;
+ case EFX_LINK_25000FDX:
+ link_info->link_speed = ETH_SPEED_NUM_25G;
+ link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
case EFX_LINK_40000FDX:
link_info->link_speed = ETH_SPEED_NUM_40G;
link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
break;
+ case EFX_LINK_50000FDX:
+ link_info->link_speed = ETH_SPEED_NUM_50G;
+ link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case EFX_LINK_100000FDX:
+ link_info->link_speed = ETH_SPEED_NUM_100G;
+ link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
default:
SFC_ASSERT(B_FALSE);
/* FALLTHROUGH */
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index abc53fb5..cc76a5b1 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -184,7 +184,6 @@ sfc_efx_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps)
return ptypes;
}
-#if EFSYS_OPT_RX_SCALE
static void
sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
struct rte_mbuf *m)
@@ -205,14 +204,6 @@ sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
m->ol_flags |= PKT_RX_RSS_HASH;
}
}
-#else
-static void
-sfc_efx_rx_set_rss_hash(__rte_unused struct sfc_efx_rxq *rxq,
- __rte_unused unsigned int flags,
- __rte_unused struct rte_mbuf *m)
-{
-}
-#endif
static uint16_t
sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
@@ -393,6 +384,7 @@ sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
static sfc_dp_rx_qsize_up_rings_t sfc_efx_rx_qsize_up_rings;
static int
sfc_efx_rx_qsize_up_rings(uint16_t nb_rx_desc,
+ __rte_unused struct rte_mempool *mb_pool,
unsigned int *rxq_entries,
unsigned int *evq_entries,
unsigned int *rxq_max_fill_level)
@@ -608,7 +600,7 @@ sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
sfc_err(sa, "RxQ %u flush failed", sw_index);
if (rxq->state & SFC_RXQ_FLUSHED)
- sfc_info(sa, "RxQ %u flushed", sw_index);
+ sfc_notice(sa, "RxQ %u flushed", sw_index);
}
sa->dp_rx->qpurge(rxq->dp);
@@ -617,7 +609,8 @@ sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
static int
sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
{
- boolean_t rss = (sa->rss_channels > 0) ? B_TRUE : B_FALSE;
+ struct sfc_rss *rss = &sa->rss;
+ boolean_t need_rss = (rss->channels > 0) ? B_TRUE : B_FALSE;
struct sfc_port *port = &sa->port;
int rc;
@@ -629,7 +622,7 @@ sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
* repeat this step without promiscuous and all-multicast flags set
*/
retry:
- rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, rss);
+ rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, need_rss);
if (rc == 0)
return 0;
else if (rc != EOPNOTSUPP)
@@ -687,10 +680,37 @@ sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
if (rc != 0)
goto fail_ev_qstart;
- rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
- &rxq->mem, rxq_info->entries,
- 0 /* not used on EF10 */, rxq_info->type_flags,
- evq->common, &rxq->common);
+ switch (rxq_info->type) {
+ case EFX_RXQ_TYPE_DEFAULT:
+ rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
+ &rxq->mem, rxq_info->entries, 0 /* not used on EF10 */,
+ rxq_info->type_flags, evq->common, &rxq->common);
+ break;
+ case EFX_RXQ_TYPE_ES_SUPER_BUFFER: {
+ struct rte_mempool *mp = rxq->refill_mb_pool;
+ struct rte_mempool_info mp_info;
+
+ rc = rte_mempool_ops_get_info(mp, &mp_info);
+ if (rc != 0) {
+ /* Positive errno is used in the driver */
+ rc = -rc;
+ goto fail_mp_get_info;
+ }
+ if (mp_info.contig_block_size <= 0) {
+ rc = EINVAL;
+ goto fail_bad_contig_block_size;
+ }
+ rc = efx_rx_qcreate_es_super_buffer(sa->nic, rxq->hw_index, 0,
+ mp_info.contig_block_size, rxq->buf_size,
+ mp->header_size + mp->elt_size + mp->trailer_size,
+ sa->rxd_wait_timeout_ns,
+ &rxq->mem, rxq_info->entries, rxq_info->type_flags,
+ evq->common, &rxq->common);
+ break;
+ }
+ default:
+ rc = ENOTSUP;
+ }
if (rc != 0)
goto fail_rx_qcreate;
@@ -721,6 +741,8 @@ fail_dp_qstart:
sfc_rx_qflush(sa, sw_index);
fail_rx_qcreate:
+fail_bad_contig_block_size:
+fail_mp_get_info:
sfc_ev_qstop(evq);
fail_ev_qstart:
@@ -792,48 +814,10 @@ sfc_rx_get_queue_offload_caps(struct sfc_adapter *sa)
return caps;
}
-static void
-sfc_rx_log_offloads(struct sfc_adapter *sa, const char *offload_group,
- const char *verdict, uint64_t offloads)
-{
- unsigned long long bit;
-
- while ((bit = __builtin_ffsll(offloads)) != 0) {
- uint64_t flag = (1ULL << --bit);
-
- sfc_err(sa, "Rx %s offload %s %s", offload_group,
- rte_eth_dev_rx_offload_name(flag), verdict);
-
- offloads &= ~flag;
- }
-}
-
-static boolean_t
-sfc_rx_queue_offloads_mismatch(struct sfc_adapter *sa, uint64_t requested)
-{
- uint64_t mandatory = sa->eth_dev->data->dev_conf.rxmode.offloads;
- uint64_t supported = sfc_rx_get_dev_offload_caps(sa) |
- sfc_rx_get_queue_offload_caps(sa);
- uint64_t rejected = requested & ~supported;
- uint64_t missing = (requested & mandatory) ^ mandatory;
- boolean_t mismatch = B_FALSE;
-
- if (rejected) {
- sfc_rx_log_offloads(sa, "queue", "is unsupported", rejected);
- mismatch = B_TRUE;
- }
-
- if (missing) {
- sfc_rx_log_offloads(sa, "queue", "must be set", missing);
- mismatch = B_TRUE;
- }
-
- return mismatch;
-}
-
static int
sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
- const struct rte_eth_rxconf *rx_conf)
+ const struct rte_eth_rxconf *rx_conf,
+ uint64_t offloads)
{
uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
sfc_rx_get_queue_offload_caps(sa);
@@ -858,17 +842,14 @@ sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
rc = EINVAL;
}
- if ((rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
+ if ((offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
DEV_RX_OFFLOAD_CHECKSUM)
sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
- (~rx_conf->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+ (~offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
- if (sfc_rx_queue_offloads_mismatch(sa, rx_conf->offloads))
- rc = EINVAL;
-
return rc;
}
@@ -887,7 +868,7 @@ sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
order = MIN(order, rte_bsf32(data_off));
- return 1u << (order - 1);
+ return 1u << order;
}
static uint16_t
@@ -979,26 +960,29 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
struct rte_mempool *mb_pool)
{
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ struct sfc_rss *rss = &sa->rss;
int rc;
unsigned int rxq_entries;
unsigned int evq_entries;
unsigned int rxq_max_fill_level;
+ uint64_t offloads;
uint16_t buf_size;
struct sfc_rxq_info *rxq_info;
struct sfc_evq *evq;
struct sfc_rxq *rxq;
struct sfc_dp_rx_qcreate_info info;
- rc = sa->dp_rx->qsize_up_rings(nb_rx_desc, &rxq_entries, &evq_entries,
- &rxq_max_fill_level);
+ rc = sa->dp_rx->qsize_up_rings(nb_rx_desc, mb_pool, &rxq_entries,
+ &evq_entries, &rxq_max_fill_level);
if (rc != 0)
goto fail_size_up_rings;
SFC_ASSERT(rxq_entries >= EFX_RXQ_MINNDESCS);
SFC_ASSERT(rxq_entries <= EFX_RXQ_MAXNDESCS);
- SFC_ASSERT(rxq_entries >= nb_rx_desc);
SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);
- rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf);
+ offloads = rx_conf->offloads |
+ sa->eth_dev->data->dev_conf.rxmode.offloads;
+ rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf, offloads);
if (rc != 0)
goto fail_bad_conf;
@@ -1011,7 +995,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
}
if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
- (~rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)) {
+ (~offloads & DEV_RX_OFFLOAD_SCATTER)) {
sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
"object size is too small", sw_index);
sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
@@ -1027,9 +1011,14 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
SFC_ASSERT(rxq_entries <= rxq_info->max_entries);
rxq_info->entries = rxq_entries;
- rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
+
+ if (sa->dp_rx->dp.hw_fw_caps & SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER)
+ rxq_info->type = EFX_RXQ_TYPE_ES_SUPER_BUFFER;
+ else
+ rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
+
rxq_info->type_flags =
- (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) ?
+ (offloads & DEV_RX_OFFLOAD_SCATTER) ?
EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
if ((encp->enc_tunnel_encapsulations_supported != 0) &&
@@ -1054,6 +1043,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
rxq->refill_threshold =
RTE_MAX(rx_conf->rx_free_thresh, SFC_RX_REFILL_BULK);
rxq->refill_mb_pool = mb_pool;
+ rxq->buf_size = buf_size;
rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
socket_id, &rxq->mem);
@@ -1068,10 +1058,8 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
info.batch_max = encp->enc_rx_batch_max;
info.prefix_size = encp->enc_rx_prefix_size;
-#if EFSYS_OPT_RX_SCALE
- if (sa->hash_support == EFX_RX_HASH_AVAILABLE && sa->rss_channels > 0)
+ if (rss->hash_support == EFX_RX_HASH_AVAILABLE && rss->channels > 0)
info.flags |= SFC_RXQ_FLAG_RSS_HASH;
-#endif
info.rxq_entries = rxq_info->entries;
info.rxq_hw_ring = rxq->mem.esm_base;
@@ -1079,6 +1067,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
info.evq_hw_ring = evq->mem.esm_base;
info.hw_index = rxq->hw_index;
info.mem_bar = sa->mem_bar.esb_base;
+ info.vi_window_shift = encp->enc_vi_window_shift;
rc = sa->dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
&RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
@@ -1140,85 +1129,228 @@ sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
rte_free(rxq);
}
-#if EFSYS_OPT_RX_SCALE
-efx_rx_hash_type_t
-sfc_rte_to_efx_hash_type(uint64_t rss_hf)
+/*
+ * Mapping between RTE RSS hash functions and their EFX counterparts.
+ */
+struct sfc_rss_hf_rte_to_efx sfc_rss_hf_map[] = {
+ { ETH_RSS_NONFRAG_IPV4_TCP,
+ EFX_RX_HASH(IPV4_TCP, 4TUPLE) },
+ { ETH_RSS_NONFRAG_IPV4_UDP,
+ EFX_RX_HASH(IPV4_UDP, 4TUPLE) },
+ { ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX,
+ EFX_RX_HASH(IPV6_TCP, 4TUPLE) },
+ { ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX,
+ EFX_RX_HASH(IPV6_UDP, 4TUPLE) },
+ { ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER,
+ EFX_RX_HASH(IPV4_TCP, 2TUPLE) | EFX_RX_HASH(IPV4_UDP, 2TUPLE) |
+ EFX_RX_HASH(IPV4, 2TUPLE) },
+ { ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER |
+ ETH_RSS_IPV6_EX,
+ EFX_RX_HASH(IPV6_TCP, 2TUPLE) | EFX_RX_HASH(IPV6_UDP, 2TUPLE) |
+ EFX_RX_HASH(IPV6, 2TUPLE) }
+};
+
+static efx_rx_hash_type_t
+sfc_rx_hash_types_mask_supp(efx_rx_hash_type_t hash_type,
+ unsigned int *hash_type_flags_supported,
+ unsigned int nb_hash_type_flags_supported)
{
- efx_rx_hash_type_t efx_hash_types = 0;
+ efx_rx_hash_type_t hash_type_masked = 0;
+ unsigned int i, j;
+
+ for (i = 0; i < nb_hash_type_flags_supported; ++i) {
+ unsigned int class_tuple_lbn[] = {
+ EFX_RX_CLASS_IPV4_TCP_LBN,
+ EFX_RX_CLASS_IPV4_UDP_LBN,
+ EFX_RX_CLASS_IPV4_LBN,
+ EFX_RX_CLASS_IPV6_TCP_LBN,
+ EFX_RX_CLASS_IPV6_UDP_LBN,
+ EFX_RX_CLASS_IPV6_LBN
+ };
+
+ for (j = 0; j < RTE_DIM(class_tuple_lbn); ++j) {
+ unsigned int tuple_mask = EFX_RX_CLASS_HASH_4TUPLE;
+ unsigned int flag;
+
+ tuple_mask <<= class_tuple_lbn[j];
+ flag = hash_type & tuple_mask;
+
+ if (flag == hash_type_flags_supported[i])
+ hash_type_masked |= flag;
+ }
+ }
- if ((rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
- ETH_RSS_NONFRAG_IPV4_OTHER)) != 0)
- efx_hash_types |= EFX_RX_HASH_IPV4;
+ return hash_type_masked;
+}
+
+int
+sfc_rx_hash_init(struct sfc_adapter *sa)
+{
+ struct sfc_rss *rss = &sa->rss;
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ uint32_t alg_mask = encp->enc_rx_scale_hash_alg_mask;
+ efx_rx_hash_alg_t alg;
+ unsigned int flags_supp[EFX_RX_HASH_NFLAGS];
+ unsigned int nb_flags_supp;
+ struct sfc_rss_hf_rte_to_efx *hf_map;
+ struct sfc_rss_hf_rte_to_efx *entry;
+ efx_rx_hash_type_t efx_hash_types;
+ unsigned int i;
+ int rc;
+
+ if (alg_mask & (1U << EFX_RX_HASHALG_TOEPLITZ))
+ alg = EFX_RX_HASHALG_TOEPLITZ;
+ else if (alg_mask & (1U << EFX_RX_HASHALG_PACKED_STREAM))
+ alg = EFX_RX_HASHALG_PACKED_STREAM;
+ else
+ return EINVAL;
- if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) != 0)
- efx_hash_types |= EFX_RX_HASH_TCPIPV4;
+ rc = efx_rx_scale_hash_flags_get(sa->nic, alg, flags_supp,
+ &nb_flags_supp);
+ if (rc != 0)
+ return rc;
- if ((rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
- ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX)) != 0)
- efx_hash_types |= EFX_RX_HASH_IPV6;
+ hf_map = rte_calloc_socket("sfc-rss-hf-map",
+ RTE_DIM(sfc_rss_hf_map),
+ sizeof(*hf_map), 0, sa->socket_id);
+ if (hf_map == NULL)
+ return ENOMEM;
+
+ entry = hf_map;
+ efx_hash_types = 0;
+ for (i = 0; i < RTE_DIM(sfc_rss_hf_map); ++i) {
+ efx_rx_hash_type_t ht;
+
+ ht = sfc_rx_hash_types_mask_supp(sfc_rss_hf_map[i].efx,
+ flags_supp, nb_flags_supp);
+ if (ht != 0) {
+ entry->rte = sfc_rss_hf_map[i].rte;
+ entry->efx = ht;
+ efx_hash_types |= ht;
+ ++entry;
+ }
+ }
- if ((rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX)) != 0)
- efx_hash_types |= EFX_RX_HASH_TCPIPV6;
+ rss->hash_alg = alg;
+ rss->hf_map_nb_entries = (unsigned int)(entry - hf_map);
+ rss->hf_map = hf_map;
+ rss->hash_types = efx_hash_types;
- return efx_hash_types;
+ return 0;
+}
+
+void
+sfc_rx_hash_fini(struct sfc_adapter *sa)
+{
+ struct sfc_rss *rss = &sa->rss;
+
+ rte_free(rss->hf_map);
+}
+
+int
+sfc_rx_hf_rte_to_efx(struct sfc_adapter *sa, uint64_t rte,
+ efx_rx_hash_type_t *efx)
+{
+ struct sfc_rss *rss = &sa->rss;
+ efx_rx_hash_type_t hash_types = 0;
+ unsigned int i;
+
+ for (i = 0; i < rss->hf_map_nb_entries; ++i) {
+ uint64_t rte_mask = rss->hf_map[i].rte;
+
+ if ((rte & rte_mask) != 0) {
+ rte &= ~rte_mask;
+ hash_types |= rss->hf_map[i].efx;
+ }
+ }
+
+ if (rte != 0) {
+ sfc_err(sa, "unsupported hash functions requested");
+ return EINVAL;
+ }
+
+ *efx = hash_types;
+
+ return 0;
}
uint64_t
-sfc_efx_to_rte_hash_type(efx_rx_hash_type_t efx_hash_types)
+sfc_rx_hf_efx_to_rte(struct sfc_adapter *sa, efx_rx_hash_type_t efx)
{
- uint64_t rss_hf = 0;
+ struct sfc_rss *rss = &sa->rss;
+ uint64_t rte = 0;
+ unsigned int i;
- if ((efx_hash_types & EFX_RX_HASH_IPV4) != 0)
- rss_hf |= (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
- ETH_RSS_NONFRAG_IPV4_OTHER);
+ for (i = 0; i < rss->hf_map_nb_entries; ++i) {
+ efx_rx_hash_type_t hash_type = rss->hf_map[i].efx;
- if ((efx_hash_types & EFX_RX_HASH_TCPIPV4) != 0)
- rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ if ((efx & hash_type) == hash_type)
+ rte |= rss->hf_map[i].rte;
+ }
- if ((efx_hash_types & EFX_RX_HASH_IPV6) != 0)
- rss_hf |= (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
- ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX);
+ return rte;
+}
- if ((efx_hash_types & EFX_RX_HASH_TCPIPV6) != 0)
- rss_hf |= (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX);
+static int
+sfc_rx_process_adv_conf_rss(struct sfc_adapter *sa,
+ struct rte_eth_rss_conf *conf)
+{
+ struct sfc_rss *rss = &sa->rss;
+ efx_rx_hash_type_t efx_hash_types = rss->hash_types;
+ uint64_t rss_hf = sfc_rx_hf_efx_to_rte(sa, efx_hash_types);
+ int rc;
+
+ if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
+ if ((conf->rss_hf != 0 && conf->rss_hf != rss_hf) ||
+ conf->rss_key != NULL)
+ return EINVAL;
+ }
+
+ if (conf->rss_hf != 0) {
+ rc = sfc_rx_hf_rte_to_efx(sa, conf->rss_hf, &efx_hash_types);
+ if (rc != 0)
+ return rc;
+ }
+
+ if (conf->rss_key != NULL) {
+ if (conf->rss_key_len != sizeof(rss->key)) {
+ sfc_err(sa, "RSS key size is wrong (should be %lu)",
+ sizeof(rss->key));
+ return EINVAL;
+ }
+ rte_memcpy(rss->key, conf->rss_key, sizeof(rss->key));
+ }
+
+ rss->hash_types = efx_hash_types;
- return rss_hf;
+ return 0;
}
-#endif
-#if EFSYS_OPT_RX_SCALE
static int
sfc_rx_rss_config(struct sfc_adapter *sa)
{
+ struct sfc_rss *rss = &sa->rss;
int rc = 0;
- if (sa->rss_channels > 0) {
+ if (rss->channels > 0) {
rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
- EFX_RX_HASHALG_TOEPLITZ,
- sa->rss_hash_types, B_TRUE);
+ rss->hash_alg, rss->hash_types,
+ B_TRUE);
if (rc != 0)
goto finish;
rc = efx_rx_scale_key_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
- sa->rss_key,
- sizeof(sa->rss_key));
+ rss->key, sizeof(rss->key));
if (rc != 0)
goto finish;
rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
- sa->rss_tbl, RTE_DIM(sa->rss_tbl));
+ rss->tbl, RTE_DIM(rss->tbl));
}
finish:
return rc;
}
-#else
-static int
-sfc_rx_rss_config(__rte_unused struct sfc_adapter *sa)
-{
- return 0;
-}
-#endif
int
sfc_rx_start(struct sfc_adapter *sa)
@@ -1292,35 +1424,25 @@ sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
static int
sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
{
- uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
- sfc_rx_get_queue_offload_caps(sa);
- uint64_t offloads_rejected = rxmode->offloads & ~offloads_supported;
+ struct sfc_rss *rss = &sa->rss;
int rc = 0;
switch (rxmode->mq_mode) {
case ETH_MQ_RX_NONE:
/* No special checks are required */
break;
-#if EFSYS_OPT_RX_SCALE
case ETH_MQ_RX_RSS:
- if (sa->rss_support == EFX_RX_SCALE_UNAVAILABLE) {
+ if (rss->context_type == EFX_RX_SCALE_UNAVAILABLE) {
sfc_err(sa, "RSS is not available");
rc = EINVAL;
}
break;
-#endif
default:
sfc_err(sa, "Rx multi-queue mode %u not supported",
rxmode->mq_mode);
rc = EINVAL;
}
- if (offloads_rejected) {
- sfc_rx_log_offloads(sa, "device", "is unsupported",
- offloads_rejected);
- rc = EINVAL;
- }
-
if (~rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
sfc_warn(sa, "FCS stripping cannot be disabled - always on");
rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
@@ -1361,6 +1483,7 @@ sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
int
sfc_rx_configure(struct sfc_adapter *sa)
{
+ struct sfc_rss *rss = &sa->rss;
struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
int rc;
@@ -1410,21 +1533,26 @@ sfc_rx_configure(struct sfc_adapter *sa)
sa->rxq_count++;
}
-#if EFSYS_OPT_RX_SCALE
- sa->rss_channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
- MIN(sa->rxq_count, EFX_MAXRSS) : 0;
+ rss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
+ MIN(sa->rxq_count, EFX_MAXRSS) : 0;
- if (sa->rss_channels > 0) {
+ if (rss->channels > 0) {
+ struct rte_eth_rss_conf *adv_conf_rss;
unsigned int sw_index;
for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
- sa->rss_tbl[sw_index] = sw_index % sa->rss_channels;
+ rss->tbl[sw_index] = sw_index % rss->channels;
+
+ adv_conf_rss = &dev_conf->rx_adv_conf.rss_conf;
+ rc = sfc_rx_process_adv_conf_rss(sa, adv_conf_rss);
+ if (rc != 0)
+ goto fail_rx_process_adv_conf_rss;
}
-#endif
done:
return 0;
+fail_rx_process_adv_conf_rss:
fail_rx_qinit_info:
fail_rxqs_realloc:
fail_rxqs_alloc:
@@ -1443,9 +1571,11 @@ fail_check_mode:
void
sfc_rx_close(struct sfc_adapter *sa)
{
+ struct sfc_rss *rss = &sa->rss;
+
sfc_rx_fini_queues(sa, 0);
- sa->rss_channels = 0;
+ rss->channels = 0;
rte_free(sa->rxq_info);
sa->rxq_info = NULL;
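The table-driven RSS hash-function conversion added above is easiest to see in isolation. The following is a minimal standalone sketch, not part of the patch: the simplified hf_entry type and the bit values are hypothetical stand-ins for struct sfc_rss and the EFX_RX_HASH() flags. It mirrors the two rules in the new code: on the RTE-to-EFX path every requested RTE bit must be covered by the map, while on the EFX-to-RTE path an entry is reported only if all of its EFX bits are present.

#include <stdint.h>
#include <stdio.h>

struct hf_entry {
	uint64_t rte;  /* ETH_RSS_* bits covered by this entry */
	uint32_t efx;  /* corresponding EFX hash-type flags */
};

/* Hypothetical two-entry map, analogous to sfc_rss_hf_map[]. */
static const struct hf_entry hf_map[] = {
	{ 0x1, 0x3 },  /* e.g. non-frag IPv4/TCP -> IPv4_TCP 4-tuple */
	{ 0x2, 0xc },  /* e.g. non-frag IPv4/UDP -> IPv4_UDP 4-tuple */
};

#define HF_MAP_NB (sizeof(hf_map) / sizeof(hf_map[0]))

/* RTE -> EFX: every requested RTE bit must be covered, otherwise fail. */
static int hf_rte_to_efx(uint64_t rte, uint32_t *efx)
{
	uint32_t types = 0;
	size_t i;

	for (i = 0; i < HF_MAP_NB; i++) {
		if ((rte & hf_map[i].rte) != 0) {
			rte &= ~hf_map[i].rte;
			types |= hf_map[i].efx;
		}
	}
	if (rte != 0)
		return -1;  /* unsupported hash functions requested */
	*efx = types;
	return 0;
}

/* EFX -> RTE: an entry is reported only if all of its EFX bits are set. */
static uint64_t hf_efx_to_rte(uint32_t efx)
{
	uint64_t rte = 0;
	size_t i;

	for (i = 0; i < HF_MAP_NB; i++) {
		if ((efx & hf_map[i].efx) == hf_map[i].efx)
			rte |= hf_map[i].rte;
	}
	return rte;
}

int main(void)
{
	uint32_t efx;

	if (hf_rte_to_efx(0x3, &efx) == 0)
		printf("efx 0x%x -> rte 0x%llx\n",
		       efx, (unsigned long long)hf_efx_to_rte(efx));
	if (hf_rte_to_efx(0x4, &efx) != 0)
		printf("0x4 rejected: not covered by the map\n");
	return 0;
}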
diff --git a/drivers/net/sfc/sfc_rx.h b/drivers/net/sfc/sfc_rx.h
index 6706ee6f..3fba7d8a 100644
--- a/drivers/net/sfc/sfc_rx.h
+++ b/drivers/net/sfc/sfc_rx.h
@@ -60,6 +60,7 @@ struct sfc_rxq {
unsigned int hw_index;
unsigned int refill_threshold;
struct rte_mempool *refill_mb_pool;
+ uint16_t buf_size;
struct sfc_dp_rxq *dp;
unsigned int state;
};
@@ -152,10 +153,12 @@ unsigned int sfc_rx_qdesc_npending(struct sfc_adapter *sa,
unsigned int sw_index);
int sfc_rx_qdesc_done(struct sfc_dp_rxq *dp_rxq, unsigned int offset);
-#if EFSYS_OPT_RX_SCALE
-efx_rx_hash_type_t sfc_rte_to_efx_hash_type(uint64_t rss_hf);
-uint64_t sfc_efx_to_rte_hash_type(efx_rx_hash_type_t efx_hash_types);
-#endif
+int sfc_rx_hash_init(struct sfc_adapter *sa);
+void sfc_rx_hash_fini(struct sfc_adapter *sa);
+int sfc_rx_hf_rte_to_efx(struct sfc_adapter *sa, uint64_t rte,
+ efx_rx_hash_type_t *efx);
+uint64_t sfc_rx_hf_efx_to_rte(struct sfc_adapter *sa,
+ efx_rx_hash_type_t efx);
#ifdef __cplusplus
}
diff --git a/drivers/net/sfc/sfc_tso.c b/drivers/net/sfc/sfc_tso.c
index ba8496df..effe9853 100644
--- a/drivers/net/sfc/sfc_tso.c
+++ b/drivers/net/sfc/sfc_tso.c
@@ -164,7 +164,8 @@ sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t));
sent_seq = rte_be_to_cpu_32(sent_seq);
- efx_tx_qdesc_tso2_create(txq->common, packet_id, sent_seq, m->tso_segsz,
+ efx_tx_qdesc_tso2_create(txq->common, packet_id, 0, sent_seq,
+ m->tso_segsz,
*pend, EFX_TX_FATSOV2_OPT_NDESCS);
*pend += EFX_TX_FATSOV2_OPT_NDESCS;
diff --git a/drivers/net/sfc/sfc_tweak.h b/drivers/net/sfc/sfc_tweak.h
index b4026851..4d543f68 100644
--- a/drivers/net/sfc/sfc_tweak.h
+++ b/drivers/net/sfc/sfc_tweak.h
@@ -34,4 +34,12 @@
/** Number of mbufs to be freed in bulk in a single call */
#define SFC_TX_REAP_BULK_SIZE 32
+/**
+ * Default head-of-line blocking timeout to wait for an Rx descriptor
+ * before a packet is dropped because no descriptors are available.
+ *
+ * Used only with the DPDK firmware variant in equal stride super-buffer
+ * Rx mode.
+ */
+#define SFC_RXD_WAIT_TIMEOUT_NS_DEF (200U * 1000)
+
#endif /* _SFC_TWEAK_H_ */
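For reference, the default of (200U * 1000) nanoseconds works out to 200 microseconds of head-of-line blocking before a packet is dropped for lack of Rx descriptors.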
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index 757b03ba..1bcc2c69 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -73,48 +73,10 @@ sfc_tx_get_queue_offload_caps(struct sfc_adapter *sa)
return caps;
}
-static void
-sfc_tx_log_offloads(struct sfc_adapter *sa, const char *offload_group,
- const char *verdict, uint64_t offloads)
-{
- unsigned long long bit;
-
- while ((bit = __builtin_ffsll(offloads)) != 0) {
- uint64_t flag = (1ULL << --bit);
-
- sfc_err(sa, "Tx %s offload %s %s", offload_group,
- rte_eth_dev_tx_offload_name(flag), verdict);
-
- offloads &= ~flag;
- }
-}
-
-static int
-sfc_tx_queue_offload_mismatch(struct sfc_adapter *sa, uint64_t requested)
-{
- uint64_t mandatory = sa->eth_dev->data->dev_conf.txmode.offloads;
- uint64_t supported = sfc_tx_get_dev_offload_caps(sa) |
- sfc_tx_get_queue_offload_caps(sa);
- uint64_t rejected = requested & ~supported;
- uint64_t missing = (requested & mandatory) ^ mandatory;
- boolean_t mismatch = B_FALSE;
-
- if (rejected) {
- sfc_tx_log_offloads(sa, "queue", "is unsupported", rejected);
- mismatch = B_TRUE;
- }
-
- if (missing) {
- sfc_tx_log_offloads(sa, "queue", "must be set", missing);
- mismatch = B_TRUE;
- }
-
- return mismatch;
-}
-
static int
sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
- const struct rte_eth_txconf *tx_conf)
+ const struct rte_eth_txconf *tx_conf,
+ uint64_t offloads)
{
int rc = 0;
@@ -138,15 +100,12 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
}
/* We either perform both TCP and UDP offload, or no offload at all */
- if (((tx_conf->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
- ((tx_conf->offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
+ if (((offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
+ ((offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
sfc_err(sa, "TCP and UDP offloads can't be set independently");
rc = EINVAL;
}
- if (sfc_tx_queue_offload_mismatch(sa, tx_conf->offloads))
- rc = EINVAL;
-
return rc;
}
@@ -171,6 +130,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
struct sfc_txq *txq;
int rc = 0;
struct sfc_dp_tx_qcreate_info info;
+ uint64_t offloads;
sfc_log_init(sa, "TxQ = %u", sw_index);
@@ -183,7 +143,9 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
SFC_ASSERT(txq_entries >= nb_tx_desc);
SFC_ASSERT(txq_max_fill_level <= nb_tx_desc);
- rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf);
+ offloads = tx_conf->offloads |
+ sa->eth_dev->data->dev_conf.txmode.offloads;
+ rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf, offloads);
if (rc != 0)
goto fail_bad_conf;
@@ -210,7 +172,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
(tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
SFC_TX_DEFAULT_FREE_THRESH;
txq->flags = tx_conf->txq_flags;
- txq->offloads = tx_conf->offloads;
+ txq->offloads = offloads;
rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_TXQ_SIZE(txq_info->entries),
socket_id, &txq->mem);
@@ -221,7 +183,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
info.max_fill_level = txq_max_fill_level;
info.free_thresh = txq->free_thresh;
info.flags = tx_conf->txq_flags;
- info.offloads = tx_conf->offloads;
+ info.offloads = offloads;
info.txq_entries = txq_info->entries;
info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
info.txq_hw_ring = txq->mem.esm_base;
@@ -229,6 +191,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
info.evq_hw_ring = evq->mem.esm_base;
info.hw_index = txq->hw_index;
info.mem_bar = sa->mem_bar.esb_base;
+ info.vi_window_shift = encp->enc_vi_window_shift;
rc = sa->dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index,
&RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
@@ -303,9 +266,6 @@ sfc_tx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
static int
sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
{
- uint64_t offloads_supported = sfc_tx_get_dev_offload_caps(sa) |
- sfc_tx_get_queue_offload_caps(sa);
- uint64_t offloads_rejected = txmode->offloads & ~offloads_supported;
int rc = 0;
switch (txmode->mq_mode) {
@@ -336,12 +296,6 @@ sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
rc = EINVAL;
}
- if (offloads_rejected) {
- sfc_tx_log_offloads(sa, "device", "is unsupported",
- offloads_rejected);
- rc = EINVAL;
- }
-
return rc;
}
@@ -495,8 +449,7 @@ sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
(txq->offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
flags |= EFX_TXQ_CKSUM_TCPUDP;
- if ((~txq->flags & ETH_TXQ_FLAGS_IGNORE) &&
- (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
+ if (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
flags |= EFX_TXQ_CKSUM_INNER_TCPUDP;
}
@@ -606,7 +559,7 @@ sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
sfc_err(sa, "TxQ %u flush timed out", sw_index);
if (txq->state & SFC_TXQ_FLUSHED)
- sfc_info(sa, "TxQ %u flushed", sw_index);
+ sfc_notice(sa, "TxQ %u flushed", sw_index);
}
sa->dp_tx->qreap(txq->dp);
diff --git a/drivers/net/softnic/rte_eth_softnic.c b/drivers/net/softnic/rte_eth_softnic.c
index b0c13415..6b3c13e5 100644
--- a/drivers/net/softnic/rte_eth_softnic.c
+++ b/drivers/net/softnic/rte_eth_softnic.c
@@ -67,6 +67,12 @@ static const struct rte_eth_dev_info pmd_dev_info = {
},
};
+static int pmd_softnic_logtype;
+
+#define PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, pmd_softnic_logtype, \
+ "%s(): " fmt "\n", __func__, ##args)
+
static void
pmd_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_dev_info *dev_info)
@@ -522,13 +528,15 @@ pmd_ethdev_register(struct rte_vdev_device *vdev,
soft_dev->data->dev_private = dev_private;
soft_dev->data->dev_link.link_speed = hard_speed;
soft_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- soft_dev->data->dev_link.link_autoneg = ETH_LINK_AUTONEG;
+ soft_dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
soft_dev->data->dev_link.link_status = ETH_LINK_DOWN;
soft_dev->data->mac_addrs = &eth_addr;
soft_dev->data->promiscuous = 1;
soft_dev->data->kdrv = RTE_KDRV_NONE;
soft_dev->data->numa_node = numa_node;
+ rte_eth_dev_probing_finish(soft_dev);
+
return 0;
}
@@ -725,13 +733,27 @@ pmd_probe(struct rte_vdev_device *vdev)
uint16_t hard_port_id;
int numa_node;
void *dev_private;
+ struct rte_eth_dev *eth_dev;
+ const char *name = rte_vdev_device_name(vdev);
- RTE_LOG(INFO, PMD,
- "Probing device \"%s\"\n",
- rte_vdev_device_name(vdev));
+ PMD_LOG(INFO, "Probing device \"%s\"", name);
/* Parse input arguments */
params = rte_vdev_device_args(vdev);
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(params) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ PMD_LOG(ERR, "Failed to probe %s", name);
+ return -1;
+ }
+ /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->dev_ops = &pmd_ops;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
+
if (!params)
return -EINVAL;
@@ -763,8 +785,8 @@ pmd_probe(struct rte_vdev_device *vdev)
return -ENOMEM;
/* Register soft ethdev */
- RTE_LOG(INFO, PMD,
- "Creating soft ethdev \"%s\" for hard ethdev \"%s\"\n",
+ PMD_LOG(INFO,
+ "Creating soft ethdev \"%s\" for hard ethdev \"%s\"",
p.soft.name, p.hard.name);
status = pmd_ethdev_register(vdev, &p, dev_private);
@@ -785,7 +807,7 @@ pmd_remove(struct rte_vdev_device *vdev)
if (!vdev)
return -EINVAL;
- RTE_LOG(INFO, PMD, "Removing device \"%s\"\n",
+ PMD_LOG(INFO, "Removing device \"%s\"",
rte_vdev_device_name(vdev));
/* Find the ethdev entry */
@@ -820,3 +842,12 @@ RTE_PMD_REGISTER_PARAM_STRING(net_softnic,
PMD_PARAM_SOFT_TM_DEQ_BSZ "=<int> "
PMD_PARAM_HARD_NAME "=<string> "
PMD_PARAM_HARD_TX_QUEUE_ID "=<int>");
+
+RTE_INIT(pmd_softnic_init_log);
+static void
+pmd_softnic_init_log(void)
+{
+ pmd_softnic_logtype = rte_log_register("pmd.net.softnic");
+ if (pmd_softnic_logtype >= 0)
+ rte_log_set_level(pmd_softnic_logtype, RTE_LOG_NOTICE);
+}
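The dynamic logtype above replaces the fixed RTE_LOG(..., PMD, ...) calls. Below is a standalone sketch of the same pattern with hypothetical names and hard-coded level constants in place of the rte_log machinery; it shows how the per-driver level, NOTICE by default, gates the messages emitted by the wrapper macro.

#include <stdarg.h>
#include <stdio.h>

/* Hypothetical level constants mirroring the RTE_LOG_* values. */
enum { LOG_ERR = 4, LOG_NOTICE = 6, LOG_INFO = 7, LOG_DEBUG = 8 };

/* Stand-in for the registered logtype's current level (default NOTICE). */
static int softnic_log_level = LOG_NOTICE;

static void
softnic_log(int level, const char *func, const char *fmt, ...)
{
	va_list ap;

	if (level > softnic_log_level)
		return;  /* message filtered out by the configured level */
	fprintf(stderr, "%s(): ", func);
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
	fputc('\n', stderr);
}

/* Analogous to the PMD_LOG() macro added above. */
#define SOFTNIC_LOG(level, ...) \
	softnic_log(LOG_ ## level, __func__, __VA_ARGS__)

int main(void)
{
	SOFTNIC_LOG(INFO, "suppressed at the default NOTICE level");
	softnic_log_level = LOG_DEBUG;  /* cf. raising the level at run time */
	SOFTNIC_LOG(INFO, "Probing device \"%s\"", "net_softnic0");
	return 0;
}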
diff --git a/drivers/net/softnic/rte_eth_softnic_tm.c b/drivers/net/softnic/rte_eth_softnic_tm.c
index 79f1c6a8..11d638a9 100644
--- a/drivers/net/softnic/rte_eth_softnic_tm.c
+++ b/drivers/net/softnic/rte_eth_softnic_tm.c
@@ -479,6 +479,8 @@ static const struct rte_tm_capabilities tm_cap = {
.sched_wfq_n_groups_max = 1,
.sched_wfq_weight_max = UINT32_MAX,
+ .cman_wred_packet_mode_supported = WRED_SUPPORTED,
+ .cman_wred_byte_mode_supported = 0,
.cman_head_drop_supported = 0,
.cman_wred_context_n_max = 0,
.cman_wred_context_private_n_max = 0,
@@ -667,6 +669,8 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
.shaper_shared_n_max = 0,
.cman_head_drop_supported = 0,
+ .cman_wred_packet_mode_supported = WRED_SUPPORTED,
+ .cman_wred_byte_mode_supported = 0,
.cman_wred_context_private_supported = WRED_SUPPORTED,
.cman_wred_context_shared_n_max = 0,
@@ -828,6 +832,8 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = {
{.leaf = {
.cman_head_drop_supported = 0,
+ .cman_wred_packet_mode_supported = WRED_SUPPORTED,
+ .cman_wred_byte_mode_supported = 0,
.cman_wred_context_private_supported = WRED_SUPPORTED,
.cman_wred_context_shared_n_max = 0,
} },
@@ -1243,12 +1249,23 @@ wred_profile_check(struct rte_eth_dev *dev,
NULL,
rte_strerror(EINVAL));
+ /* WRED profile should be in packet mode */
+ if (profile->packet_mode == 0)
+ return -rte_tm_error_set(error,
+ ENOTSUP,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE,
+ NULL,
+ rte_strerror(ENOTSUP));
+
/* min_th <= max_th, max_th > 0 */
for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
- uint16_t min_th = profile->red_params[color].min_th;
- uint16_t max_th = profile->red_params[color].max_th;
+ uint32_t min_th = profile->red_params[color].min_th;
+ uint32_t max_th = profile->red_params[color].max_th;
- if (min_th > max_th || max_th == 0)
+ if (min_th > max_th ||
+ max_th == 0 ||
+ min_th > UINT16_MAX ||
+ max_th > UINT16_MAX)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_WRED_PROFILE,
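The tightened WRED check above now also requires packet mode and 16-bit thresholds. A standalone sketch with a simplified, hypothetical profile type (not the real rte_tm_wred_params) restates the acceptance rule: packet mode must be set and, for every color, min_th <= max_th, max_th > 0, and both thresholds must fit in 16 bits.

#include <stdint.h>
#include <stdio.h>

enum { COLOR_GREEN, COLOR_YELLOW, COLOR_RED, N_COLORS };

struct wred_profile {
	int packet_mode;                /* must be non-zero */
	struct {
		uint32_t min_th;
		uint32_t max_th;
	} red[N_COLORS];
};

static int wred_profile_ok(const struct wred_profile *p)
{
	int color;

	if (p->packet_mode == 0)
		return 0;               /* byte mode is not supported */

	for (color = 0; color < N_COLORS; color++) {
		uint32_t min_th = p->red[color].min_th;
		uint32_t max_th = p->red[color].max_th;

		if (min_th > max_th || max_th == 0 ||
		    min_th > UINT16_MAX || max_th > UINT16_MAX)
			return 0;
	}
	return 1;
}

int main(void)
{
	struct wred_profile p = {
		.packet_mode = 1,
		.red = { { 32, 64 }, { 32, 64 }, { 16, 32 } },
	};

	printf("profile %s\n", wred_profile_ok(&p) ? "accepted" : "rejected");
	return 0;
}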
diff --git a/drivers/net/szedata2/Makefile b/drivers/net/szedata2/Makefile
index 0ebd3ec5..b77fae16 100644
--- a/drivers/net/szedata2/Makefile
+++ b/drivers/net/szedata2/Makefile
@@ -1,33 +1,5 @@
-# BSD LICENSE
-#
-# Copyright (c) 2015 CESNET
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of CESNET nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2015 CESNET
include $(RTE_SDK)/mk/rte.vars.mk
@@ -51,7 +23,6 @@ LIBABIVER := 1
# all source are stored in SRCS-y
#
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += rte_eth_szedata2.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += szedata2_iobuf.c
#
# Export include files
diff --git a/drivers/net/szedata2/rte_eth_szedata2.c b/drivers/net/szedata2/rte_eth_szedata2.c
index e53c738d..910c64d0 100644
--- a/drivers/net/szedata2/rte_eth_szedata2.c
+++ b/drivers/net/szedata2/rte_eth_szedata2.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015 - 2016 CESNET
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of CESNET nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 - 2016 CESNET
*/
#include <stdint.h>
@@ -50,10 +21,9 @@
#include <rte_memcpy.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
-#include <rte_atomic.h>
#include "rte_eth_szedata2.h"
-#include "szedata2_iobuf.h"
+#include "szedata2_logs.h"
#define RTE_ETH_SZEDATA2_MAX_RX_QUEUES 32
#define RTE_ETH_SZEDATA2_MAX_TX_QUEUES 32
@@ -68,9 +38,53 @@
#define SZEDATA2_DEV_PATH_FMT "/dev/szedataII%u"
+/**
+ * Format string for suffix used to differentiate between Ethernet ports
+ * on the same PCI device.
+ */
+#define SZEDATA2_ETH_DEV_NAME_SUFFIX_FMT "-port%u"
+
+/**
+ * Maximum number of ports for one device.
+ */
+#define SZEDATA2_MAX_PORTS 2
+
+/**
+ * Entry in list of PCI devices for this driver.
+ */
+struct pci_dev_list_entry;
+struct pci_dev_list_entry {
+ LIST_ENTRY(pci_dev_list_entry) next;
+ struct rte_pci_device *pci_dev;
+ unsigned int port_count;
+};
+
+/* List of PCI devices with number of ports for this driver. */
+LIST_HEAD(pci_dev_list, pci_dev_list_entry) szedata2_pci_dev_list =
+ LIST_HEAD_INITIALIZER(szedata2_pci_dev_list);
+
+struct port_info {
+ unsigned int rx_base_id;
+ unsigned int tx_base_id;
+ unsigned int rx_count;
+ unsigned int tx_count;
+ int numa_node;
+};
+
+struct pmd_internals {
+ struct rte_eth_dev *dev;
+ uint16_t max_rx_queues;
+ uint16_t max_tx_queues;
+ unsigned int rxq_base_id;
+ unsigned int txq_base_id;
+ char *sze_dev_path;
+};
+
struct szedata2_rx_queue {
+ struct pmd_internals *priv;
struct szedata *sze;
uint8_t rx_channel;
+ uint16_t qid;
uint16_t in_port;
struct rte_mempool *mb_pool;
volatile uint64_t rx_pkts;
@@ -79,21 +93,17 @@ struct szedata2_rx_queue {
};
struct szedata2_tx_queue {
+ struct pmd_internals *priv;
struct szedata *sze;
uint8_t tx_channel;
+ uint16_t qid;
volatile uint64_t tx_pkts;
volatile uint64_t tx_bytes;
volatile uint64_t err_pkts;
};
-struct pmd_internals {
- struct szedata2_rx_queue rx_queue[RTE_ETH_SZEDATA2_MAX_RX_QUEUES];
- struct szedata2_tx_queue tx_queue[RTE_ETH_SZEDATA2_MAX_TX_QUEUES];
- uint16_t max_rx_queues;
- uint16_t max_tx_queues;
- char sze_dev[PATH_MAX];
- struct rte_mem_resource *pci_rsc;
-};
+int szedata2_logtype_init;
+int szedata2_logtype_driver;
static struct ether_addr eth_addr = {
.addr_bytes = { 0x00, 0x11, 0x17, 0x00, 0x00, 0x00 }
@@ -133,8 +143,10 @@ eth_szedata2_rx(void *queue,
for (i = 0; i < nb_pkts; i++) {
mbuf = rte_pktmbuf_alloc(sze_q->mb_pool);
- if (unlikely(mbuf == NULL))
+ if (unlikely(mbuf == NULL)) {
+ sze_q->priv->dev->data->rx_mbuf_alloc_failed++;
break;
+ }
/* get the next sze packet */
if (sze->ct_rx_lck != NULL && !sze->ct_rx_rem_bytes &&
@@ -318,10 +330,10 @@ eth_szedata2_rx(void *queue,
* sze packet will not fit in one mbuf,
* scattered mode is not enabled, drop packet
*/
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"SZE segment %d bytes will not fit in one mbuf "
"(%d bytes), scattered mode is not enabled, "
- "drop packet!!\n",
+ "drop packet!!",
packet_size, buf_size);
rte_pktmbuf_free(mbuf);
}
@@ -354,6 +366,8 @@ eth_szedata2_rx_scattered(void *queue,
uint16_t packet_len1 = 0;
uint16_t packet_len2 = 0;
uint16_t hw_data_align;
+ uint64_t *mbuf_failed_ptr =
+ &sze_q->priv->dev->data->rx_mbuf_alloc_failed;
if (unlikely(sze_q->sze == NULL || nb_pkts == 0))
return 0;
@@ -541,6 +555,7 @@ eth_szedata2_rx_scattered(void *queue,
sze->ct_rx_lck = ct_rx_lck_backup;
sze->ct_rx_rem_bytes = ct_rx_rem_bytes_backup;
sze->ct_rx_cur_ptr = ct_rx_cur_ptr_backup;
+ sze_q->priv->dev->data->rx_mbuf_alloc_failed++;
break;
}
@@ -590,6 +605,7 @@ eth_szedata2_rx_scattered(void *queue,
ct_rx_rem_bytes_backup;
sze->ct_rx_cur_ptr =
ct_rx_cur_ptr_backup;
+ (*mbuf_failed_ptr)++;
goto finish;
}
@@ -633,6 +649,7 @@ eth_szedata2_rx_scattered(void *queue,
ct_rx_rem_bytes_backup;
sze->ct_rx_cur_ptr =
ct_rx_cur_ptr_backup;
+ (*mbuf_failed_ptr)++;
goto finish;
}
@@ -889,7 +906,7 @@ eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rxq_id)
if (rxq->sze == NULL) {
uint32_t rx = 1 << rxq->rx_channel;
uint32_t tx = 0;
- rxq->sze = szedata_open(internals->sze_dev);
+ rxq->sze = szedata_open(internals->sze_dev_path);
if (rxq->sze == NULL)
return -EINVAL;
ret = szedata_subscribe3(rxq->sze, &rx, &tx);
@@ -934,7 +951,7 @@ eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t txq_id)
if (txq->sze == NULL) {
uint32_t rx = 0;
uint32_t tx = 1 << txq->tx_channel;
- txq->sze = szedata_open(internals->sze_dev);
+ txq->sze = szedata_open(internals->sze_dev_path);
if (txq->sze == NULL)
return -EINVAL;
ret = szedata_subscribe3(txq->sze, &rx, &tx);
@@ -1017,7 +1034,7 @@ static int
eth_dev_configure(struct rte_eth_dev *dev)
{
struct rte_eth_dev_data *data = dev->data;
- if (data->dev_conf.rxmode.enable_scatter == 1) {
+ if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
dev->rx_pkt_burst = eth_szedata2_rx_scattered;
data->scattered_rx = 1;
} else {
@@ -1032,13 +1049,17 @@ eth_dev_info(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
struct pmd_internals *internals = dev->data->dev_private;
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
dev_info->if_index = 0;
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = (uint32_t)-1;
dev_info->max_rx_queues = internals->max_rx_queues;
dev_info->max_tx_queues = internals->max_tx_queues;
dev_info->min_rx_bufsize = 0;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
+ dev_info->tx_offload_capa = 0;
+ dev_info->rx_queue_offload_capa = 0;
+ dev_info->tx_queue_offload_capa = 0;
dev_info->speed_capa = ETH_LINK_SPEED_100G;
}
@@ -1054,22 +1075,29 @@ eth_stats_get(struct rte_eth_dev *dev,
uint64_t tx_err_total = 0;
uint64_t rx_total_bytes = 0;
uint64_t tx_total_bytes = 0;
- const struct pmd_internals *internals = dev->data->dev_private;
- for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < nb_rx; i++) {
- stats->q_ipackets[i] = internals->rx_queue[i].rx_pkts;
- stats->q_ibytes[i] = internals->rx_queue[i].rx_bytes;
- rx_total += stats->q_ipackets[i];
- rx_total_bytes += stats->q_ibytes[i];
+ for (i = 0; i < nb_rx; i++) {
+ struct szedata2_rx_queue *rxq = dev->data->rx_queues[i];
+
+ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_ipackets[i] = rxq->rx_pkts;
+ stats->q_ibytes[i] = rxq->rx_bytes;
+ }
+ rx_total += rxq->rx_pkts;
+ rx_total_bytes += rxq->rx_bytes;
}
- for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < nb_tx; i++) {
- stats->q_opackets[i] = internals->tx_queue[i].tx_pkts;
- stats->q_obytes[i] = internals->tx_queue[i].tx_bytes;
- stats->q_errors[i] = internals->tx_queue[i].err_pkts;
- tx_total += stats->q_opackets[i];
- tx_total_bytes += stats->q_obytes[i];
- tx_err_total += stats->q_errors[i];
+ for (i = 0; i < nb_tx; i++) {
+ struct szedata2_tx_queue *txq = dev->data->tx_queues[i];
+
+ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_opackets[i] = txq->tx_pkts;
+ stats->q_obytes[i] = txq->tx_bytes;
+ stats->q_errors[i] = txq->err_pkts;
+ }
+ tx_total += txq->tx_pkts;
+ tx_total_bytes += txq->tx_bytes;
+ tx_err_total += txq->err_pkts;
}
stats->ipackets = rx_total;
@@ -1077,6 +1105,7 @@ eth_stats_get(struct rte_eth_dev *dev,
stats->ibytes = rx_total_bytes;
stats->obytes = tx_total_bytes;
stats->oerrors = tx_err_total;
+ stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
return 0;
}
@@ -1087,17 +1116,18 @@ eth_stats_reset(struct rte_eth_dev *dev)
uint16_t i;
uint16_t nb_rx = dev->data->nb_rx_queues;
uint16_t nb_tx = dev->data->nb_tx_queues;
- struct pmd_internals *internals = dev->data->dev_private;
for (i = 0; i < nb_rx; i++) {
- internals->rx_queue[i].rx_pkts = 0;
- internals->rx_queue[i].rx_bytes = 0;
- internals->rx_queue[i].err_pkts = 0;
+ struct szedata2_rx_queue *rxq = dev->data->rx_queues[i];
+ rxq->rx_pkts = 0;
+ rxq->rx_bytes = 0;
+ rxq->err_pkts = 0;
}
for (i = 0; i < nb_tx; i++) {
- internals->tx_queue[i].tx_pkts = 0;
- internals->tx_queue[i].tx_bytes = 0;
- internals->tx_queue[i].err_pkts = 0;
+ struct szedata2_tx_queue *txq = dev->data->tx_queues[i];
+ txq->tx_pkts = 0;
+ txq->tx_bytes = 0;
+ txq->err_pkts = 0;
}
}
@@ -1105,9 +1135,11 @@ static void
eth_rx_queue_release(void *q)
{
struct szedata2_rx_queue *rxq = (struct szedata2_rx_queue *)q;
- if (rxq->sze != NULL) {
- szedata_close(rxq->sze);
- rxq->sze = NULL;
+
+ if (rxq != NULL) {
+ if (rxq->sze != NULL)
+ szedata_close(rxq->sze);
+ rte_free(rxq);
}
}
@@ -1115,9 +1147,11 @@ static void
eth_tx_queue_release(void *q)
{
struct szedata2_tx_queue *txq = (struct szedata2_tx_queue *)q;
- if (txq->sze != NULL) {
- szedata_close(txq->sze);
- txq->sze = NULL;
+
+ if (txq != NULL) {
+ if (txq->sze != NULL)
+ szedata_close(txq->sze);
+ rte_free(txq);
}
}
@@ -1142,111 +1176,34 @@ eth_dev_close(struct rte_eth_dev *dev)
dev->data->nb_tx_queues = 0;
}
-/**
- * Function takes value from first IBUF status register.
- * Values in IBUF and OBUF should be same.
- *
- * @param internals
- * Pointer to device private structure.
- * @return
- * Link speed constant.
- */
-static inline enum szedata2_link_speed
-get_link_speed(const struct pmd_internals *internals)
-{
- const volatile struct szedata2_ibuf *ibuf =
- ibuf_ptr_by_index(internals->pci_rsc, 0);
- uint32_t speed = (szedata2_read32(&ibuf->ibuf_st) & 0x70) >> 4;
- switch (speed) {
- case 0x03:
- return SZEDATA2_LINK_SPEED_10G;
- case 0x04:
- return SZEDATA2_LINK_SPEED_40G;
- case 0x05:
- return SZEDATA2_LINK_SPEED_100G;
- default:
- return SZEDATA2_LINK_SPEED_DEFAULT;
- }
-}
-
static int
eth_link_update(struct rte_eth_dev *dev,
int wait_to_complete __rte_unused)
{
struct rte_eth_link link;
- struct rte_eth_link *link_ptr = &link;
- struct rte_eth_link *dev_link = &dev->data->dev_link;
- struct pmd_internals *internals = (struct pmd_internals *)
- dev->data->dev_private;
- const volatile struct szedata2_ibuf *ibuf;
- uint32_t i;
- bool link_is_up = false;
-
- switch (get_link_speed(internals)) {
- case SZEDATA2_LINK_SPEED_10G:
- link.link_speed = ETH_SPEED_NUM_10G;
- break;
- case SZEDATA2_LINK_SPEED_40G:
- link.link_speed = ETH_SPEED_NUM_40G;
- break;
- case SZEDATA2_LINK_SPEED_100G:
- link.link_speed = ETH_SPEED_NUM_100G;
- break;
- default:
- link.link_speed = ETH_SPEED_NUM_10G;
- break;
- }
- /* szedata2 uses only full duplex */
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
-
- for (i = 0; i < szedata2_ibuf_count; i++) {
- ibuf = ibuf_ptr_by_index(internals->pci_rsc, i);
- /*
- * Link is considered up if at least one ibuf is enabled
- * and up.
- */
- if (ibuf_is_enabled(ibuf) && ibuf_is_link_up(ibuf)) {
- link_is_up = true;
- break;
- }
- }
-
- link.link_status = (link_is_up) ? ETH_LINK_UP : ETH_LINK_DOWN;
+ memset(&link, 0, sizeof(link));
+ link.link_speed = ETH_SPEED_NUM_100G;
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_status = ETH_LINK_UP;
link.link_autoneg = ETH_LINK_FIXED;
- rte_atomic64_cmpset((uint64_t *)dev_link, *(uint64_t *)dev_link,
- *(uint64_t *)link_ptr);
-
+ rte_eth_linkstatus_set(dev, &link);
return 0;
}
static int
-eth_dev_set_link_up(struct rte_eth_dev *dev)
+eth_dev_set_link_up(struct rte_eth_dev *dev __rte_unused)
{
- struct pmd_internals *internals = (struct pmd_internals *)
- dev->data->dev_private;
- uint32_t i;
-
- for (i = 0; i < szedata2_ibuf_count; i++)
- ibuf_enable(ibuf_ptr_by_index(internals->pci_rsc, i));
- for (i = 0; i < szedata2_obuf_count; i++)
- obuf_enable(obuf_ptr_by_index(internals->pci_rsc, i));
+ PMD_DRV_LOG(WARNING, "Setting link up is not supported.");
return 0;
}
static int
-eth_dev_set_link_down(struct rte_eth_dev *dev)
+eth_dev_set_link_down(struct rte_eth_dev *dev __rte_unused)
{
- struct pmd_internals *internals = (struct pmd_internals *)
- dev->data->dev_private;
- uint32_t i;
-
- for (i = 0; i < szedata2_ibuf_count; i++)
- ibuf_disable(ibuf_ptr_by_index(internals->pci_rsc, i));
- for (i = 0; i < szedata2_obuf_count; i++)
- obuf_disable(obuf_ptr_by_index(internals->pci_rsc, i));
+ PMD_DRV_LOG(WARNING, "Setting link down is not supported.");
return 0;
}
@@ -1254,26 +1211,50 @@ static int
eth_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t rx_queue_id,
uint16_t nb_rx_desc __rte_unused,
- unsigned int socket_id __rte_unused,
+ unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf __rte_unused,
struct rte_mempool *mb_pool)
{
- struct pmd_internals *internals = dev->data->dev_private;
- struct szedata2_rx_queue *rxq = &internals->rx_queue[rx_queue_id];
+ struct szedata2_rx_queue *rxq;
int ret;
- uint32_t rx = 1 << rx_queue_id;
+ struct pmd_internals *internals = dev->data->dev_private;
+ uint8_t rx_channel = internals->rxq_base_id + rx_queue_id;
+ uint32_t rx = 1 << rx_channel;
uint32_t tx = 0;
- rxq->sze = szedata_open(internals->sze_dev);
- if (rxq->sze == NULL)
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev->data->rx_queues[rx_queue_id] != NULL) {
+ eth_rx_queue_release(dev->data->rx_queues[rx_queue_id]);
+ dev->data->rx_queues[rx_queue_id] = NULL;
+ }
+
+ rxq = rte_zmalloc_socket("szedata2 rx queue",
+ sizeof(struct szedata2_rx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq == NULL) {
+ PMD_INIT_LOG(ERR, "rte_zmalloc_socket() failed for rx queue id "
+ "%" PRIu16 "!", rx_queue_id);
+ return -ENOMEM;
+ }
+
+ rxq->priv = internals;
+ rxq->sze = szedata_open(internals->sze_dev_path);
+ if (rxq->sze == NULL) {
+ PMD_INIT_LOG(ERR, "szedata_open() failed for rx queue id "
+ "%" PRIu16 "!", rx_queue_id);
+ eth_rx_queue_release(rxq);
return -EINVAL;
+ }
ret = szedata_subscribe3(rxq->sze, &rx, &tx);
if (ret != 0 || rx == 0) {
- szedata_close(rxq->sze);
- rxq->sze = NULL;
+ PMD_INIT_LOG(ERR, "szedata_subscribe3() failed for rx queue id "
+ "%" PRIu16 "!", rx_queue_id);
+ eth_rx_queue_release(rxq);
return -EINVAL;
}
- rxq->rx_channel = rx_queue_id;
+ rxq->rx_channel = rx_channel;
+ rxq->qid = rx_queue_id;
rxq->in_port = dev->data->port_id;
rxq->mb_pool = mb_pool;
rxq->rx_pkts = 0;
@@ -1281,6 +1262,11 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
rxq->err_pkts = 0;
dev->data->rx_queues[rx_queue_id] = rxq;
+
+ PMD_INIT_LOG(DEBUG, "Configured rx queue id %" PRIu16 " on socket "
+ "%u (channel id %u).", rxq->qid, socket_id,
+ rxq->rx_channel);
+
return 0;
}
@@ -1288,89 +1274,93 @@ static int
eth_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t tx_queue_id,
uint16_t nb_tx_desc __rte_unused,
- unsigned int socket_id __rte_unused,
+ unsigned int socket_id,
const struct rte_eth_txconf *tx_conf __rte_unused)
{
- struct pmd_internals *internals = dev->data->dev_private;
- struct szedata2_tx_queue *txq = &internals->tx_queue[tx_queue_id];
+ struct szedata2_tx_queue *txq;
int ret;
+ struct pmd_internals *internals = dev->data->dev_private;
+ uint8_t tx_channel = internals->txq_base_id + tx_queue_id;
uint32_t rx = 0;
- uint32_t tx = 1 << tx_queue_id;
+ uint32_t tx = 1 << tx_channel;
+
+ PMD_INIT_FUNC_TRACE();
- txq->sze = szedata_open(internals->sze_dev);
- if (txq->sze == NULL)
+ if (dev->data->tx_queues[tx_queue_id] != NULL) {
+ eth_tx_queue_release(dev->data->tx_queues[tx_queue_id]);
+ dev->data->tx_queues[tx_queue_id] = NULL;
+ }
+
+ txq = rte_zmalloc_socket("szedata2 tx queue",
+ sizeof(struct szedata2_tx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq == NULL) {
+ PMD_INIT_LOG(ERR, "rte_zmalloc_socket() failed for tx queue id "
+ "%" PRIu16 "!", tx_queue_id);
+ return -ENOMEM;
+ }
+
+ txq->priv = internals;
+ txq->sze = szedata_open(internals->sze_dev_path);
+ if (txq->sze == NULL) {
+ PMD_INIT_LOG(ERR, "szedata_open() failed for tx queue id "
+ "%" PRIu16 "!", tx_queue_id);
+ eth_tx_queue_release(txq);
return -EINVAL;
+ }
ret = szedata_subscribe3(txq->sze, &rx, &tx);
if (ret != 0 || tx == 0) {
- szedata_close(txq->sze);
- txq->sze = NULL;
+ PMD_INIT_LOG(ERR, "szedata_subscribe3() failed for tx queue id "
+ "%" PRIu16 "!", tx_queue_id);
+ eth_tx_queue_release(txq);
return -EINVAL;
}
- txq->tx_channel = tx_queue_id;
+ txq->tx_channel = tx_channel;
+ txq->qid = tx_queue_id;
txq->tx_pkts = 0;
txq->tx_bytes = 0;
txq->err_pkts = 0;
dev->data->tx_queues[tx_queue_id] = txq;
+
+ PMD_INIT_LOG(DEBUG, "Configured tx queue id %" PRIu16 " on socket "
+ "%u (channel id %u).", txq->qid, socket_id,
+ txq->tx_channel);
+
return 0;
}
-static void
+static int
eth_mac_addr_set(struct rte_eth_dev *dev __rte_unused,
struct ether_addr *mac_addr __rte_unused)
{
+ return 0;
}
static void
-eth_promiscuous_enable(struct rte_eth_dev *dev)
+eth_promiscuous_enable(struct rte_eth_dev *dev __rte_unused)
{
- struct pmd_internals *internals = (struct pmd_internals *)
- dev->data->dev_private;
- uint32_t i;
-
- for (i = 0; i < szedata2_ibuf_count; i++) {
- ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
- SZEDATA2_MAC_CHMODE_PROMISC);
- }
+ PMD_DRV_LOG(WARNING, "Enabling promiscuous mode is not supported. "
+ "The card is always in promiscuous mode.");
}
static void
-eth_promiscuous_disable(struct rte_eth_dev *dev)
+eth_promiscuous_disable(struct rte_eth_dev *dev __rte_unused)
{
- struct pmd_internals *internals = (struct pmd_internals *)
- dev->data->dev_private;
- uint32_t i;
-
- for (i = 0; i < szedata2_ibuf_count; i++) {
- ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
- SZEDATA2_MAC_CHMODE_ONLY_VALID);
- }
+ PMD_DRV_LOG(WARNING, "Disabling promiscuous mode is not supported. "
+ "The card is always in promiscuous mode.");
}
static void
-eth_allmulticast_enable(struct rte_eth_dev *dev)
+eth_allmulticast_enable(struct rte_eth_dev *dev __rte_unused)
{
- struct pmd_internals *internals = (struct pmd_internals *)
- dev->data->dev_private;
- uint32_t i;
-
- for (i = 0; i < szedata2_ibuf_count; i++) {
- ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
- SZEDATA2_MAC_CHMODE_ALL_MULTICAST);
- }
+ PMD_DRV_LOG(WARNING, "Enabling allmulticast mode is not supported.");
}
static void
-eth_allmulticast_disable(struct rte_eth_dev *dev)
+eth_allmulticast_disable(struct rte_eth_dev *dev __rte_unused)
{
- struct pmd_internals *internals = (struct pmd_internals *)
- dev->data->dev_private;
- uint32_t i;
-
- for (i = 0; i < szedata2_ibuf_count; i++) {
- ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
- SZEDATA2_MAC_CHMODE_ONLY_VALID);
- }
+ PMD_DRV_LOG(WARNING, "Disabling allmulticast mode is not supported.");
}
static const struct eth_dev_ops ops = {
@@ -1417,9 +1407,9 @@ get_szedata2_index(const struct rte_pci_addr *pcislot_addr, uint32_t *index)
FILE *fd;
char pcislot_path[PATH_MAX];
uint32_t domain;
- uint32_t bus;
- uint32_t devid;
- uint32_t function;
+ uint8_t bus;
+ uint8_t devid;
+ uint8_t function;
dir = opendir("/sys/class/combo");
if (dir == NULL)
@@ -1444,7 +1434,7 @@ get_szedata2_index(const struct rte_pci_addr *pcislot_addr, uint32_t *index)
if (fd == NULL)
continue;
- ret = fscanf(fd, "%4" PRIx16 ":%2" PRIx8 ":%2" PRIx8 ".%" PRIx8,
+ ret = fscanf(fd, "%8" SCNx32 ":%2" SCNx8 ":%2" SCNx8 ".%" SCNx8,
&domain, &bus, &devid, &function);
fclose(fd);
if (ret != 4)
@@ -1464,109 +1454,62 @@ get_szedata2_index(const struct rte_pci_addr *pcislot_addr, uint32_t *index)
return -1;
}
+/**
+ * @brief Initializes rte_eth_dev device.
+ * @param dev Device to initialize.
+ * @param pi Structure with info about DMA queues.
+ * @return 0 on success, negative error code on error.
+ */
static int
-rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
+rte_szedata2_eth_dev_init(struct rte_eth_dev *dev, struct port_info *pi)
{
+ int ret;
+ uint32_t szedata2_index;
+ char name[PATH_MAX];
struct rte_eth_dev_data *data = dev->data;
struct pmd_internals *internals = (struct pmd_internals *)
data->dev_private;
- struct szedata *szedata_temp;
- int ret;
- uint32_t szedata2_index;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_pci_addr *pci_addr = &pci_dev->addr;
- struct rte_mem_resource *pci_rsc =
- &pci_dev->mem_resource[PCI_RESOURCE_NUMBER];
- char rsc_filename[PATH_MAX];
- void *pci_resource_ptr = NULL;
- int fd;
- RTE_LOG(INFO, PMD, "Initializing szedata2 device (" PCI_PRI_FMT ")\n",
- pci_addr->domain, pci_addr->bus, pci_addr->devid,
- pci_addr->function);
+ PMD_INIT_FUNC_TRACE();
+ PMD_INIT_LOG(INFO, "Initializing eth_dev %s (driver %s)", data->name,
+ dev->device->driver->name);
+
+ /* Fill internal private structure. */
+ internals->dev = dev;
/* Get index of szedata2 device file and create path to device file */
- ret = get_szedata2_index(pci_addr, &szedata2_index);
+ ret = get_szedata2_index(&pci_dev->addr, &szedata2_index);
if (ret != 0) {
- RTE_LOG(ERR, PMD, "Failed to get szedata2 device index!\n");
+ PMD_INIT_LOG(ERR, "Failed to get szedata2 device index!");
return -ENODEV;
}
- snprintf(internals->sze_dev, PATH_MAX, SZEDATA2_DEV_PATH_FMT,
- szedata2_index);
-
- RTE_LOG(INFO, PMD, "SZEDATA2 path: %s\n", internals->sze_dev);
-
- /*
- * Get number of available DMA RX and TX channels, which is maximum
- * number of queues that can be created and store it in private device
- * data structure.
- */
- szedata_temp = szedata_open(internals->sze_dev);
- if (szedata_temp == NULL) {
- RTE_LOG(ERR, PMD, "szedata_open(): failed to open %s",
- internals->sze_dev);
- return -EINVAL;
+ snprintf(name, PATH_MAX, SZEDATA2_DEV_PATH_FMT, szedata2_index);
+ internals->sze_dev_path = strdup(name);
+ if (internals->sze_dev_path == NULL) {
+ PMD_INIT_LOG(ERR, "strdup() failed!");
+ return -ENOMEM;
}
- internals->max_rx_queues = szedata_ifaces_available(szedata_temp,
- SZE2_DIR_RX);
- internals->max_tx_queues = szedata_ifaces_available(szedata_temp,
- SZE2_DIR_TX);
- szedata_close(szedata_temp);
-
- RTE_LOG(INFO, PMD, "Available DMA channels RX: %u TX: %u\n",
- internals->max_rx_queues, internals->max_tx_queues);
+ PMD_INIT_LOG(INFO, "SZEDATA2 path: %s", internals->sze_dev_path);
+ internals->max_rx_queues = pi->rx_count;
+ internals->max_tx_queues = pi->tx_count;
+ internals->rxq_base_id = pi->rx_base_id;
+ internals->txq_base_id = pi->tx_base_id;
+ PMD_INIT_LOG(INFO, "%u RX DMA channels from id %u",
+ internals->max_rx_queues, internals->rxq_base_id);
+ PMD_INIT_LOG(INFO, "%u TX DMA channels from id %u",
+ internals->max_tx_queues, internals->txq_base_id);
/* Set rx, tx burst functions */
- if (data->dev_conf.rxmode.enable_scatter == 1 ||
- data->scattered_rx == 1) {
+ if (data->scattered_rx == 1)
dev->rx_pkt_burst = eth_szedata2_rx_scattered;
- data->scattered_rx = 1;
- } else {
+ else
dev->rx_pkt_burst = eth_szedata2_rx;
- data->scattered_rx = 0;
- }
dev->tx_pkt_burst = eth_szedata2_tx;
/* Set function callbacks for Ethernet API */
dev->dev_ops = &ops;
- rte_eth_copy_pci_info(dev, pci_dev);
-
- /* mmap pci resource0 file to rte_mem_resource structure */
- if (pci_dev->mem_resource[PCI_RESOURCE_NUMBER].phys_addr ==
- 0) {
- RTE_LOG(ERR, PMD, "Missing resource%u file\n",
- PCI_RESOURCE_NUMBER);
- return -EINVAL;
- }
- snprintf(rsc_filename, PATH_MAX,
- "%s/" PCI_PRI_FMT "/resource%u", rte_pci_get_sysfs_path(),
- pci_addr->domain, pci_addr->bus,
- pci_addr->devid, pci_addr->function, PCI_RESOURCE_NUMBER);
- fd = open(rsc_filename, O_RDWR);
- if (fd < 0) {
- RTE_LOG(ERR, PMD, "Could not open file %s\n", rsc_filename);
- return -EINVAL;
- }
-
- pci_resource_ptr = mmap(0,
- pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len,
- PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
- close(fd);
- if (pci_resource_ptr == MAP_FAILED) {
- RTE_LOG(ERR, PMD, "Could not mmap file %s (fd = %d)\n",
- rsc_filename, fd);
- return -EINVAL;
- }
- pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr = pci_resource_ptr;
- internals->pci_rsc = pci_rsc;
-
- RTE_LOG(DEBUG, PMD, "resource%u phys_addr = 0x%llx len = %llu "
- "virt addr = %llx\n", PCI_RESOURCE_NUMBER,
- (unsigned long long)pci_rsc->phys_addr,
- (unsigned long long)pci_rsc->len,
- (unsigned long long)pci_rsc->addr);
-
/* Get link state */
eth_link_update(dev, 0);
@@ -1574,40 +1517,37 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
data->mac_addrs = rte_zmalloc(data->name, sizeof(struct ether_addr),
RTE_CACHE_LINE_SIZE);
if (data->mac_addrs == NULL) {
- RTE_LOG(ERR, PMD, "Could not alloc space for MAC address!\n");
- munmap(pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr,
- pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len);
- return -EINVAL;
+ PMD_INIT_LOG(ERR, "Could not alloc space for MAC address!");
+ free(internals->sze_dev_path);
+ return -ENOMEM;
}
ether_addr_copy(&eth_addr, data->mac_addrs);
- /* At initial state COMBO card is in promiscuous mode so disable it */
- eth_promiscuous_disable(dev);
-
- RTE_LOG(INFO, PMD, "szedata2 device ("
- PCI_PRI_FMT ") successfully initialized\n",
- pci_addr->domain, pci_addr->bus, pci_addr->devid,
- pci_addr->function);
+ PMD_INIT_LOG(INFO, "%s device %s successfully initialized",
+ dev->device->driver->name, data->name);
return 0;
}
+/**
+ * @brief Uninitializes rte_eth_dev device.
+ * @param dev Device to uninitialize.
+ * @return 0 on success, negative error code on error.
+ */
static int
rte_szedata2_eth_dev_uninit(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_pci_addr *pci_addr = &pci_dev->addr;
+ struct pmd_internals *internals = (struct pmd_internals *)
+ dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+ free(internals->sze_dev_path);
rte_free(dev->data->mac_addrs);
- dev->data->mac_addrs = NULL;
- munmap(pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr,
- pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len);
- RTE_LOG(INFO, PMD, "szedata2 device ("
- PCI_PRI_FMT ") successfully uninitialized\n",
- pci_addr->domain, pci_addr->bus, pci_addr->devid,
- pci_addr->function);
+ PMD_DRV_LOG(INFO, "%s device %s successfully uninitialized",
+ dev->device->driver->name, dev->data->name);
return 0;
}
@@ -1626,21 +1566,349 @@ static const struct rte_pci_id rte_szedata2_pci_id_table[] = {
PCI_DEVICE_ID_NETCOPE_COMBO100G2)
},
{
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE,
+ PCI_DEVICE_ID_NETCOPE_NFB200G2QL)
+ },
+ {
.vendor_id = 0,
}
};
+/**
+ * @brief Gets info about DMA queues for ports.
+ * @param pci_dev PCI device structure.
+ * @param port_count Pointer to variable set with number of ports.
+ * @param pi Pointer to array of structures with info about DMA queues
+ * for ports.
+ * @param max_ports Maximum number of ports.
+ * @return 0 on success, negative error code on error.
+ */
+static int
+get_port_info(struct rte_pci_device *pci_dev, unsigned int *port_count,
+ struct port_info *pi, unsigned int max_ports)
+{
+ struct szedata *szedata_temp;
+ char sze_dev_path[PATH_MAX];
+ uint32_t szedata2_index;
+ int ret;
+ uint16_t max_rx_queues;
+ uint16_t max_tx_queues;
+
+ if (max_ports == 0)
+ return -EINVAL;
+
+ memset(pi, 0, max_ports * sizeof(struct port_info));
+ *port_count = 0;
+
+ /* Get index of szedata2 device file and create path to device file */
+ ret = get_szedata2_index(&pci_dev->addr, &szedata2_index);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to get szedata2 device index!");
+ return -ENODEV;
+ }
+ snprintf(sze_dev_path, PATH_MAX, SZEDATA2_DEV_PATH_FMT, szedata2_index);
+
+ /*
+ * Get number of available DMA RX and TX channels, which is maximum
+ * number of queues that can be created.
+ */
+ szedata_temp = szedata_open(sze_dev_path);
+ if (szedata_temp == NULL) {
+ PMD_INIT_LOG(ERR, "szedata_open(%s) failed", sze_dev_path);
+ return -EINVAL;
+ }
+ max_rx_queues = szedata_ifaces_available(szedata_temp, SZE2_DIR_RX);
+ max_tx_queues = szedata_ifaces_available(szedata_temp, SZE2_DIR_TX);
+ PMD_INIT_LOG(INFO, "Available DMA channels RX: %u TX: %u",
+ max_rx_queues, max_tx_queues);
+ if (max_rx_queues > RTE_ETH_SZEDATA2_MAX_RX_QUEUES) {
+ PMD_INIT_LOG(ERR, "%u RX queues exceeds supported number %u",
+ max_rx_queues, RTE_ETH_SZEDATA2_MAX_RX_QUEUES);
+ szedata_close(szedata_temp);
+ return -EINVAL;
+ }
+ if (max_tx_queues > RTE_ETH_SZEDATA2_MAX_TX_QUEUES) {
+ PMD_INIT_LOG(ERR, "%u TX queues exceeds supported number %u",
+ max_tx_queues, RTE_ETH_SZEDATA2_MAX_TX_QUEUES);
+ szedata_close(szedata_temp);
+ return -EINVAL;
+ }
+
+ if (pci_dev->id.device_id == PCI_DEVICE_ID_NETCOPE_NFB200G2QL) {
+ unsigned int i;
+ unsigned int rx_queues = max_rx_queues / max_ports;
+ unsigned int tx_queues = max_tx_queues / max_ports;
+
+ /*
+ * The number of queues reported by szedata_ifaces_available()
+ * is the total over all DMA controllers, which may reside on
+ * different NUMA nodes.
+ * All queues from the same DMA controller share one NUMA node,
+ * so the NUMA node of the first queue of each DMA controller
+ * is retrieved.
+ * If it differs from the NUMA node of the queues from the
+ * previous DMA controller, the queues are assigned to the
+ * next port.
+ */
+
+ for (i = 0; i < max_ports; i++) {
+ int numa_rx = szedata_get_area_numa_node(szedata_temp,
+ SZE2_DIR_RX, rx_queues * i);
+ int numa_tx = szedata_get_area_numa_node(szedata_temp,
+ SZE2_DIR_TX, tx_queues * i);
+ unsigned int port_rx_queues = numa_rx != -1 ?
+ rx_queues : 0;
+ unsigned int port_tx_queues = numa_tx != -1 ?
+ tx_queues : 0;
+ PMD_INIT_LOG(DEBUG, "%u rx queues from id %u, numa %d",
+ rx_queues, rx_queues * i, numa_rx);
+ PMD_INIT_LOG(DEBUG, "%u tx queues from id %u, numa %d",
+ tx_queues, tx_queues * i, numa_tx);
+
+ if (port_rx_queues != 0 && port_tx_queues != 0 &&
+ numa_rx != numa_tx) {
+ PMD_INIT_LOG(ERR, "RX queue %u numa %d differs "
+ "from TX queue %u numa %d "
+ "unexpectedly",
+ rx_queues * i, numa_rx,
+ tx_queues * i, numa_tx);
+ szedata_close(szedata_temp);
+ return -EINVAL;
+ } else if (port_rx_queues == 0 && port_tx_queues == 0) {
+ continue;
+ } else {
+ unsigned int j;
+ unsigned int current = *port_count;
+ int port_numa = port_rx_queues != 0 ?
+ numa_rx : numa_tx;
+
+ for (j = 0; j < *port_count; j++) {
+ if (pi[j].numa_node ==
+ port_numa) {
+ current = j;
+ break;
+ }
+ }
+ if (pi[current].rx_count == 0 &&
+ pi[current].tx_count == 0) {
+ pi[current].rx_base_id = rx_queues * i;
+ pi[current].tx_base_id = tx_queues * i;
+ (*port_count)++;
+ } else if ((rx_queues * i !=
+ pi[current].rx_base_id +
+ pi[current].rx_count) ||
+ (tx_queues * i !=
+ pi[current].tx_base_id +
+ pi[current].tx_count)) {
+ PMD_INIT_LOG(ERR, "Queue ids does not "
+ "fulfill constraints");
+ szedata_close(szedata_temp);
+ return -EINVAL;
+ }
+ pi[current].rx_count += port_rx_queues;
+ pi[current].tx_count += port_tx_queues;
+ pi[current].numa_node = port_numa;
+ }
+ }
+ } else {
+ pi[0].rx_count = max_rx_queues;
+ pi[0].tx_count = max_tx_queues;
+ pi[0].numa_node = pci_dev->device.numa_node;
+ *port_count = 1;
+ }
+
+ szedata_close(szedata_temp);
+ return 0;
+}
+
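The grouping above is easier to follow with a small worked example. The standalone sketch below is illustrative only: the controller layout, queue counts and NUMA ids are assumed, and example_port_info is a stripped-down stand-in for the driver's struct port_info. It reproduces the merge-by-NUMA-node step that decides how many ports the probe will create.

#include <stdio.h>

struct example_port_info {
        unsigned int rx_base_id;
        unsigned int rx_count;
        int numa_node;
};

int main(void)
{
        /* Assumed layout: 4 DMA controllers, 4 RX queues each, two per socket. */
        const int ctrl_numa[4] = { 0, 0, 1, 1 };
        const unsigned int queues_per_ctrl = 4;
        struct example_port_info pi[4] = { { 0, 0, 0 } };
        unsigned int port_count = 0;
        unsigned int i, j;

        for (i = 0; i < 4; i++) {
                unsigned int current = port_count;

                /* Queues whose controller shares a NUMA node join an existing port. */
                for (j = 0; j < port_count; j++) {
                        if (pi[j].numa_node == ctrl_numa[i]) {
                                current = j;
                                break;
                        }
                }
                if (pi[current].rx_count == 0) {
                        pi[current].rx_base_id = queues_per_ctrl * i;
                        port_count++;
                }
                pi[current].rx_count += queues_per_ctrl;
                pi[current].numa_node = ctrl_numa[i];
        }

        for (i = 0; i < port_count; i++)
                printf("port %u: rx_base_id %u, rx_count %u, numa %d\n",
                       i, pi[i].rx_base_id, pi[i].rx_count, pi[i].numa_node);
        /* Prints two ports: queues 0-7 on NUMA 0, queues 8-15 on NUMA 1. */
        return 0;
}

In the driver this is what makes an NFB-200G2QL card show up as multiple eth_devs, one per NUMA node, instead of a single port owning all DMA queues.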
+/**
+ * @brief Allocates rte_eth_dev device.
+ * @param pci_dev Corresponding PCI device.
+ * @param numa_node NUMA node on which device is allocated.
+ * @param port_no Id of rte_eth_device created on PCI device pci_dev.
+ * @return Pointer to allocated device or NULL on error.
+ */
+static struct rte_eth_dev *
+szedata2_eth_dev_allocate(struct rte_pci_device *pci_dev, int numa_node,
+ unsigned int port_no)
+{
+ struct rte_eth_dev *eth_dev;
+ char name[RTE_ETH_NAME_MAX_LEN];
+
+ PMD_INIT_FUNC_TRACE();
+
+ snprintf(name, RTE_ETH_NAME_MAX_LEN, "%s"
+ SZEDATA2_ETH_DEV_NAME_SUFFIX_FMT,
+ pci_dev->device.name, port_no);
+ PMD_INIT_LOG(DEBUG, "Allocating eth_dev %s", name);
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ eth_dev = rte_eth_dev_allocate(name);
+ if (!eth_dev)
+ return NULL;
+
+ eth_dev->data->dev_private = rte_zmalloc_socket(name,
+ sizeof(struct pmd_internals), RTE_CACHE_LINE_SIZE,
+ numa_node);
+ if (!eth_dev->data->dev_private) {
+ rte_eth_dev_release_port(eth_dev);
+ return NULL;
+ }
+ } else {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev)
+ return NULL;
+ }
+
+ eth_dev->device = &pci_dev->device;
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->numa_node = numa_node;
+ return eth_dev;
+}
+
+/**
+ * @brief Releases a range of rte_eth_dev devices from an array.
+ * @param eth_devs Array of pointers to rte_eth_dev devices.
+ * @param from Index in array eth_devs to start with.
+ * @param to Index in array right after the last element to release.
+ *
+ * Used for releasing at failed initialization.
+ */
+static void
+szedata2_eth_dev_release_interval(struct rte_eth_dev **eth_devs,
+ unsigned int from, unsigned int to)
+{
+ unsigned int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = from; i < to; i++) {
+ rte_szedata2_eth_dev_uninit(eth_devs[i]);
+ rte_eth_dev_pci_release(eth_devs[i]);
+ }
+}
+
+/**
+ * @brief Callback .probe for struct rte_pci_driver.
+ */
static int szedata2_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_probe(pci_dev,
- sizeof(struct pmd_internals), rte_szedata2_eth_dev_init);
+ struct port_info port_info[SZEDATA2_MAX_PORTS];
+ unsigned int port_count;
+ int ret;
+ unsigned int i;
+ struct pci_dev_list_entry *list_entry;
+ struct rte_eth_dev *eth_devs[SZEDATA2_MAX_PORTS] = {NULL,};
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = get_port_info(pci_dev, &port_count, port_info,
+ SZEDATA2_MAX_PORTS);
+ if (ret != 0)
+ return ret;
+
+ if (port_count == 0) {
+ PMD_INIT_LOG(ERR, "No available ports!");
+ return -ENODEV;
+ }
+
+ list_entry = rte_zmalloc(NULL, sizeof(struct pci_dev_list_entry),
+ RTE_CACHE_LINE_SIZE);
+ if (list_entry == NULL) {
+ PMD_INIT_LOG(ERR, "rte_zmalloc() failed!");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < port_count; i++) {
+ eth_devs[i] = szedata2_eth_dev_allocate(pci_dev,
+ port_info[i].numa_node, i);
+ if (eth_devs[i] == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to alloc eth_dev for port %u",
+ i);
+ szedata2_eth_dev_release_interval(eth_devs, 0, i);
+ rte_free(list_entry);
+ return -ENOMEM;
+ }
+
+ ret = rte_szedata2_eth_dev_init(eth_devs[i], &port_info[i]);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to init eth_dev for port %u",
+ i);
+ rte_eth_dev_pci_release(eth_devs[i]);
+ szedata2_eth_dev_release_interval(eth_devs, 0, i);
+ rte_free(list_entry);
+ return ret;
+ }
+
+ rte_eth_dev_probing_finish(eth_devs[i]);
+ }
+
+ /*
+ * Add pci_dev to list of PCI devices for this driver
+ * which is used at remove callback to release all created eth_devs.
+ */
+ list_entry->pci_dev = pci_dev;
+ list_entry->port_count = port_count;
+ LIST_INSERT_HEAD(&szedata2_pci_dev_list, list_entry, next);
+ return 0;
}
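The list manipulated here (LIST_INSERT_HEAD above, LIST_FOREACH/LIST_REMOVE in the remove callback below) is driver-internal bookkeeping whose definition is outside this hunk. Based on the fields the probe and remove callbacks actually touch, its shape is presumably close to the following sketch; treat it as inferred, not as the driver's literal declaration.

#include <sys/queue.h>
#include <rte_bus_pci.h>

/* Inferred shape of one bookkeeping entry per probed PCI device. */
struct pci_dev_list_entry {
        LIST_ENTRY(pci_dev_list_entry) next;    /* linkage walked by LIST_FOREACH() */
        struct rte_pci_device *pci_dev;         /* the probed COMBO/NFB card */
        unsigned int port_count;                /* eth_devs created for this card */
};

/* Head of the per-driver list, consulted again in szedata2_eth_pci_remove(). */
LIST_HEAD(pci_dev_list_head, pci_dev_list_entry);
static struct pci_dev_list_head szedata2_pci_dev_list =
        LIST_HEAD_INITIALIZER(szedata2_pci_dev_list);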
+/**
+ * @brief Callback .remove for struct rte_pci_driver.
+ */
static int szedata2_eth_pci_remove(struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_remove(pci_dev,
- rte_szedata2_eth_dev_uninit);
+ unsigned int i;
+ unsigned int port_count;
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_dev *eth_dev;
+ int ret;
+ int retval = 0;
+ bool found = false;
+ struct pci_dev_list_entry *list_entry = NULL;
+
+ PMD_INIT_FUNC_TRACE();
+
+ LIST_FOREACH(list_entry, &szedata2_pci_dev_list, next) {
+ if (list_entry->pci_dev == pci_dev) {
+ port_count = list_entry->port_count;
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ PMD_DRV_LOG(ERR, "PCI device " PCI_PRI_FMT " not found",
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function);
+ return -ENODEV;
+ }
+
+ /* Remove the list entry only after it has actually been found. */
+ LIST_REMOVE(list_entry, next);
+ rte_free(list_entry);
+
+ for (i = 0; i < port_count; i++) {
+ snprintf(name, RTE_ETH_NAME_MAX_LEN, "%s"
+ SZEDATA2_ETH_DEV_NAME_SUFFIX_FMT,
+ pci_dev->device.name, i);
+ PMD_DRV_LOG(DEBUG, "Removing eth_dev %s", name);
+ eth_dev = rte_eth_dev_allocated(name);
+ if (!eth_dev) {
+ PMD_DRV_LOG(ERR, "eth_dev %s not found", name);
+ retval = retval ? retval : -ENODEV;
+ }
+
+ ret = rte_szedata2_eth_dev_uninit(eth_dev);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "eth_dev %s uninit failed", name);
+ retval = retval ? retval : ret;
+ }
+
+ rte_eth_dev_pci_release(eth_dev);
+ }
+
+ return retval;
}
static struct rte_pci_driver szedata2_eth_driver = {
@@ -1652,4 +1920,16 @@ static struct rte_pci_driver szedata2_eth_driver = {
RTE_PMD_REGISTER_PCI(RTE_SZEDATA2_DRIVER_NAME, szedata2_eth_driver);
RTE_PMD_REGISTER_PCI_TABLE(RTE_SZEDATA2_DRIVER_NAME, rte_szedata2_pci_id_table);
RTE_PMD_REGISTER_KMOD_DEP(RTE_SZEDATA2_DRIVER_NAME,
- "* combo6core & combov3 & szedata2 & szedata2_cv3");
+ "* combo6core & combov3 & szedata2 & ( szedata2_cv3 | szedata2_cv3_fdt )");
+
+RTE_INIT(szedata2_init_log);
+static void
+szedata2_init_log(void)
+{
+ szedata2_logtype_init = rte_log_register("pmd.net.szedata2.init");
+ if (szedata2_logtype_init >= 0)
+ rte_log_set_level(szedata2_logtype_init, RTE_LOG_NOTICE);
+ szedata2_logtype_driver = rte_log_register("pmd.net.szedata2.driver");
+ if (szedata2_logtype_driver >= 0)
+ rte_log_set_level(szedata2_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/drivers/net/szedata2/rte_eth_szedata2.h b/drivers/net/szedata2/rte_eth_szedata2.h
index f25d4c59..26a82b35 100644
--- a/drivers/net/szedata2/rte_eth_szedata2.h
+++ b/drivers/net/szedata2/rte_eth_szedata2.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015 - 2016 CESNET
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of CESNET nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 - 2016 CESNET
*/
#ifndef RTE_PMD_SZEDATA2_H_
@@ -47,9 +18,7 @@
#define PCI_DEVICE_ID_NETCOPE_COMBO80G 0xcb80
#define PCI_DEVICE_ID_NETCOPE_COMBO100G 0xc1c1
#define PCI_DEVICE_ID_NETCOPE_COMBO100G2 0xc2c1
-
-/* number of PCI resource used by COMBO card */
-#define PCI_RESOURCE_NUMBER 0
+#define PCI_DEVICE_ID_NETCOPE_NFB200G2QL 0xc250
/* szedata2_packet header length == 4 bytes == 2B segment size + 2B hw size */
#define RTE_SZE2_PACKET_HEADER_SIZE 4
diff --git a/drivers/net/szedata2/szedata2_iobuf.c b/drivers/net/szedata2/szedata2_iobuf.c
deleted file mode 100644
index 3b9a71fe..00000000
--- a/drivers/net/szedata2/szedata2_iobuf.c
+++ /dev/null
@@ -1,203 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2017 CESNET
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of CESNET nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdint.h>
-
-#include <rte_common.h>
-
-#include "szedata2_iobuf.h"
-
-/*
- * IBUFs and OBUFs can generally be located at different offsets in different
- * firmwares (modes).
- * This part defines base offsets of IBUFs and OBUFs for various cards
- * and firmwares (modes).
- * Type of firmware (mode) is set through configuration option
- * CONFIG_RTE_LIBRTE_PMD_SZEDATA2_AS.
- * Possible values are:
- * 0 - for cards (modes):
- * NFB-100G1 (100G1)
- *
- * 1 - for cards (modes):
- * NFB-100G2Q (100G1)
- *
- * 2 - for cards (modes):
- * NFB-40G2 (40G2)
- * NFB-100G2C (100G2)
- * NFB-100G2Q (40G2)
- *
- * 3 - for cards (modes):
- * NFB-40G2 (10G8)
- * NFB-100G2Q (10G8)
- *
- * 4 - for cards (modes):
- * NFB-100G1 (10G10)
- *
- * 5 - for experimental firmwares and future use
- */
-#if !defined(RTE_LIBRTE_PMD_SZEDATA2_AS)
-#error "RTE_LIBRTE_PMD_SZEDATA2_AS has to be defined"
-#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 0
-
-/*
- * Cards (modes):
- * NFB-100G1 (100G1)
- */
-
-const uint32_t szedata2_ibuf_base_table[] = {
- 0x8000
-};
-const uint32_t szedata2_obuf_base_table[] = {
- 0x9000
-};
-
-#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 1
-
-/*
- * Cards (modes):
- * NFB-100G2Q (100G1)
- */
-
-const uint32_t szedata2_ibuf_base_table[] = {
- 0x8800
-};
-const uint32_t szedata2_obuf_base_table[] = {
- 0x9800
-};
-
-#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 2
-
-/*
- * Cards (modes):
- * NFB-40G2 (40G2)
- * NFB-100G2C (100G2)
- * NFB-100G2Q (40G2)
- */
-
-const uint32_t szedata2_ibuf_base_table[] = {
- 0x8000,
- 0x8800
-};
-const uint32_t szedata2_obuf_base_table[] = {
- 0x9000,
- 0x9800
-};
-
-#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 3
-
-/*
- * Cards (modes):
- * NFB-40G2 (10G8)
- * NFB-100G2Q (10G8)
- */
-
-const uint32_t szedata2_ibuf_base_table[] = {
- 0x8000,
- 0x8200,
- 0x8400,
- 0x8600,
- 0x8800,
- 0x8A00,
- 0x8C00,
- 0x8E00
-};
-const uint32_t szedata2_obuf_base_table[] = {
- 0x9000,
- 0x9200,
- 0x9400,
- 0x9600,
- 0x9800,
- 0x9A00,
- 0x9C00,
- 0x9E00
-};
-
-#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 4
-
-/*
- * Cards (modes):
- * NFB-100G1 (10G10)
- */
-
-const uint32_t szedata2_ibuf_base_table[] = {
- 0x8000,
- 0x8200,
- 0x8400,
- 0x8600,
- 0x8800,
- 0x8A00,
- 0x8C00,
- 0x8E00,
- 0x9000,
- 0x9200
-};
-const uint32_t szedata2_obuf_base_table[] = {
- 0xA000,
- 0xA200,
- 0xA400,
- 0xA600,
- 0xA800,
- 0xAA00,
- 0xAC00,
- 0xAE00,
- 0xB000,
- 0xB200
-};
-
-#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 5
-
-/*
- * Future use and experimental firmwares.
- */
-
-const uint32_t szedata2_ibuf_base_table[] = {
- 0x8000,
- 0x8200,
- 0x8400,
- 0x8600,
- 0x8800
-};
-const uint32_t szedata2_obuf_base_table[] = {
- 0x9000,
- 0x9200,
- 0x9400,
- 0x9600,
- 0x9800
-};
-
-#else
-#error "RTE_LIBRTE_PMD_SZEDATA2_AS has wrong value, see comments in config file"
-#endif
-
-const uint32_t szedata2_ibuf_count = RTE_DIM(szedata2_ibuf_base_table);
-const uint32_t szedata2_obuf_count = RTE_DIM(szedata2_obuf_base_table);
diff --git a/drivers/net/szedata2/szedata2_iobuf.h b/drivers/net/szedata2/szedata2_iobuf.h
deleted file mode 100644
index f1ccb3b2..00000000
--- a/drivers/net/szedata2/szedata2_iobuf.h
+++ /dev/null
@@ -1,356 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2017 CESNET
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of CESNET nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _SZEDATA2_IOBUF_H_
-#define _SZEDATA2_IOBUF_H_
-
-#include <stdint.h>
-#include <stdbool.h>
-
-#include <rte_byteorder.h>
-#include <rte_io.h>
-#include <rte_dev.h>
-
-/* IBUF offsets from the beginning of the PCI resource address space. */
-extern const uint32_t szedata2_ibuf_base_table[];
-extern const uint32_t szedata2_ibuf_count;
-
-/* OBUF offsets from the beginning of the PCI resource address space. */
-extern const uint32_t szedata2_obuf_base_table[];
-extern const uint32_t szedata2_obuf_count;
-
-enum szedata2_link_speed {
- SZEDATA2_LINK_SPEED_DEFAULT = 0,
- SZEDATA2_LINK_SPEED_10G,
- SZEDATA2_LINK_SPEED_40G,
- SZEDATA2_LINK_SPEED_100G,
-};
-
-enum szedata2_mac_check_mode {
- SZEDATA2_MAC_CHMODE_PROMISC = 0x0,
- SZEDATA2_MAC_CHMODE_ONLY_VALID = 0x1,
- SZEDATA2_MAC_CHMODE_ALL_BROADCAST = 0x2,
- SZEDATA2_MAC_CHMODE_ALL_MULTICAST = 0x3,
-};
-
-/**
- * Macro takes pointer to pci resource structure (rsc)
- * and returns pointer to mapped resource memory at
- * specified offset (offset) typecast to the type (type).
- */
-#define SZEDATA2_PCI_RESOURCE_PTR(rsc, offset, type) \
- ((type)(((uint8_t *)(rsc)->addr) + (offset)))
-
-/**
- * Maximum possible number of MAC addresses (limited by IBUF status
- * register value MAC_COUNT which has 5 bits).
- */
-#define SZEDATA2_IBUF_MAX_MAC_COUNT 32
-
-/**
- * Structure describes IBUF address space.
- */
-struct szedata2_ibuf {
- /** Total Received Frames Counter low part */
- uint32_t trfcl; /**< 0x00 */
- /** Correct Frames Counter low part */
- uint32_t cfcl; /**< 0x04 */
- /** Discarded Frames Counter low part */
- uint32_t dfcl; /**< 0x08 */
- /** Counter of frames discarded due to buffer overflow low part */
- uint32_t bodfcl; /**< 0x0C */
- /** Total Received Frames Counter high part */
- uint32_t trfch; /**< 0x10 */
- /** Correct Frames Counter high part */
- uint32_t cfch; /**< 0x14 */
- /** Discarded Frames Counter high part */
- uint32_t dfch; /**< 0x18 */
- /** Counter of frames discarded due to buffer overflow high part */
- uint32_t bodfch; /**< 0x1C */
- /** IBUF enable register */
- uint32_t ibuf_en; /**< 0x20 */
- /** Error mask register */
- uint32_t err_mask; /**< 0x24 */
- /** IBUF status register */
- uint32_t ibuf_st; /**< 0x28 */
- /** IBUF command register */
- uint32_t ibuf_cmd; /**< 0x2C */
- /** Minimum frame length allowed */
- uint32_t mfla; /**< 0x30 */
- /** Frame MTU */
- uint32_t mtu; /**< 0x34 */
- /** MAC address check mode */
- uint32_t mac_chmode; /**< 0x38 */
- /** Octets Received OK Counter low part */
- uint32_t orocl; /**< 0x3C */
- /** Octets Received OK Counter high part */
- uint32_t oroch; /**< 0x40 */
- /** reserved */
- uint8_t reserved[60]; /**< 0x4C */
- /** IBUF memory for MAC addresses */
- uint32_t mac_mem[2 * SZEDATA2_IBUF_MAX_MAC_COUNT]; /**< 0x80 */
-} __rte_packed;
-
-/**
- * Structure describes OBUF address space.
- */
-struct szedata2_obuf {
- /** Total Sent Frames Counter low part */
- uint32_t tsfcl; /**< 0x00 */
- /** Octets Sent Counter low part */
- uint32_t oscl; /**< 0x04 */
- /** Total Discarded Frames Counter low part */
- uint32_t tdfcl; /**< 0x08 */
- /** reserved */
- uint32_t reserved1; /**< 0x0C */
- /** Total Sent Frames Counter high part */
- uint32_t tsfch; /**< 0x10 */
- /** Octets Sent Counter high part */
- uint32_t osch; /**< 0x14 */
- /** Total Discarded Frames Counter high part */
- uint32_t tdfch; /**< 0x18 */
- /** reserved */
- uint32_t reserved2; /**< 0x1C */
- /** OBUF enable register */
- uint32_t obuf_en; /**< 0x20 */
- /** reserved */
- uint64_t reserved3; /**< 0x24 */
- /** OBUF control register */
- uint32_t ctrl; /**< 0x2C */
- /** OBUF status register */
- uint32_t obuf_st; /**< 0x30 */
-} __rte_packed;
-
-/**
- * Wrapper for reading 4 bytes from device memory in correct endianness.
- *
- * @param addr
- * Address for reading.
- * @return
- * 4 B value.
- */
-static inline uint32_t
-szedata2_read32(const volatile void *addr)
-{
- return rte_le_to_cpu_32(rte_read32(addr));
-}
-
-/**
- * Wrapper for writing 4 bytes to device memory in correct endianness.
- *
- * @param value
- * Value to write.
- * @param addr
- * Address for writing.
- */
-static inline void
-szedata2_write32(uint32_t value, volatile void *addr)
-{
- rte_write32(rte_cpu_to_le_32(value), addr);
-}
-
-/**
- * Get pointer to IBUF structure according to specified index.
- *
- * @param rsc
- * Pointer to base address of memory resource.
- * @param index
- * Index of IBUF.
- * @return
- * Pointer to IBUF structure.
- */
-static inline struct szedata2_ibuf *
-ibuf_ptr_by_index(struct rte_mem_resource *rsc, uint32_t index)
-{
- if (index >= szedata2_ibuf_count)
- index = szedata2_ibuf_count - 1;
- return SZEDATA2_PCI_RESOURCE_PTR(rsc, szedata2_ibuf_base_table[index],
- struct szedata2_ibuf *);
-}
-
-/**
- * Get pointer to OBUF structure according to specified index.
- *
- * @param rsc
- * Pointer to base address of memory resource.
- * @param index
- * Index of OBUF.
- * @return
- * Pointer to OBUF structure.
- */
-static inline struct szedata2_obuf *
-obuf_ptr_by_index(struct rte_mem_resource *rsc, uint32_t index)
-{
- if (index >= szedata2_obuf_count)
- index = szedata2_obuf_count - 1;
- return SZEDATA2_PCI_RESOURCE_PTR(rsc, szedata2_obuf_base_table[index],
- struct szedata2_obuf *);
-}
-
-/**
- * Checks if IBUF is enabled.
- *
- * @param ibuf
- * Pointer to IBUF structure.
- * @return
- * true if IBUF is enabled.
- * false if IBUF is disabled.
- */
-static inline bool
-ibuf_is_enabled(const volatile struct szedata2_ibuf *ibuf)
-{
- return ((szedata2_read32(&ibuf->ibuf_en) & 0x1) != 0) ? true : false;
-}
-
-/**
- * Enables IBUF.
- *
- * @param ibuf
- * Pointer to IBUF structure.
- */
-static inline void
-ibuf_enable(volatile struct szedata2_ibuf *ibuf)
-{
- szedata2_write32(szedata2_read32(&ibuf->ibuf_en) | 0x1, &ibuf->ibuf_en);
-}
-
-/**
- * Disables IBUF.
- *
- * @param ibuf
- * Pointer to IBUF structure.
- */
-static inline void
-ibuf_disable(volatile struct szedata2_ibuf *ibuf)
-{
- szedata2_write32(szedata2_read32(&ibuf->ibuf_en) & ~0x1,
- &ibuf->ibuf_en);
-}
-
-/**
- * Checks if link is up.
- *
- * @param ibuf
- * Pointer to IBUF structure.
- * @return
- * true if ibuf link is up.
- * false if ibuf link is down.
- */
-static inline bool
-ibuf_is_link_up(const volatile struct szedata2_ibuf *ibuf)
-{
- return ((szedata2_read32(&ibuf->ibuf_st) & 0x80) != 0) ? true : false;
-}
-
-/**
- * Get current MAC address check mode from IBUF.
- *
- * @param ibuf
- * Pointer to IBUF structure.
- * @return
- * MAC address check mode constant.
- */
-static inline enum szedata2_mac_check_mode
-ibuf_mac_mode_read(const volatile struct szedata2_ibuf *ibuf)
-{
- switch (szedata2_read32(&ibuf->mac_chmode) & 0x3) {
- case 0x0:
- return SZEDATA2_MAC_CHMODE_PROMISC;
- case 0x1:
- return SZEDATA2_MAC_CHMODE_ONLY_VALID;
- case 0x2:
- return SZEDATA2_MAC_CHMODE_ALL_BROADCAST;
- case 0x3:
- return SZEDATA2_MAC_CHMODE_ALL_MULTICAST;
- default:
- return SZEDATA2_MAC_CHMODE_PROMISC;
- }
-}
-
-/**
- * Writes mode in MAC address check mode register in IBUF.
- *
- * @param ibuf
- * Pointer to IBUF structure.
- * @param mode
- * MAC address check mode to set.
- */
-static inline void
-ibuf_mac_mode_write(volatile struct szedata2_ibuf *ibuf,
- enum szedata2_mac_check_mode mode)
-{
- szedata2_write32((szedata2_read32(&ibuf->mac_chmode) & ~0x3) | mode,
- &ibuf->mac_chmode);
-}
-
-/**
- * Checks if obuf is enabled.
- *
- * @param obuf
- * Pointer to OBUF structure.
- * @return
- * true if OBUF is enabled.
- * false if OBUF is disabled.
- */
-static inline bool
-obuf_is_enabled(const volatile struct szedata2_obuf *obuf)
-{
- return ((szedata2_read32(&obuf->obuf_en) & 0x1) != 0) ? true : false;
-}
-
-/**
- * Enables OBUF.
- *
- * @param obuf
- * Pointer to OBUF structure.
- */
-static inline void
-obuf_enable(volatile struct szedata2_obuf *obuf)
-{
- szedata2_write32(szedata2_read32(&obuf->obuf_en) | 0x1, &obuf->obuf_en);
-}
-
-/**
- * Disables OBUF.
- *
- * @param obuf
- * Pointer to OBUF structure.
- */
-static inline void
-obuf_disable(volatile struct szedata2_obuf *obuf)
-{
- szedata2_write32(szedata2_read32(&obuf->obuf_en) & ~0x1,
- &obuf->obuf_en);
-}
-
-#endif /* _SZEDATA2_IOBUF_H_ */
diff --git a/drivers/net/szedata2/szedata2_logs.h b/drivers/net/szedata2/szedata2_logs.h
new file mode 100644
index 00000000..8d06ffa3
--- /dev/null
+++ b/drivers/net/szedata2/szedata2_logs.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 CESNET
+ */
+
+#ifndef _SZEDATA2_LOGS_H_
+#define _SZEDATA2_LOGS_H_
+
+#include <rte_log.h>
+
+extern int szedata2_logtype_init;
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, szedata2_logtype_init, \
+ "%s(): " fmt "\n", __func__, ## args)
+
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+extern int szedata2_logtype_driver;
+#define PMD_DRV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, szedata2_logtype_driver, \
+ "%s(): " fmt "\n", __func__, ## args)
+
+#endif /* _SZEDATA2_LOGS_H_ */
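As a usage note on the two macros above: they expand to rte_log() calls against per-component log types, so nothing is printed unless the matching log type is registered and its level is high enough (the driver registers both at NOTICE by default, which silences the DEBUG-level PMD_INIT_FUNC_TRACE() output). A minimal sketch of a translation unit using them, with the log-type definitions that back the extern declarations:

#include <rte_log.h>
#include "szedata2_logs.h"

/* Definitions backing the extern declarations in szedata2_logs.h. */
int szedata2_logtype_init;
int szedata2_logtype_driver;

static void
example_szedata2_logging(void)
{
        /* Raise the init log type to DEBUG so function traces show up. */
        rte_log_set_level(szedata2_logtype_init, RTE_LOG_DEBUG);

        PMD_INIT_FUNC_TRACE();  /* emits "example_szedata2_logging():  >>" at DEBUG */
        PMD_INIT_LOG(INFO, "probing card %s", "example");
        PMD_DRV_LOG(ERR, "link went %s", "down");
}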
diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index f09db0ea..5531fe9d 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -15,6 +15,7 @@
#include <rte_net.h>
#include <rte_debug.h>
#include <rte_ip.h>
+#include <rte_string_fns.h>
#include <sys/types.h>
#include <sys/stat.h>
@@ -34,6 +35,7 @@
#include <linux/if_ether.h>
#include <fcntl.h>
+#include <tap_rss.h>
#include <rte_eth_tap.h>
#include <tap_flow.h>
#include <tap_netlink.h>
@@ -42,13 +44,19 @@
/* Linux based path to the TUN device */
#define TUN_TAP_DEV_PATH "/dev/net/tun"
#define DEFAULT_TAP_NAME "dtap"
+#define DEFAULT_TUN_NAME "dtun"
#define ETH_TAP_IFACE_ARG "iface"
#define ETH_TAP_REMOTE_ARG "remote"
#define ETH_TAP_MAC_ARG "mac"
#define ETH_TAP_MAC_FIXED "fixed"
+#define ETH_TAP_USR_MAC_FMT "xx:xx:xx:xx:xx:xx"
+#define ETH_TAP_CMP_MAC_FMT "0123456789ABCDEFabcdef"
+#define ETH_TAP_MAC_ARG_FMT ETH_TAP_MAC_FIXED "|" ETH_TAP_USR_MAC_FMT
+
static struct rte_vdev_driver pmd_tap_drv;
+static struct rte_vdev_driver pmd_tun_drv;
static const char *valid_arguments[] = {
ETH_TAP_IFACE_ARG,
@@ -58,6 +66,9 @@ static const char *valid_arguments[] = {
};
static int tap_unit;
+static unsigned int tun_unit;
+
+static char tuntap_name[8];
static volatile uint32_t tap_trigger; /* Rx trigger */
@@ -65,7 +76,7 @@ static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
.link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_AUTONEG
+ .link_autoneg = ETH_LINK_FIXED,
};
static void
@@ -84,13 +95,20 @@ enum ioctl_mode {
static int tap_intr_handle_set(struct rte_eth_dev *dev, int set);
-/* Tun/Tap allocation routine
+/**
+ * Tun/Tap allocation routine
+ *
+ * @param[in] pmd
+ * Pointer to private structure.
*
- * name is the number of the interface to use, unless NULL to take the host
- * supplied name.
+ * @param[in] is_keepalive
+ * Keepalive flag
+ *
+ * @return
+ * -1 on failure, fd on success
*/
static int
-tun_alloc(struct pmd_internals *pmd)
+tun_alloc(struct pmd_internals *pmd, int is_keepalive)
{
struct ifreq ifr;
#ifdef IFF_MULTI_QUEUE
@@ -104,51 +122,64 @@ tun_alloc(struct pmd_internals *pmd)
* Do not set IFF_NO_PI as packet information header will be needed
* to check if a received packet has been truncated.
*/
- ifr.ifr_flags = IFF_TAP;
+ ifr.ifr_flags = (pmd->type == ETH_TUNTAP_TYPE_TAP) ?
+ IFF_TAP : IFF_TUN | IFF_POINTOPOINT;
snprintf(ifr.ifr_name, IFNAMSIZ, "%s", pmd->name);
- RTE_LOG(DEBUG, PMD, "ifr_name '%s'\n", ifr.ifr_name);
+ TAP_LOG(DEBUG, "ifr_name '%s'", ifr.ifr_name);
fd = open(TUN_TAP_DEV_PATH, O_RDWR);
if (fd < 0) {
- RTE_LOG(ERR, PMD, "Unable to create TAP interface\n");
+ TAP_LOG(ERR, "Unable to create %s interface", tuntap_name);
goto error;
}
#ifdef IFF_MULTI_QUEUE
/* Grab the TUN features to verify we can work multi-queue */
if (ioctl(fd, TUNGETFEATURES, &features) < 0) {
- RTE_LOG(ERR, PMD, "TAP unable to get TUN/TAP features\n");
+ TAP_LOG(ERR, "%s unable to get TUN/TAP features",
+ tuntap_name);
goto error;
}
- RTE_LOG(DEBUG, PMD, " TAP Features %08x\n", features);
+ TAP_LOG(DEBUG, "%s Features %08x", tuntap_name, features);
if (features & IFF_MULTI_QUEUE) {
- RTE_LOG(DEBUG, PMD, " Multi-queue support for %d queues\n",
+ TAP_LOG(DEBUG, " Multi-queue support for %d queues",
RTE_PMD_TAP_MAX_QUEUES);
ifr.ifr_flags |= IFF_MULTI_QUEUE;
} else
#endif
{
ifr.ifr_flags |= IFF_ONE_QUEUE;
- RTE_LOG(DEBUG, PMD, " Single queue only support\n");
+ TAP_LOG(DEBUG, " Single queue only support");
}
/* Set the TUN/TAP configuration and set the name if needed */
if (ioctl(fd, TUNSETIFF, (void *)&ifr) < 0) {
- RTE_LOG(WARNING, PMD,
- "Unable to set TUNSETIFF for %s\n",
- ifr.ifr_name);
- perror("TUNSETIFF");
+ TAP_LOG(WARNING, "Unable to set TUNSETIFF for %s: %s",
+ ifr.ifr_name, strerror(errno));
goto error;
}
+ if (is_keepalive) {
+ /*
+ * Detach the TUN/TAP keep-alive queue
+ * to avoid traffic through it
+ */
+ ifr.ifr_flags = IFF_DETACH_QUEUE;
+ if (ioctl(fd, TUNSETQUEUE, (void *)&ifr) < 0) {
+ TAP_LOG(WARNING,
+ "Unable to detach keep-alive queue for %s: %s",
+ ifr.ifr_name, strerror(errno));
+ goto error;
+ }
+ }
+
/* Always set the file descriptor to non-blocking */
if (fcntl(fd, F_SETFL, O_NONBLOCK) < 0) {
- RTE_LOG(WARNING, PMD,
- "Unable to set %s to nonblocking\n",
- ifr.ifr_name);
- perror("F_SETFL, NONBLOCK");
+ TAP_LOG(WARNING,
+ "Unable to set %s to nonblocking: %s",
+ ifr.ifr_name, strerror(errno));
goto error;
}
@@ -182,10 +213,11 @@ tun_alloc(struct pmd_internals *pmd)
fcntl(fd, F_SETFL, flags | O_ASYNC);
fcntl(fd, F_SETOWN, getpid());
} while (0);
+
if (errno) {
/* Disable trigger globally in case of error */
tap_trigger = 0;
- RTE_LOG(WARNING, PMD, "Rx trigger disabled: %s\n",
+ TAP_LOG(WARNING, "Rx trigger disabled: %s",
strerror(errno));
}
@@ -255,14 +287,9 @@ static uint64_t
tap_rx_offload_get_port_capa(void)
{
/*
- * In order to support legacy apps,
- * report capabilities also as port capabilities.
+ * No specific port Rx offload capabilities.
*/
- return DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ return 0;
}
static uint64_t
@@ -275,21 +302,6 @@ tap_rx_offload_get_queue_capa(void)
DEV_RX_OFFLOAD_CRC_STRIP;
}
-static bool
-tap_rxq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
- uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
- uint64_t queue_supp_offloads = tap_rx_offload_get_queue_capa();
- uint64_t port_supp_offloads = tap_rx_offload_get_port_capa();
-
- if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
- offloads)
- return false;
- if ((port_offloads ^ offloads) & port_supp_offloads)
- return false;
- return true;
-}
-
/* Callback to handle the rx burst of packets to the correct interface and
* file descriptor(s) in a multi-queue setup.
*/
@@ -389,13 +401,9 @@ static uint64_t
tap_tx_offload_get_port_capa(void)
{
/*
- * In order to support legacy apps,
- * report capabilities also as port capabilities.
+ * No specific port Tx offload capabilities.
*/
- return DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM;
+ return 0;
}
static uint64_t
@@ -407,22 +415,6 @@ tap_tx_offload_get_queue_capa(void)
DEV_TX_OFFLOAD_TCP_CKSUM;
}
-static bool
-tap_txq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
- uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
- uint64_t queue_supp_offloads = tap_tx_offload_get_queue_capa();
- uint64_t port_supp_offloads = tap_tx_offload_get_port_capa();
-
- if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
- offloads)
- return false;
- /* Verify we have no conflict with port offloads */
- if ((port_offloads ^ offloads) & port_supp_offloads)
- return false;
- return true;
-}
-
static void
tap_tx_offload(char *packet, uint64_t ol_flags, unsigned int l2_len,
unsigned int l3_len)
@@ -491,7 +483,7 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
for (i = 0; i < nb_pkts; i++) {
struct rte_mbuf *mbuf = bufs[num_tx];
struct iovec iovecs[mbuf->nb_segs + 1];
- struct tun_pi pi = { .flags = 0 };
+ struct tun_pi pi = { .flags = 0, .proto = 0x00 };
struct rte_mbuf *seg = mbuf;
char m_copy[mbuf->data_len];
int n;
@@ -501,6 +493,23 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
if (rte_pktmbuf_pkt_len(mbuf) > max_size)
break;
+ if (txq->type == ETH_TUNTAP_TYPE_TUN) {
+ /*
+ * TUN and TAP are created with IFF_NO_PI disabled.
+ * For the TUN PMD this is mandatory, as these fields are
+ * used by the kernel's tun.c to determine whether the
+ * packet is IP or non-IP.
+ *
+ * The logic fetches the first byte of data from the mbuf
+ * and checks whether it is v4 or v6. If the version nibble
+ * is 4 or 6, the protocol field is updated accordingly.
+ */
+ char *buff_data = rte_pktmbuf_mtod(seg, void *);
+ j = (*buff_data & 0xf0);
+ pi.proto = (j == 0x40) ? rte_cpu_to_be_16(ETHER_TYPE_IPv4) :
+ (j == 0x60) ? rte_cpu_to_be_16(ETHER_TYPE_IPv6) : 0x00;
+ }
+
iovecs[0].iov_base = &pi;
iovecs[0].iov_len = sizeof(pi);
for (j = 1; j <= mbuf->nb_segs; j++) {
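The version-nibble test added above is small enough to verify in isolation. The following standalone sketch mirrors it; the EtherType constants are written out numerically here, while the driver takes ETHER_TYPE_IPv4/IPv6 from its own headers and additionally byte-swaps the result with rte_cpu_to_be_16() because tun_pi.proto is in network byte order.

#include <stdint.h>
#include <stdio.h>

/* Standard EtherType values (0x0800 = IPv4, 0x86DD = IPv6). */
#define EXAMPLE_ETHERTYPE_IPV4 0x0800
#define EXAMPLE_ETHERTYPE_IPV6 0x86DD

/* Same first-nibble check the TUN Tx path performs on the mbuf data. */
static uint16_t
example_proto_from_first_byte(uint8_t first_byte)
{
        switch (first_byte & 0xf0) {
        case 0x40:              /* IPv4 header: version field is 4 */
                return EXAMPLE_ETHERTYPE_IPV4;
        case 0x60:              /* IPv6 header: version field is 6 */
                return EXAMPLE_ETHERTYPE_IPV6;
        default:                /* neither: proto stays 0 */
                return 0;
        }
}

int main(void)
{
        uint8_t ipv4_start = 0x45;      /* version 4, IHL 5 */
        uint8_t ipv6_start = 0x60;      /* version 6 */

        printf("0x%04x 0x%04x\n",
               example_proto_from_first_byte(ipv4_start),
               example_proto_from_first_byte(ipv6_start));
        /* Prints: 0x0800 0x86dd */
        return 0;
}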
@@ -593,7 +602,9 @@ apply:
case SIOCSIFMTU:
break;
default:
- RTE_ASSERT(!"unsupported request type: must not happen");
+ RTE_LOG(WARNING, PMD, "%s: ioctl() called with wrong arg\n",
+ pmd->name);
+ return -EINVAL;
}
if (ioctl(pmd->ioctl_sock, request, ifr) < 0)
goto error;
@@ -602,8 +613,8 @@ apply:
return 0;
error:
- RTE_LOG(DEBUG, PMD, "%s: %s(%s) failed: %s(%d)\n", ifr->ifr_name,
- __func__, tap_ioctl_req2str(request), strerror(errno), errno);
+ TAP_LOG(DEBUG, "%s(%s) failed: %s(%d)", ifr->ifr_name,
+ tap_ioctl_req2str(request), strerror(errno), errno);
return -errno;
}
@@ -650,39 +661,28 @@ tap_dev_stop(struct rte_eth_dev *dev)
static int
tap_dev_configure(struct rte_eth_dev *dev)
{
- uint64_t supp_tx_offloads = tap_tx_offload_get_port_capa();
- uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
-
- if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
- rte_errno = ENOTSUP;
- RTE_LOG(ERR, PMD,
- "Some Tx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
- tx_offloads, supp_tx_offloads);
- return -rte_errno;
- }
if (dev->data->nb_rx_queues > RTE_PMD_TAP_MAX_QUEUES) {
- RTE_LOG(ERR, PMD,
- "%s: number of rx queues %d exceeds max num of queues %d\n",
+ TAP_LOG(ERR,
+ "%s: number of rx queues %d exceeds max num of queues %d",
dev->device->name,
dev->data->nb_rx_queues,
RTE_PMD_TAP_MAX_QUEUES);
return -1;
}
if (dev->data->nb_tx_queues > RTE_PMD_TAP_MAX_QUEUES) {
- RTE_LOG(ERR, PMD,
- "%s: number of tx queues %d exceeds max num of queues %d\n",
+ TAP_LOG(ERR,
+ "%s: number of tx queues %d exceeds max num of queues %d",
dev->device->name,
dev->data->nb_tx_queues,
RTE_PMD_TAP_MAX_QUEUES);
return -1;
}
- RTE_LOG(INFO, PMD, "%s: %p: TX configured queues number: %u\n",
- dev->device->name, (void *)dev, dev->data->nb_tx_queues);
+ TAP_LOG(INFO, "%s: %p: TX configured queues number: %u",
+ dev->device->name, (void *)dev, dev->data->nb_tx_queues);
- RTE_LOG(INFO, PMD, "%s: %p: RX configured queues number: %u\n",
- dev->device->name, (void *)dev, dev->data->nb_rx_queues);
+ TAP_LOG(INFO, "%s: %p: RX configured queues number: %u",
+ dev->device->name, (void *)dev, dev->data->nb_rx_queues);
return 0;
}
@@ -732,7 +732,6 @@ tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_queues = RTE_PMD_TAP_MAX_QUEUES;
dev_info->max_tx_queues = RTE_PMD_TAP_MAX_QUEUES;
dev_info->min_rx_bufsize = 0;
- dev_info->pci_dev = NULL;
dev_info->speed_capa = tap_dev_speed_capa();
dev_info->rx_queue_offload_capa = tap_rx_offload_get_queue_capa();
dev_info->rx_offload_capa = tap_rx_offload_get_port_capa() |
@@ -740,6 +739,12 @@ tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->tx_queue_offload_capa = tap_tx_offload_get_queue_capa();
dev_info->tx_offload_capa = tap_tx_offload_get_port_capa() |
dev_info->tx_queue_offload_capa;
+ dev_info->hash_key_size = TAP_RSS_HASH_KEY_SIZE;
+ /*
+ * limitation: TAP supports all of IP, UDP and TCP hash
+ * functions together and not in partial combinations
+ */
+ dev_info->flow_type_rss_offloads = ~TAP_RSS_HF_MASK;
}
static int
@@ -830,6 +835,15 @@ tap_dev_close(struct rte_eth_dev *dev)
ioctl(internals->ioctl_sock, SIOCSIFFLAGS,
&internals->remote_initial_flags);
}
+
+ if (internals->ka_fd != -1) {
+ close(internals->ka_fd);
+ internals->ka_fd = -1;
+ }
+ /*
+ * Since the TUN device no longer has any open file
+ * descriptors, it will be removed from the kernel.
+ */
}
static void
@@ -929,48 +943,64 @@ tap_allmulti_disable(struct rte_eth_dev *dev)
tap_flow_implicit_destroy(pmd, TAP_REMOTE_ALLMULTI);
}
-static void
+static int
tap_mac_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
struct pmd_internals *pmd = dev->data->dev_private;
enum ioctl_mode mode = LOCAL_ONLY;
struct ifreq ifr;
+ int ret;
+
+ if (pmd->type == ETH_TUNTAP_TYPE_TUN) {
+ TAP_LOG(ERR, "%s: can't MAC address for TUN",
+ dev->device->name);
+ return -ENOTSUP;
+ }
if (is_zero_ether_addr(mac_addr)) {
- RTE_LOG(ERR, PMD, "%s: can't set an empty MAC address\n",
+ TAP_LOG(ERR, "%s: can't set an empty MAC address",
dev->device->name);
- return;
+ return -EINVAL;
}
/* Check the actual current MAC address on the tap netdevice */
- if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
- return;
+ ret = tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, LOCAL_ONLY);
+ if (ret < 0)
+ return ret;
if (is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data,
mac_addr))
- return;
+ return 0;
/* Check the current MAC address on the remote */
- if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY) < 0)
- return;
+ ret = tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY);
+ if (ret < 0)
+ return ret;
if (!is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data,
mac_addr))
mode = LOCAL_AND_REMOTE;
ifr.ifr_hwaddr.sa_family = AF_LOCAL;
rte_memcpy(ifr.ifr_hwaddr.sa_data, mac_addr, ETHER_ADDR_LEN);
- if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, mode) < 0)
- return;
+ ret = tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, mode);
+ if (ret < 0)
+ return ret;
rte_memcpy(&pmd->eth_addr, mac_addr, ETHER_ADDR_LEN);
if (pmd->remote_if_index && !pmd->flow_isolate) {
/* Replace MAC redirection rule after a MAC change */
- if (tap_flow_implicit_destroy(pmd, TAP_REMOTE_LOCAL_MAC) < 0) {
- RTE_LOG(ERR, PMD,
- "%s: Couldn't delete MAC redirection rule\n",
+ ret = tap_flow_implicit_destroy(pmd, TAP_REMOTE_LOCAL_MAC);
+ if (ret < 0) {
+ TAP_LOG(ERR,
+ "%s: Couldn't delete MAC redirection rule",
dev->device->name);
- return;
+ return ret;
}
- if (tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0)
- RTE_LOG(ERR, PMD,
- "%s: Couldn't add MAC redirection rule\n",
+ ret = tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC);
+ if (ret < 0) {
+ TAP_LOG(ERR,
+ "%s: Couldn't add MAC redirection rule",
dev->device->name);
+ return ret;
+ }
}
+
+ return 0;
}
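The change from void to int lets these failures propagate to the application through the ethdev MAC-set call. A minimal caller-side sketch, assuming this function is wired up as the .mac_addr_set op (that wiring is not visible in this hunk):

#include <rte_ethdev.h>
#include <rte_ether.h>

/* Illustrative: set a locally administered MAC on a TAP port. */
static int
example_set_tap_mac(uint16_t port_id)
{
        struct ether_addr addr = {
                .addr_bytes = { 0x02, 0x64, 0x74, 0x61, 0x70, 0x01 }
        };

        /* Surfaces -ENOTSUP for a TUN port, -EINVAL for an all-zero MAC, etc. */
        return rte_eth_dev_default_mac_addr_set(port_id, &addr);
}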
static int
@@ -997,35 +1027,35 @@ tap_setup_queue(struct rte_eth_dev *dev,
}
if (*fd != -1) {
/* fd for this queue already exists */
- RTE_LOG(DEBUG, PMD, "%s: fd %d for %s queue qid %d exists\n",
+ TAP_LOG(DEBUG, "%s: fd %d for %s queue qid %d exists",
pmd->name, *fd, dir, qid);
} else if (*other_fd != -1) {
/* Only other_fd exists. dup it */
*fd = dup(*other_fd);
if (*fd < 0) {
*fd = -1;
- RTE_LOG(ERR, PMD, "%s: dup() failed.\n",
- pmd->name);
+ TAP_LOG(ERR, "%s: dup() failed.", pmd->name);
return -1;
}
- RTE_LOG(DEBUG, PMD, "%s: dup fd %d for %s queue qid %d (%d)\n",
+ TAP_LOG(DEBUG, "%s: dup fd %d for %s queue qid %d (%d)",
pmd->name, *other_fd, dir, qid, *fd);
} else {
/* Both RX and TX fds do not exist (equal -1). Create fd */
- *fd = tun_alloc(pmd);
+ *fd = tun_alloc(pmd, 0);
if (*fd < 0) {
*fd = -1; /* restore original value */
- RTE_LOG(ERR, PMD, "%s: tun_alloc() failed.\n",
- pmd->name);
+ TAP_LOG(ERR, "%s: tun_alloc() failed.", pmd->name);
return -1;
}
- RTE_LOG(DEBUG, PMD, "%s: add %s queue for qid %d fd %d\n",
+ TAP_LOG(DEBUG, "%s: add %s queue for qid %d fd %d",
pmd->name, dir, qid, *fd);
}
tx->mtu = &dev->data->mtu;
rx->rxmode = &dev->data->dev_conf.rxmode;
+ tx->type = pmd->type;
+
return *fd;
}
@@ -1049,25 +1079,12 @@ tap_rx_queue_setup(struct rte_eth_dev *dev,
int i;
if (rx_queue_id >= dev->data->nb_rx_queues || !mp) {
- RTE_LOG(WARNING, PMD,
- "nb_rx_queues %d too small or mempool NULL\n",
+ TAP_LOG(WARNING,
+ "nb_rx_queues %d too small or mempool NULL",
dev->data->nb_rx_queues);
return -1;
}
- /* Verify application offloads are valid for our port and queue. */
- if (!tap_rxq_are_offloads_valid(dev, rx_conf->offloads)) {
- rte_errno = ENOTSUP;
- RTE_LOG(ERR, PMD,
- "%p: Rx queue offloads 0x%" PRIx64
- " don't match port offloads 0x%" PRIx64
- " or supported offloads 0x%" PRIx64 "\n",
- (void *)dev, rx_conf->offloads,
- dev->data->dev_conf.rxmode.offloads,
- (tap_rx_offload_get_port_capa() |
- tap_rx_offload_get_queue_capa()));
- return -rte_errno;
- }
rxq->mp = mp;
rxq->trigger_seen = 1; /* force initial burst */
rxq->in_port = dev->data->port_id;
@@ -1075,8 +1092,8 @@ tap_rx_queue_setup(struct rte_eth_dev *dev,
iovecs = rte_zmalloc_socket(dev->device->name, sizeof(*iovecs), 0,
socket_id);
if (!iovecs) {
- RTE_LOG(WARNING, PMD,
- "%s: Couldn't allocate %d RX descriptors\n",
+ TAP_LOG(WARNING,
+ "%s: Couldn't allocate %d RX descriptors",
dev->device->name, nb_desc);
return -ENOMEM;
}
@@ -1095,8 +1112,8 @@ tap_rx_queue_setup(struct rte_eth_dev *dev,
for (i = 1; i <= nb_desc; i++) {
*tmp = rte_pktmbuf_alloc(rxq->mp);
if (!*tmp) {
- RTE_LOG(WARNING, PMD,
- "%s: couldn't allocate memory for queue %d\n",
+ TAP_LOG(WARNING,
+ "%s: couldn't allocate memory for queue %d",
dev->device->name, rx_queue_id);
ret = -ENOMEM;
goto error;
@@ -1108,7 +1125,7 @@ tap_rx_queue_setup(struct rte_eth_dev *dev,
tmp = &(*tmp)->next;
}
- RTE_LOG(DEBUG, PMD, " RX TAP device name %s, qid %d on fd %d\n",
+ TAP_LOG(DEBUG, " RX TUNTAP device name %s, qid %d on fd %d",
internals->name, rx_queue_id, internals->rxq[rx_queue_id].fd);
return 0;
@@ -1131,39 +1148,24 @@ tap_tx_queue_setup(struct rte_eth_dev *dev,
struct pmd_internals *internals = dev->data->dev_private;
struct tx_queue *txq;
int ret;
+ uint64_t offloads;
if (tx_queue_id >= dev->data->nb_tx_queues)
return -1;
dev->data->tx_queues[tx_queue_id] = &internals->txq[tx_queue_id];
txq = dev->data->tx_queues[tx_queue_id];
- /*
- * Don't verify port offloads for application which
- * use the old API.
- */
- if (tx_conf != NULL &&
- !!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)) {
- if (tap_txq_are_offloads_valid(dev, tx_conf->offloads)) {
- txq->csum = !!(tx_conf->offloads &
- (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM));
- } else {
- rte_errno = ENOTSUP;
- RTE_LOG(ERR, PMD,
- "%p: Tx queue offloads 0x%" PRIx64
- " don't match port offloads 0x%" PRIx64
- " or supported offloads 0x%" PRIx64,
- (void *)dev, tx_conf->offloads,
- dev->data->dev_conf.txmode.offloads,
- tap_tx_offload_get_port_capa());
- return -rte_errno;
- }
- }
+
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+ txq->csum = !!(offloads &
+ (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM));
+
ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
if (ret == -1)
return -1;
- RTE_LOG(DEBUG, PMD,
- " TX TAP device name %s, qid %d on fd %d csum %s\n",
+ TAP_LOG(DEBUG,
+ " TX TUNTAP device name %s, qid %d on fd %d csum %s",
internals->name, tx_queue_id, internals->txq[tx_queue_id].fd,
txq->csum ? "on" : "off");
@@ -1307,6 +1309,39 @@ tap_flow_ctrl_set(struct rte_eth_dev *dev __rte_unused,
return 0;
}
+/**
+ * DPDK callback to update the RSS hash configuration.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[in] rss_conf
+ * RSS configuration data.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+tap_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ if (rss_conf->rss_hf & TAP_RSS_HF_MASK) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ if (rss_conf->rss_key && rss_conf->rss_key_len) {
+ /*
+ * Currently TAP RSS key is hard coded
+ * and cannot be updated
+ */
+ TAP_LOG(ERR,
+ "port %u RSS key cannot be updated",
+ dev->data->port_id);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ return 0;
+}
+
static const struct eth_dev_ops ops = {
.dev_start = tap_dev_start,
.dev_stop = tap_dev_stop,
@@ -1332,12 +1367,14 @@ static const struct eth_dev_ops ops = {
.stats_get = tap_stats_get,
.stats_reset = tap_stats_reset,
.dev_supported_ptypes_get = tap_dev_supported_ptypes_get,
+ .rss_hash_update = tap_rss_hash_update,
.filter_ctrl = tap_dev_filter_ctrl,
};
static int
eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
- char *remote_iface, int fixed_mac_type)
+ char *remote_iface, struct ether_addr *mac_addr,
+ enum rte_tuntap_type type)
{
int numa_node = rte_socket_id();
struct rte_eth_dev *dev;
@@ -1346,34 +1383,31 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
struct ifreq ifr;
int i;
- RTE_LOG(DEBUG, PMD, " TAP device on numa %u\n", rte_socket_id());
-
- data = rte_zmalloc_socket(tap_name, sizeof(*data), 0, numa_node);
- if (!data) {
- RTE_LOG(ERR, PMD, "TAP Failed to allocate data\n");
- goto error_exit_nodev;
- }
+ TAP_LOG(DEBUG, "%s device on numa %u",
+ tuntap_name, rte_socket_id());
dev = rte_eth_vdev_allocate(vdev, sizeof(*pmd));
if (!dev) {
- RTE_LOG(ERR, PMD, "TAP Unable to allocate device struct\n");
+ TAP_LOG(ERR, "%s Unable to allocate device struct",
+ tuntap_name);
goto error_exit_nodev;
}
pmd = dev->data->dev_private;
pmd->dev = dev;
snprintf(pmd->name, sizeof(pmd->name), "%s", tap_name);
+ pmd->type = type;
pmd->ioctl_sock = socket(AF_INET, SOCK_DGRAM, 0);
if (pmd->ioctl_sock == -1) {
- RTE_LOG(ERR, PMD,
- "TAP Unable to get a socket for management: %s\n",
- strerror(errno));
+ TAP_LOG(ERR,
+ "%s Unable to get a socket for management: %s",
+ tuntap_name, strerror(errno));
goto error_exit;
}
/* Setup some default values */
- rte_memcpy(data, dev->data, sizeof(*data));
+ data = dev->data;
data->dev_private = pmd;
data->dev_flags = RTE_ETH_DEV_INTR_LSC;
data->numa_node = numa_node;
@@ -1384,7 +1418,6 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
data->nb_rx_queues = 0;
data->nb_tx_queues = 0;
- dev->data = data;
dev->dev_ops = &ops;
dev->rx_pkt_burst = pmd_rx_burst;
dev->tx_pkt_burst = pmd_tx_burst;
@@ -1394,39 +1427,43 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
dev->intr_handle = &pmd->intr_handle;
/* Presetup the fds to -1 as being not valid */
+ pmd->ka_fd = -1;
for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
pmd->rxq[i].fd = -1;
pmd->txq[i].fd = -1;
}
- if (fixed_mac_type) {
- /* fixed mac = 00:64:74:61:70:<iface_idx> */
- static int iface_idx;
- char mac[ETHER_ADDR_LEN] = "\0dtap";
-
- mac[ETHER_ADDR_LEN - 1] = iface_idx++;
- rte_memcpy(&pmd->eth_addr, mac, ETHER_ADDR_LEN);
- } else {
- eth_random_addr((uint8_t *)&pmd->eth_addr);
+ if (pmd->type == ETH_TUNTAP_TYPE_TAP) {
+ if (is_zero_ether_addr(mac_addr))
+ eth_random_addr((uint8_t *)&pmd->eth_addr);
+ else
+ rte_memcpy(&pmd->eth_addr, mac_addr, sizeof(*mac_addr));
}
- /* Immediately create the netdevice (this will create the 1st queue). */
- /* rx queue */
- if (tap_setup_queue(dev, pmd, 0, 1) == -1)
- goto error_exit;
- /* tx queue */
- if (tap_setup_queue(dev, pmd, 0, 0) == -1)
+ /*
+ * Allocate a TUN device keep-alive file descriptor that will only be
+ * closed when the TUN device itself is closed or removed.
+ * This keep-alive file descriptor will guarantee that the TUN device
+ * exists even when all of its queues are closed
+ */
+ pmd->ka_fd = tun_alloc(pmd, 1);
+ if (pmd->ka_fd == -1) {
+ TAP_LOG(ERR, "Unable to create %s interface", tuntap_name);
goto error_exit;
+ }
ifr.ifr_mtu = dev->data->mtu;
if (tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE) < 0)
goto error_exit;
- memset(&ifr, 0, sizeof(struct ifreq));
- ifr.ifr_hwaddr.sa_family = AF_LOCAL;
- rte_memcpy(ifr.ifr_hwaddr.sa_data, &pmd->eth_addr, ETHER_ADDR_LEN);
- if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
- goto error_exit;
+ if (pmd->type == ETH_TUNTAP_TYPE_TAP) {
+ memset(&ifr, 0, sizeof(struct ifreq));
+ ifr.ifr_hwaddr.sa_family = AF_LOCAL;
+ rte_memcpy(ifr.ifr_hwaddr.sa_data, &pmd->eth_addr,
+ ETHER_ADDR_LEN);
+ if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
+ goto error_exit;
+ }
/*
* Set up everything related to rte_flow:
@@ -1438,22 +1475,22 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
*/
pmd->nlsk_fd = tap_nl_init(0);
if (pmd->nlsk_fd == -1) {
- RTE_LOG(WARNING, PMD, "%s: failed to create netlink socket.\n",
+ TAP_LOG(WARNING, "%s: failed to create netlink socket.",
pmd->name);
goto disable_rte_flow;
}
pmd->if_index = if_nametoindex(pmd->name);
if (!pmd->if_index) {
- RTE_LOG(ERR, PMD, "%s: failed to get if_index.\n", pmd->name);
+ TAP_LOG(ERR, "%s: failed to get if_index.", pmd->name);
goto disable_rte_flow;
}
if (qdisc_create_multiq(pmd->nlsk_fd, pmd->if_index) < 0) {
- RTE_LOG(ERR, PMD, "%s: failed to create multiq qdisc.\n",
+ TAP_LOG(ERR, "%s: failed to create multiq qdisc.",
pmd->name);
goto disable_rte_flow;
}
if (qdisc_create_ingress(pmd->nlsk_fd, pmd->if_index) < 0) {
- RTE_LOG(ERR, PMD, "%s: failed to create ingress qdisc.\n",
+ TAP_LOG(ERR, "%s: failed to create ingress qdisc.",
pmd->name);
goto disable_rte_flow;
}
@@ -1462,7 +1499,7 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
if (strlen(remote_iface)) {
pmd->remote_if_index = if_nametoindex(remote_iface);
if (!pmd->remote_if_index) {
- RTE_LOG(ERR, PMD, "%s: failed to get %s if_index.\n",
+ TAP_LOG(ERR, "%s: failed to get %s if_index.",
pmd->name, remote_iface);
goto error_remote;
}
@@ -1474,7 +1511,7 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
/* Replicate remote MAC address */
if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY) < 0) {
- RTE_LOG(ERR, PMD, "%s: failed to get %s MAC address.\n",
+ TAP_LOG(ERR, "%s: failed to get %s MAC address.",
pmd->name, pmd->remote_iface);
goto error_remote;
}
@@ -1482,7 +1519,7 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
ETHER_ADDR_LEN);
/* The desired MAC is already in ifreq after SIOCGIFHWADDR. */
if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0) {
- RTE_LOG(ERR, PMD, "%s: failed to get %s MAC address.\n",
+ TAP_LOG(ERR, "%s: failed to get %s MAC address.",
pmd->name, remote_iface);
goto error_remote;
}
@@ -1495,7 +1532,7 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
qdisc_flush(pmd->nlsk_fd, pmd->remote_if_index);
if (qdisc_create_ingress(pmd->nlsk_fd,
pmd->remote_if_index) < 0) {
- RTE_LOG(ERR, PMD, "%s: failed to create ingress qdisc.\n",
+ TAP_LOG(ERR, "%s: failed to create ingress qdisc.",
pmd->remote_iface);
goto error_remote;
}
@@ -1504,26 +1541,27 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0 ||
tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCAST) < 0 ||
tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCASTV6) < 0) {
- RTE_LOG(ERR, PMD,
- "%s: failed to create implicit rules.\n",
+ TAP_LOG(ERR,
+ "%s: failed to create implicit rules.",
pmd->name);
goto error_remote;
}
}
+ rte_eth_dev_probing_finish(dev);
return 0;
disable_rte_flow:
- RTE_LOG(ERR, PMD, " Disabling rte flow support: %s(%d)\n",
+ TAP_LOG(ERR, " Disabling rte flow support: %s(%d)",
strerror(errno), errno);
if (strlen(remote_iface)) {
- RTE_LOG(ERR, PMD, "Remote feature requires flow support.\n");
+ TAP_LOG(ERR, "Remote feature requires flow support.");
goto error_exit;
}
return 0;
error_remote:
- RTE_LOG(ERR, PMD, " Can't set up remote feature: %s(%d)\n",
+ TAP_LOG(ERR, " Can't set up remote feature: %s(%d)",
strerror(errno), errno);
tap_flow_implicit_flush(pmd, NULL);
@@ -1533,10 +1571,9 @@ error_exit:
rte_eth_dev_release_port(dev);
error_exit_nodev:
- RTE_LOG(ERR, PMD, "TAP Unable to initialize %s\n",
- rte_vdev_device_name(vdev));
+ TAP_LOG(ERR, "%s Unable to initialize %s",
+ tuntap_name, rte_vdev_device_name(vdev));
- rte_free(data);
return -EINVAL;
}
@@ -1548,7 +1585,7 @@ set_interface_name(const char *key __rte_unused,
char *name = (char *)extra_args;
if (value)
- snprintf(name, RTE_ETH_NAME_MAX_LEN - 1, "%s", value);
+ strlcpy(name, value, RTE_ETH_NAME_MAX_LEN - 1);
else
snprintf(name, RTE_ETH_NAME_MAX_LEN - 1, "%s%d",
DEFAULT_TAP_NAME, (tap_unit - 1));
@@ -1564,20 +1601,134 @@ set_remote_iface(const char *key __rte_unused,
char *name = (char *)extra_args;
if (value)
- snprintf(name, RTE_ETH_NAME_MAX_LEN, "%s", value);
+ strlcpy(name, value, RTE_ETH_NAME_MAX_LEN);
return 0;
}
+static int parse_user_mac(struct ether_addr *user_mac,
+ const char *value)
+{
+ unsigned int index = 0;
+ char mac_temp[strlen(ETH_TAP_USR_MAC_FMT) + 1], *mac_byte = NULL;
+
+ if (user_mac == NULL || value == NULL)
+ return 0;
+
+ strlcpy(mac_temp, value, sizeof(mac_temp));
+ mac_byte = strtok(mac_temp, ":");
+
+ while ((mac_byte != NULL) &&
+ (strlen(mac_byte) <= 2) &&
+ (strlen(mac_byte) == strspn(mac_byte,
+ ETH_TAP_CMP_MAC_FMT))) {
+ user_mac->addr_bytes[index++] = strtoul(mac_byte, NULL, 16);
+ mac_byte = strtok(NULL, ":");
+ }
+
+ return index;
+}
+
static int
set_mac_type(const char *key __rte_unused,
const char *value,
void *extra_args)
{
- if (value &&
- !strncasecmp(ETH_TAP_MAC_FIXED, value, strlen(ETH_TAP_MAC_FIXED)))
- *(int *)extra_args = 1;
+ struct ether_addr *user_mac = extra_args;
+
+ if (!value)
+ return 0;
+
+ if (!strncasecmp(ETH_TAP_MAC_FIXED, value, strlen(ETH_TAP_MAC_FIXED))) {
+ static int iface_idx;
+
+ /* fixed mac = 00:64:74:61:70:<iface_idx> */
+ memcpy((char *)user_mac->addr_bytes, "\0dtap", ETHER_ADDR_LEN);
+ user_mac->addr_bytes[ETHER_ADDR_LEN - 1] = iface_idx++ + '0';
+ goto success;
+ }
+
+ if (parse_user_mac(user_mac, value) != 6)
+ goto error;
+success:
+ TAP_LOG(DEBUG, "TAP user MAC param (%s)", value);
return 0;
+
+error:
+ TAP_LOG(ERR, "TAP user MAC (%s) is not in format (%s|%s)",
+ value, ETH_TAP_MAC_FIXED, ETH_TAP_USR_MAC_FMT);
+ return -1;
+}
+
+/*
+ * Open a TUN interface device. The TUN PMD
+ * 1) sets tap_type to false,
+ * 2) takes the iface devarg as the interface name,
+ * 3) reports a fixed 10G link speed, since the interface is virtual.
+ */
+static int
+rte_pmd_tun_probe(struct rte_vdev_device *dev)
+{
+ const char *name, *params;
+ int ret;
+ struct rte_kvargs *kvlist = NULL;
+ char tun_name[RTE_ETH_NAME_MAX_LEN];
+ char remote_iface[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_dev *eth_dev;
+
+ strcpy(tuntap_name, "TUN");
+
+ name = rte_vdev_device_name(dev);
+ params = rte_vdev_device_args(dev);
+ memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(params) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ TAP_LOG(ERR, "Failed to probe %s", name);
+ return -1;
+ }
+ eth_dev->dev_ops = &ops;
+ return 0;
+ }
+
+ snprintf(tun_name, sizeof(tun_name), "%s%u",
+ DEFAULT_TUN_NAME, tun_unit++);
+
+ if (params && (params[0] != '\0')) {
+ TAP_LOG(DEBUG, "parameters (%s)", params);
+
+ kvlist = rte_kvargs_parse(params, valid_arguments);
+ if (kvlist) {
+ if (rte_kvargs_count(kvlist, ETH_TAP_IFACE_ARG) == 1) {
+ ret = rte_kvargs_process(kvlist,
+ ETH_TAP_IFACE_ARG,
+ &set_interface_name,
+ tun_name);
+
+ if (ret == -1)
+ goto leave;
+ }
+ }
+ }
+ pmd_link.link_speed = ETH_SPEED_NUM_10G;
+
+ TAP_LOG(NOTICE, "Initializing pmd_tun for %s as %s",
+ name, tun_name);
+
+ ret = eth_dev_tap_create(dev, tun_name, remote_iface, 0,
+ ETH_TUNTAP_TYPE_TUN);
+
+leave:
+ if (ret == -1) {
+ TAP_LOG(ERR, "Failed to create pmd for %s as %s",
+ name, tun_name);
+ tun_unit--; /* Restore the unit number */
+ }
+ rte_kvargs_free(kvlist);
+
+ return ret;
}
/* Open a TAP interface device.
@@ -1591,18 +1742,34 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
int speed;
char tap_name[RTE_ETH_NAME_MAX_LEN];
char remote_iface[RTE_ETH_NAME_MAX_LEN];
- int fixed_mac_type = 0;
+ struct ether_addr user_mac = { .addr_bytes = {0} };
+ struct rte_eth_dev *eth_dev;
+
+ strcpy(tuntap_name, "TAP");
name = rte_vdev_device_name(dev);
params = rte_vdev_device_args(dev);
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(params) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ TAP_LOG(ERR, "Failed to probe %s", name);
+ return -1;
+ }
+ /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->dev_ops = &ops;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
+
speed = ETH_SPEED_NUM_10G;
snprintf(tap_name, sizeof(tap_name), "%s%d",
DEFAULT_TAP_NAME, tap_unit++);
memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);
if (params && (params[0] != '\0')) {
- RTE_LOG(DEBUG, PMD, "parameters (%s)\n", params);
+ TAP_LOG(DEBUG, "parameters (%s)", params);
kvlist = rte_kvargs_parse(params, valid_arguments);
if (kvlist) {
@@ -1628,7 +1795,7 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
ret = rte_kvargs_process(kvlist,
ETH_TAP_MAC_ARG,
&set_mac_type,
- &fixed_mac_type);
+ &user_mac);
if (ret == -1)
goto leave;
}
@@ -1636,14 +1803,15 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
}
pmd_link.link_speed = speed;
- RTE_LOG(NOTICE, PMD, "Initializing pmd_tap for %s as %s\n",
+ TAP_LOG(NOTICE, "Initializing pmd_tap for %s as %s",
name, tap_name);
- ret = eth_dev_tap_create(dev, tap_name, remote_iface, fixed_mac_type);
+ ret = eth_dev_tap_create(dev, tap_name, remote_iface, &user_mac,
+ ETH_TUNTAP_TYPE_TAP);
leave:
if (ret == -1) {
- RTE_LOG(ERR, PMD, "Failed to create pmd for %s as %s\n",
+ TAP_LOG(ERR, "Failed to create pmd for %s as %s",
name, tap_name);
tap_unit--; /* Restore the unit number */
}
@@ -1652,7 +1820,7 @@ leave:
return ret;
}
-/* detach a TAP device.
+/* detach a TUNTAP device.
*/
static int
rte_pmd_tap_remove(struct rte_vdev_device *dev)
@@ -1661,15 +1829,17 @@ rte_pmd_tap_remove(struct rte_vdev_device *dev)
struct pmd_internals *internals;
int i;
- RTE_LOG(DEBUG, PMD, "Closing TUN/TAP Ethernet device on numa %u\n",
- rte_socket_id());
-
/* find the ethdev entry */
eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
if (!eth_dev)
return 0;
internals = eth_dev->data->dev_private;
+
+ TAP_LOG(DEBUG, "Closing %s Ethernet device on numa %u",
+ (internals->type == ETH_TUNTAP_TYPE_TAP) ? "TAP" : "TUN",
+ rte_socket_id());
+
if (internals->nlsk_fd) {
tap_flow_flush(eth_dev, NULL);
tap_flow_implicit_flush(internals, NULL);
@@ -1688,20 +1858,41 @@ rte_pmd_tap_remove(struct rte_vdev_device *dev)
close(internals->ioctl_sock);
rte_free(eth_dev->data->dev_private);
- rte_free(eth_dev->data);
-
rte_eth_dev_release_port(eth_dev);
+ if (internals->ka_fd != -1) {
+ close(internals->ka_fd);
+ internals->ka_fd = -1;
+ }
return 0;
}
+static struct rte_vdev_driver pmd_tun_drv = {
+ .probe = rte_pmd_tun_probe,
+ .remove = rte_pmd_tap_remove,
+};
+
static struct rte_vdev_driver pmd_tap_drv = {
.probe = rte_pmd_tap_probe,
.remove = rte_pmd_tap_remove,
};
+
RTE_PMD_REGISTER_VDEV(net_tap, pmd_tap_drv);
+RTE_PMD_REGISTER_VDEV(net_tun, pmd_tun_drv);
RTE_PMD_REGISTER_ALIAS(net_tap, eth_tap);
+RTE_PMD_REGISTER_PARAM_STRING(net_tun,
+ ETH_TAP_IFACE_ARG "=<string> ");
RTE_PMD_REGISTER_PARAM_STRING(net_tap,
ETH_TAP_IFACE_ARG "=<string> "
- ETH_TAP_MAC_ARG "=" ETH_TAP_MAC_FIXED " "
+ ETH_TAP_MAC_ARG "=" ETH_TAP_MAC_ARG_FMT " "
ETH_TAP_REMOTE_ARG "=<string>");
+int tap_logtype;
+
+RTE_INIT(tap_init_log);
+static void
+tap_init_log(void)
+{
+ tap_logtype = rte_log_register("pmd.net.tap");
+ if (tap_logtype >= 0)
+ rte_log_set_level(tap_logtype, RTE_LOG_NOTICE);
+}
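The new mac= devarg accepts either the literal "fixed" or an explicit address; parse_user_mac() above splits the value on ':' and accepts at most two hex digits per token. Below is a standalone sketch of the same parsing style, assuming the usual xx:xx:xx:xx:xx:xx format (the demo_ names are illustrative only, and unlike the driver this sketch explicitly bounds the output to six bytes):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int
demo_parse_mac(uint8_t mac[6], const char *value)
{
	char tmp[18]; /* strlen("ff:ff:ff:ff:ff:ff") + 1 */
	char *byte;
	int idx = 0;

	if (value == NULL || strlen(value) >= sizeof(tmp))
		return 0;
	strcpy(tmp, value);
	byte = strtok(tmp, ":");
	while (byte != NULL && idx < 6 &&
	       strlen(byte) <= 2 &&
	       strlen(byte) == strspn(byte, "0123456789abcdefABCDEF")) {
		mac[idx++] = (uint8_t)strtoul(byte, NULL, 16);
		byte = strtok(NULL, ":");
	}
	return idx; /* the driver requires 6 parsed bytes */
}

int
main(void)
{
	uint8_t mac[6];

	if (demo_parse_mac(mac, "00:64:74:61:70:01") == 6)
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}

The "fixed" keyword, handled separately in set_mac_type() above, instead yields the 00:64:74:61:70:&lt;n&gt; ("\0dtap") sequence with a per-interface last byte.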
diff --git a/drivers/net/tap/rte_eth_tap.h b/drivers/net/tap/rte_eth_tap.h
index 53a506ad..7b21d0d8 100644
--- a/drivers/net/tap/rte_eth_tap.h
+++ b/drivers/net/tap/rte_eth_tap.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef _RTE_ETH_TAP_H_
@@ -15,6 +15,7 @@
#include <rte_ethdev_driver.h>
#include <rte_ether.h>
+#include "tap_log.h"
#ifdef IFF_MULTI_QUEUE
#define RTE_PMD_TAP_MAX_QUEUES TAP_MAX_QUEUES
@@ -22,6 +23,13 @@
#define RTE_PMD_TAP_MAX_QUEUES 1
#endif
+enum rte_tuntap_type {
+ ETH_TUNTAP_TYPE_UNKNOWN,
+ ETH_TUNTAP_TYPE_TUN,
+ ETH_TUNTAP_TYPE_TAP,
+ ETH_TUNTAP_TYPE_MAX,
+};
+
struct pkt_stats {
uint64_t opackets; /* Number of output packets */
uint64_t ipackets; /* Number of input packets */
@@ -47,6 +55,7 @@ struct rx_queue {
struct tx_queue {
int fd;
+ int type; /* Type field - TUN|TAP */
uint16_t *mtu; /* Pointer to MTU from dev_data */
uint16_t csum:1; /* Enable checksum offloading */
struct pkt_stats stats; /* Stats for this TX queue */
@@ -56,6 +65,7 @@ struct pmd_internals {
struct rte_eth_dev *dev; /* Ethernet device. */
char remote_iface[RTE_ETH_NAME_MAX_LEN]; /* Remote netdevice name */
char name[RTE_ETH_NAME_MAX_LEN]; /* Internal Tap device name */
+ int type; /* Type field - TUN|TAP */
struct ether_addr eth_addr; /* Mac address of the device port */
struct ifreq remote_initial_flags; /* Remote netdevice flags on init */
int remote_if_index; /* remote netdevice IF_INDEX */
@@ -76,6 +86,7 @@ struct pmd_internals {
struct rx_queue rxq[RTE_PMD_TAP_MAX_QUEUES]; /* List of RX queues */
struct tx_queue txq[RTE_PMD_TAP_MAX_QUEUES]; /* List of TX queues */
struct rte_intr_handle intr_handle; /* LSC interrupt handle. */
+ int ka_fd; /* keep-alive file descriptor */
};
/* tap_intr.c */
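The rte_tuntap_type enum and the type fields added here let one PMD drive both character-device flavours. For orientation, a generic sketch of how a Linux TUN or TAP netdevice is opened (standard /dev/net/tun usage; this is not code copied from the driver, whose tun_alloc() is outside this hunk):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int
demo_tuntap_open(const char *name, int is_tap)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	/* The enum above maps naturally onto the IFF_TUN / IFF_TAP flag. */
	ifr.ifr_flags = (is_tap ? IFF_TAP : IFF_TUN) | IFF_NO_PI;
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}
	return fd; /* file descriptor bound to the new netdevice */
}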
diff --git a/drivers/net/tap/tap_bpf.h b/drivers/net/tap/tap_bpf.h
index 1a70ffe2..9192686a 100644
--- a/drivers/net/tap/tap_bpf.h
+++ b/drivers/net/tap/tap_bpf.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
- * Copyright 2017 Mellanox Technologies, Ltd.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef __TAP_BPF_H__
diff --git a/drivers/net/tap/tap_bpf_api.c b/drivers/net/tap/tap_bpf_api.c
index 109a681e..98f6a760 100644
--- a/drivers/net/tap/tap_bpf_api.c
+++ b/drivers/net/tap/tap_bpf_api.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2017 Mellanox Technologies, Ltd.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <errno.h>
diff --git a/drivers/net/tap/tap_bpf_insns.h b/drivers/net/tap/tap_bpf_insns.h
index 89873b6d..79e3e66b 100644
--- a/drivers/net/tap/tap_bpf_insns.h
+++ b/drivers/net/tap/tap_bpf_insns.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2017 Mellanox Technologies, Ltd.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <tap_bpf.h>
diff --git a/drivers/net/tap/tap_bpf_program.c b/drivers/net/tap/tap_bpf_program.c
index 8abb3b76..1cb73822 100644
--- a/drivers/net/tap/tap_bpf_program.c
+++ b/drivers/net/tap/tap_bpf_program.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
- * Copyright 2017 Mellanox Technologies, Ltd.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <stdint.h>
@@ -84,7 +84,7 @@ struct ipv6_l3_l4_tuple {
__u16 sport;
} __attribute__((packed));
-static const __u8 def_rss_key[] = {
+static const __u8 def_rss_key[TAP_RSS_HASH_KEY_SIZE] = {
0xd1, 0x81, 0xc6, 0x2c,
0xf7, 0xf4, 0xdb, 0x5b,
0x19, 0x83, 0xa2, 0xfc,
diff --git a/drivers/net/tap/tap_flow.c b/drivers/net/tap/tap_flow.c
index 551b2d83..6b60e6dc 100644
--- a/drivers/net/tap/tap_flow.c
+++ b/drivers/net/tap/tap_flow.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <errno.h>
@@ -270,13 +270,13 @@ static const struct tap_flow_items tap_flow_items[] = {
.items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_IPV6),
.mask = &(const struct rte_flow_item_vlan){
- .tpid = -1,
/* DEI matching is not supported */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
.tci = 0xffef,
#else
.tci = 0xefff,
#endif
+ .inner_type = -1,
},
.mask_sz = sizeof(struct rte_flow_item_vlan),
.default_mask = &rte_flow_item_vlan_mask,
@@ -578,13 +578,19 @@ tap_flow_create_vlan(const struct rte_flow_item *item, void *data)
/* use default mask if none provided */
if (!mask)
mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_VLAN].default_mask;
- /* TC does not support tpid masking. Only accept if exact match. */
- if (mask->tpid && mask->tpid != 0xffff)
+ /* Outer TPID cannot be matched. */
+ if (info->eth_type)
return -1;
/* Double-tagging not supported. */
- if (spec && mask->tpid && spec->tpid != htons(ETH_P_8021Q))
+ if (info->vlan)
return -1;
info->vlan = 1;
+ if (mask->inner_type) {
+ /* TC does not support partial eth_type masking */
+ if (mask->inner_type != RTE_BE16(0xffff))
+ return -1;
+ info->eth_type = spec->inner_type;
+ }
if (!flow)
return 0;
msg = &flow->msg;
@@ -1033,6 +1039,12 @@ priv_flow_process(struct pmd_internals *pmd,
};
int action = 0; /* Only one action authorized for now */
+ if (attr->transfer) {
+ rte_flow_error_set(
+ error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ NULL, "transfer is not supported");
+ return -rte_errno;
+ }
if (attr->group > MAX_GROUP) {
rte_flow_error_set(
error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
@@ -1140,6 +1152,7 @@ priv_flow_process(struct pmd_internals *pmd,
else
goto end;
}
+actions:
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
int err = 0;
@@ -1214,7 +1227,7 @@ priv_flow_process(struct pmd_internals *pmd,
if (err)
goto exit_action_not_supported;
}
- if (flow && rss)
+ if (flow)
err = rss_add_actions(flow, pmd, rss, error);
} else {
goto exit_action_not_supported;
@@ -1222,6 +1235,16 @@ priv_flow_process(struct pmd_internals *pmd,
if (err)
goto exit_action_not_supported;
}
+ /* When fate is unknown, drop traffic. */
+ if (!action) {
+ static const struct rte_flow_action drop[] = {
+ { .type = RTE_FLOW_ACTION_TYPE_DROP, },
+ { .type = RTE_FLOW_ACTION_TYPE_END, },
+ };
+
+ actions = drop;
+ goto actions;
+ }
end:
if (flow)
tap_nlattr_nested_finish(&flow->msg); /* nested TCA_OPTIONS */
@@ -1376,8 +1399,8 @@ tap_flow_create(struct rte_eth_dev *dev,
}
err = tap_nl_recv_ack(pmd->nlsk_fd);
if (err < 0) {
- RTE_LOG(ERR, PMD,
- "Kernel refused TC filter rule creation (%d): %s\n",
+ TAP_LOG(ERR,
+ "Kernel refused TC filter rule creation (%d): %s",
errno, strerror(errno));
rte_flow_error_set(error, EEXIST, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL,
@@ -1421,8 +1444,8 @@ tap_flow_create(struct rte_eth_dev *dev,
}
err = tap_nl_recv_ack(pmd->nlsk_fd);
if (err < 0) {
- RTE_LOG(ERR, PMD,
- "Kernel refused TC filter rule creation (%d): %s\n",
+ TAP_LOG(ERR,
+ "Kernel refused TC filter rule creation (%d): %s",
errno, strerror(errno));
rte_flow_error_set(
error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
@@ -1476,8 +1499,8 @@ tap_flow_destroy_pmd(struct pmd_internals *pmd,
if (ret < 0 && errno == ENOENT)
ret = 0;
if (ret < 0) {
- RTE_LOG(ERR, PMD,
- "Kernel refused TC filter rule deletion (%d): %s\n",
+ TAP_LOG(ERR,
+ "Kernel refused TC filter rule deletion (%d): %s",
errno, strerror(errno));
rte_flow_error_set(
error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
@@ -1500,8 +1523,8 @@ tap_flow_destroy_pmd(struct pmd_internals *pmd,
if (ret < 0 && errno == ENOENT)
ret = 0;
if (ret < 0) {
- RTE_LOG(ERR, PMD,
- "Kernel refused TC filter rule deletion (%d): %s\n",
+ TAP_LOG(ERR,
+ "Kernel refused TC filter rule deletion (%d): %s",
errno, strerror(errno));
rte_flow_error_set(
error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
@@ -1545,10 +1568,14 @@ tap_flow_isolate(struct rte_eth_dev *dev,
{
struct pmd_internals *pmd = dev->data->dev_private;
+ /* normalize 'set' variable to contain 0 or 1 values */
if (set)
- pmd->flow_isolate = 1;
- else
- pmd->flow_isolate = 0;
+ set = 1;
+ /* if already in the right isolation mode - nothing to do */
+ if ((set ^ pmd->flow_isolate) == 0)
+ return 0;
+ /* mark the isolation mode for tap_flow_implicit_create() */
+ pmd->flow_isolate = set;
/*
* If netdevice is there, setup appropriate flow rules immediately.
* Otherwise it will be set when bringing up the netdevice (tun_alloc).
@@ -1556,20 +1583,20 @@ tap_flow_isolate(struct rte_eth_dev *dev,
if (!pmd->rxq[0].fd)
return 0;
if (set) {
- struct rte_flow *flow;
+ struct rte_flow *remote_flow;
while (1) {
- flow = LIST_FIRST(&pmd->implicit_flows);
- if (!flow)
+ remote_flow = LIST_FIRST(&pmd->implicit_flows);
+ if (!remote_flow)
break;
/*
* Remove all implicit rules on the remote.
* Keep the local rule to redirect packets on TX.
* Keep also the last implicit local rule: ISOLATE.
*/
- if (flow->msg.t.tcm_ifindex == pmd->if_index)
+ if (remote_flow->msg.t.tcm_ifindex == pmd->if_index)
break;
- if (tap_flow_destroy_pmd(pmd, flow, NULL) < 0)
+ if (tap_flow_destroy_pmd(pmd, remote_flow, NULL) < 0)
goto error;
}
/* Switch the TC rule according to pmd->flow_isolate */
@@ -1665,7 +1692,7 @@ int tap_flow_implicit_create(struct pmd_internals *pmd,
remote_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
if (!remote_flow) {
- RTE_LOG(ERR, PMD, "Cannot allocate memory for rte_flow\n");
+ TAP_LOG(ERR, "Cannot allocate memory for rte_flow");
goto fail;
}
msg = &remote_flow->msg;
@@ -1706,21 +1733,21 @@ int tap_flow_implicit_create(struct pmd_internals *pmd,
tap_flow_set_handle(remote_flow);
if (priv_flow_process(pmd, attr, items, actions, NULL,
remote_flow, implicit_rte_flows[idx].mirred)) {
- RTE_LOG(ERR, PMD, "rte flow rule validation failed\n");
+ TAP_LOG(ERR, "rte flow rule validation failed");
goto fail;
}
err = tap_nl_send(pmd->nlsk_fd, &msg->nh);
if (err < 0) {
- RTE_LOG(ERR, PMD, "Failure sending nl request\n");
+ TAP_LOG(ERR, "Failure sending nl request");
goto fail;
}
err = tap_nl_recv_ack(pmd->nlsk_fd);
if (err < 0) {
- /* Silently ignore re-entering remote promiscuous rule */
- if (errno == EEXIST && idx == TAP_REMOTE_PROMISC)
+ /* Silently ignore re-entering existing rule */
+ if (errno == EEXIST)
goto success;
- RTE_LOG(ERR, PMD,
- "Kernel refused TC filter rule creation (%d): %s\n",
+ TAP_LOG(ERR,
+ "Kernel refused TC filter rule creation (%d): %s",
errno, strerror(errno));
goto fail;
}
@@ -1836,8 +1863,8 @@ static int rss_enable(struct pmd_internals *pmd,
sizeof(struct rss_key),
MAX_RSS_KEYS);
if (pmd->map_fd < 0) {
- RTE_LOG(ERR, PMD,
- "Failed to create BPF map (%d): %s\n",
+ TAP_LOG(ERR,
+ "Failed to create BPF map (%d): %s",
errno, strerror(errno));
rte_flow_error_set(
error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
@@ -1854,7 +1881,7 @@ static int rss_enable(struct pmd_internals *pmd,
for (i = 0; i < pmd->dev->data->nb_rx_queues; i++) {
pmd->bpf_fd[i] = tap_flow_bpf_cls_q(i);
if (pmd->bpf_fd[i] < 0) {
- RTE_LOG(ERR, PMD,
+ TAP_LOG(ERR,
"Failed to load BPF section %s for queue %d",
SEC_NAME_CLS_Q, i);
rte_flow_error_set(
@@ -1868,7 +1895,7 @@ static int rss_enable(struct pmd_internals *pmd,
rss_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
if (!rss_flow) {
- RTE_LOG(ERR, PMD,
+ TAP_LOG(ERR,
"Cannot allocate memory for rte_flow");
return -1;
}
@@ -1911,8 +1938,8 @@ static int rss_enable(struct pmd_internals *pmd,
return -1;
err = tap_nl_recv_ack(pmd->nlsk_fd);
if (err < 0) {
- RTE_LOG(ERR, PMD,
- "Kernel refused TC filter rule creation (%d): %s\n",
+ TAP_LOG(ERR,
+ "Kernel refused TC filter rule creation (%d): %s",
errno, strerror(errno));
return err;
}
@@ -2039,11 +2066,21 @@ static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd,
struct rte_flow_error *error)
{
/* 4096 is the maximum number of instructions for a BPF program */
- int i;
+ unsigned int i;
int err;
struct rss_key rss_entry = { .hash_fields = 0,
.key_size = 0 };
+ /* Check supported RSS features */
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "non-default RSS hash functions are not supported");
+ if (rss->level)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "a nonzero RSS encapsulation level is not supported");
+
/* Get a new map key for a new RSS rule */
err = bpf_rss_key(KEY_CMD_GET, &flow->key_idx);
if (err < 0) {
@@ -2055,8 +2092,8 @@ static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd,
}
/* Update RSS map entry with queues */
- rss_entry.nb_queues = rss->num;
- for (i = 0; i < rss->num; i++)
+ rss_entry.nb_queues = rss->queue_num;
+ for (i = 0; i < rss->queue_num; i++)
rss_entry.queues[i] = rss->queue[i];
rss_entry.hash_fields =
(1 << HASH_FIELD_IPV4_L3_L4) | (1 << HASH_FIELD_IPV6_L3_L4);
@@ -2066,8 +2103,8 @@ static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd,
&flow->key_idx, &rss_entry);
if (err) {
- RTE_LOG(ERR, PMD,
- "Failed to update BPF map entry #%u (%d): %s\n",
+ TAP_LOG(ERR,
+ "Failed to update BPF map entry #%u (%d): %s",
flow->key_idx, errno, strerror(errno));
rte_flow_error_set(
error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
@@ -2085,8 +2122,8 @@ static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd,
flow->bpf_fd[SEC_L3_L4] =
tap_flow_bpf_calc_l3_l4_hash(flow->key_idx, pmd->map_fd);
if (flow->bpf_fd[SEC_L3_L4] < 0) {
- RTE_LOG(ERR, PMD,
- "Failed to load BPF section %s (%d): %s\n",
+ TAP_LOG(ERR,
+ "Failed to load BPF section %s (%d): %s",
sec_name[SEC_L3_L4], errno, strerror(errno));
rte_flow_error_set(
error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
@@ -2147,9 +2184,8 @@ tap_dev_filter_ctrl(struct rte_eth_dev *dev,
*(const void **)arg = &tap_flow_ops;
return 0;
default:
- RTE_LOG(ERR, PMD, "%p: filter type (%d) not supported\n",
- (void *)dev, filter_type);
+ TAP_LOG(ERR, "%p: filter type (%d) not supported",
+ dev, filter_type);
}
return -EINVAL;
}
-
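The VLAN item handling above now keys on inner_type instead of tpid, following the reworked rte_flow VLAN item in which the EtherType that follows the tag is matched through the VLAN item while the TPID is implied by the ETH/VLAN pairing. A hedged sketch of a pattern written against the new field (demo_ names are illustrative; whether the TAP driver accepts this particular tci mask depends on code not shown in this hunk):

#include <rte_byteorder.h>
#include <rte_flow.h>

/* Match VLAN ID 1 carrying IPv4; the TPID is no longer expressed here. */
static const struct rte_flow_item_vlan demo_vlan_spec = {
	.tci = RTE_BE16(0x0001),
	.inner_type = RTE_BE16(0x0800),
};
/* The handler above rejects partial inner_type masks, so only an exact
 * 0xffff mask on the inner EtherType can be offloaded. */
static const struct rte_flow_item_vlan demo_vlan_mask = {
	.tci = RTE_BE16(0x0fff),
	.inner_type = RTE_BE16(0xffff),
};
static const struct rte_flow_item demo_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
	  .spec = &demo_vlan_spec, .mask = &demo_vlan_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};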
diff --git a/drivers/net/tap/tap_flow.h b/drivers/net/tap/tap_flow.h
index ac6a952d..ac60a9ae 100644
--- a/drivers/net/tap/tap_flow.h
+++ b/drivers/net/tap/tap_flow.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef _TAP_FLOW_H_
diff --git a/drivers/net/tap/tap_intr.c b/drivers/net/tap/tap_intr.c
index b0e19914..fc590181 100644
--- a/drivers/net/tap/tap_intr.c
+++ b/drivers/net/tap/tap_intr.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018 Mellanox Technologies, Ltd.
+ * Copyright 2018 Mellanox Technologies, Ltd
*/
/**
@@ -62,7 +62,7 @@ tap_rx_intr_vec_install(struct rte_eth_dev *dev)
intr_handle->intr_vec = malloc(sizeof(intr_handle->intr_vec[rxqs_n]));
if (intr_handle->intr_vec == NULL) {
rte_errno = ENOMEM;
- RTE_LOG(ERR, PMD,
+ TAP_LOG(ERR,
"failed to allocate memory for interrupt vector,"
" Rx interrupts will not be supported");
return -rte_errno;
diff --git a/drivers/net/tap/tap_log.h b/drivers/net/tap/tap_log.h
new file mode 100644
index 00000000..fa06843a
--- /dev/null
+++ b/drivers/net/tap/tap_log.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+extern int tap_logtype;
+
+#define TAP_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, tap_logtype, "%s(): " fmt "\n", \
+ __func__, ## args)
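With the macro above, every message gains the calling function name and a trailing newline centrally, which is why the converted call sites in this patch drop their own "\n". For example:

/* Illustration only (not part of the patch):
 *
 *     TAP_LOG(ERR, "%s: failed to get %s MAC address.",
 *             pmd->name, pmd->remote_iface);
 *
 * expands to
 *
 *     rte_log(RTE_LOG_ERR, tap_logtype,
 *             "%s(): %s: failed to get %s MAC address.\n",
 *             __func__, pmd->name, pmd->remote_iface);
 */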
diff --git a/drivers/net/tap/tap_netlink.c b/drivers/net/tap/tap_netlink.c
index 82c8dc0e..6cb51009 100644
--- a/drivers/net/tap/tap_netlink.c
+++ b/drivers/net/tap/tap_netlink.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <errno.h>
@@ -13,6 +13,7 @@
#include <rte_malloc.h>
#include <tap_netlink.h>
#include <rte_random.h>
+#include "tap_log.h"
/* Must be quite large to support dumping a huge list of QDISC or filters. */
#define BUF_SIZE (32 * 1024) /* Size of the buffer to receive kernel messages */
@@ -45,19 +46,19 @@ tap_nl_init(uint32_t nl_groups)
fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_ROUTE);
if (fd < 0) {
- RTE_LOG(ERR, PMD, "Unable to create a netlink socket\n");
+ TAP_LOG(ERR, "Unable to create a netlink socket");
return -1;
}
if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuf_size, sizeof(int))) {
- RTE_LOG(ERR, PMD, "Unable to set socket buffer send size\n");
+ TAP_LOG(ERR, "Unable to set socket buffer send size");
return -1;
}
if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf_size, sizeof(int))) {
- RTE_LOG(ERR, PMD, "Unable to set socket buffer receive size\n");
+ TAP_LOG(ERR, "Unable to set socket buffer receive size");
return -1;
}
if (bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0) {
- RTE_LOG(ERR, PMD, "Unable to bind to the netlink socket\n");
+ TAP_LOG(ERR, "Unable to bind to the netlink socket");
return -1;
}
return fd;
@@ -76,7 +77,7 @@ int
tap_nl_final(int nlsk_fd)
{
if (close(nlsk_fd)) {
- RTE_LOG(ERR, PMD, "Failed to close netlink socket: %s (%d)\n",
+ TAP_LOG(ERR, "Failed to close netlink socket: %s (%d)",
strerror(errno), errno);
return -1;
}
@@ -117,7 +118,7 @@ tap_nl_send(int nlsk_fd, struct nlmsghdr *nh)
nh->nlmsg_seq = (uint32_t)rte_rand();
send_bytes = sendmsg(nlsk_fd, &msg, 0);
if (send_bytes < 0) {
- RTE_LOG(ERR, PMD, "Failed to send netlink message: %s (%d)\n",
+ TAP_LOG(ERR, "Failed to send netlink message: %s (%d)",
strerror(errno), errno);
return -1;
}
@@ -300,9 +301,8 @@ tap_nlattr_nested_start(struct nlmsg *msg, uint16_t type)
tail = rte_zmalloc(NULL, sizeof(struct nested_tail), 0);
if (!tail) {
- RTE_LOG(ERR, PMD,
- "Couldn't allocate memory for nested netlink"
- " attribute\n");
+ TAP_LOG(ERR,
+ "Couldn't allocate memory for nested netlink attribute");
return -1;
}
diff --git a/drivers/net/tap/tap_netlink.h b/drivers/net/tap/tap_netlink.h
index fafef840..faa73ba1 100644
--- a/drivers/net/tap/tap_netlink.h
+++ b/drivers/net/tap/tap_netlink.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef _TAP_NETLINK_H_
diff --git a/drivers/net/tap/tap_rss.h b/drivers/net/tap/tap_rss.h
index 3bb0d140..17606b2d 100644
--- a/drivers/net/tap/tap_rss.h
+++ b/drivers/net/tap/tap_rss.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2017 Mellanox Technologies, Ltd.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef _TAP_RSS_H_
@@ -9,6 +9,12 @@
#define TAP_MAX_QUEUES 16
#endif
+/* Fixed RSS hash key size in bytes. */
+#define TAP_RSS_HASH_KEY_SIZE 40
+
+/* Supported RSS */
+#define TAP_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP))
+
/* hashed fields for RSS */
enum hash_field {
HASH_FIELD_IPV4_L3, /* IPv4 src/dst addr */
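TAP_RSS_HF_MASK above is the complement of the hash types the BPF program can compute, so ANDing it with a requested rss_hf leaves exactly the unsupported bits. A minimal sketch of that style of check (illustrative only; whether the driver performs it at this exact point is not visible in this hunk):

#include <stdint.h>
#include <rte_ethdev.h>

#define DEMO_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP))

/* Returns the requested-but-unsupported hash bits; zero means acceptable. */
static uint64_t
demo_unsupported_rss_hf(uint64_t requested_hf)
{
	return requested_hf & DEMO_RSS_HF_MASK;
}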
diff --git a/drivers/net/tap/tap_tcmsgs.c b/drivers/net/tap/tap_tcmsgs.c
index 954f13eb..3c9d0366 100644
--- a/drivers/net/tap/tap_tcmsgs.c
+++ b/drivers/net/tap/tap_tcmsgs.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <inttypes.h>
@@ -10,6 +10,7 @@
#include <rte_log.h>
#include <tap_tcmsgs.h>
+#include "tap_log.h"
struct qdisc {
uint32_t handle;
@@ -81,8 +82,8 @@ qdisc_del(int nlsk_fd, uint16_t ifindex, struct qdisc *qinfo)
if (!nlsk_fd) {
fd = tap_nl_init(0);
if (fd < 0) {
- RTE_LOG(ERR, PMD,
- "Could not delete QDISC: null netlink socket\n");
+ TAP_LOG(ERR,
+ "Could not delete QDISC: null netlink socket");
return -1;
}
} else {
@@ -261,7 +262,7 @@ qdisc_create_multiq(int nlsk_fd, uint16_t ifindex)
err = qdisc_add_multiq(nlsk_fd, ifindex);
if (err < 0 && errno != -EEXIST) {
- RTE_LOG(ERR, PMD, "Could not add multiq qdisc (%d): %s\n",
+ TAP_LOG(ERR, "Could not add multiq qdisc (%d): %s",
errno, strerror(errno));
return -1;
}
@@ -287,7 +288,7 @@ qdisc_create_ingress(int nlsk_fd, uint16_t ifindex)
err = qdisc_add_ingress(nlsk_fd, ifindex);
if (err < 0 && errno != -EEXIST) {
- RTE_LOG(ERR, PMD, "Could not add ingress qdisc (%d): %s\n",
+ TAP_LOG(ERR, "Could not add ingress qdisc (%d): %s",
errno, strerror(errno));
return -1;
}
diff --git a/drivers/net/tap/tap_tcmsgs.h b/drivers/net/tap/tap_tcmsgs.h
index f72f8c5c..8cedea84 100644
--- a/drivers/net/tap/tap_tcmsgs.h
+++ b/drivers/net/tap/tap_tcmsgs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef _TAP_TCMSGS_H_
diff --git a/drivers/net/thunderx/base/nicvf_hw_defs.h b/drivers/net/thunderx/base/nicvf_hw_defs.h
index b13c21ff..b12c8ec5 100644
--- a/drivers/net/thunderx/base/nicvf_hw_defs.h
+++ b/drivers/net/thunderx/base/nicvf_hw_defs.h
@@ -171,7 +171,10 @@
/* Min/Max packet size */
#define NIC_HW_MIN_FRS (64)
-#define NIC_HW_MAX_FRS (9200) /* 9216 max pkt including FCS */
+/* ETH_HLEN+ETH_FCS_LEN+2*VLAN_HLEN */
+#define NIC_HW_L2_OVERHEAD (26)
+#define NIC_HW_MAX_MTU (9190)
+#define NIC_HW_MAX_FRS (NIC_HW_MAX_MTU + NIC_HW_L2_OVERHEAD)
#define NIC_HW_MAX_SEGS (12)
/* Descriptor alignments */
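The replacement constants keep the old limit: 26 bytes of L2 overhead is 14 (Ethernet header) + 4 (FCS) + 2 x 4 (two VLAN tags), so NIC_HW_MAX_FRS remains 9190 + 26 = 9216 bytes, the value the removed comment referred to. A compile-time check of that arithmetic (illustration only):

#include <assert.h>

#define DEMO_ETH_HLEN     14
#define DEMO_ETH_FCS_LEN   4
#define DEMO_VLAN_HLEN     4
#define DEMO_L2_OVERHEAD  (DEMO_ETH_HLEN + DEMO_ETH_FCS_LEN + 2 * DEMO_VLAN_HLEN)

static_assert(DEMO_L2_OVERHEAD == 26, "L2 overhead mismatch");
static_assert(9190 + DEMO_L2_OVERHEAD == 9216, "max frame size mismatch");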
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index a65361fb..99fcd516 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -15,7 +15,6 @@
#include <sys/queue.h>
#include <rte_alarm.h>
-#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_common.h>
@@ -69,25 +68,14 @@ nicvf_init_log(void)
rte_log_set_level(nicvf_logtype_driver, RTE_LOG_NOTICE);
}
-static inline int
-nicvf_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
+static void
+nicvf_link_status_update(struct nicvf *nic,
+ struct rte_eth_link *link)
{
- struct rte_eth_link *dst = &dev->data->dev_link;
- struct rte_eth_link *src = link;
+ memset(link, 0, sizeof(*link));
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
+ link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
-static inline void
-nicvf_set_eth_link_status(struct nicvf *nic, struct rte_eth_link *link)
-{
- link->link_status = nic->link_up;
- link->link_duplex = ETH_LINK_AUTONEG;
if (nic->duplex == NICVF_HALF_DUPLEX)
link->link_duplex = ETH_LINK_HALF_DUPLEX;
else if (nic->duplex == NICVF_FULL_DUPLEX)
@@ -101,12 +89,17 @@ nicvf_interrupt(void *arg)
{
struct rte_eth_dev *dev = arg;
struct nicvf *nic = nicvf_pmd_priv(dev);
+ struct rte_eth_link link;
if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
- if (dev->data->dev_conf.intr_conf.lsc)
- nicvf_set_eth_link_status(nic, &dev->data->dev_link);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
- NULL);
+ if (dev->data->dev_conf.intr_conf.lsc) {
+ nicvf_link_status_update(nic, &link);
+ rte_eth_linkstatus_set(dev, &link);
+
+ _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ }
}
rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
@@ -153,24 +146,23 @@ nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
if (wait_to_complete) {
/* rte_eth_link_get() might need to wait up to 9 seconds */
for (i = 0; i < MAX_CHECK_TIME; i++) {
- memset(&link, 0, sizeof(link));
- nicvf_set_eth_link_status(nic, &link);
- if (link.link_status)
+ nicvf_link_status_update(nic, &link);
+ if (link.link_status == ETH_LINK_UP)
break;
rte_delay_ms(CHECK_INTERVAL);
}
} else {
- memset(&link, 0, sizeof(link));
- nicvf_set_eth_link_status(nic, &link);
+ nicvf_link_status_update(nic, &link);
}
- return nicvf_atomic_write_link_status(dev, &link);
+
+ return rte_eth_linkstatus_set(dev, &link);
}
static int
nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
struct nicvf *nic = nicvf_pmd_priv(dev);
- uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ uint32_t buffsz, frame_size = mtu + NIC_HW_L2_OVERHEAD;
size_t i;
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
@@ -188,7 +180,7 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
* Refuse mtu that requires the support of scattered packets
* when this feature has not been enabled before.
*/
- if (!dev->data->scattered_rx &&
+ if (dev->data->dev_started && !dev->data->scattered_rx &&
(frame_size + 2 * VLAN_TAG_SIZE > buffsz))
return -EINVAL;
@@ -202,11 +194,11 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
else
rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
- if (nicvf_mbox_update_hw_max_frs(nic, frame_size))
+ if (nicvf_mbox_update_hw_max_frs(nic, mtu))
return -EINVAL;
- /* Update max frame size */
- rxmode->max_rx_pkt_len = (uint32_t)frame_size;
+ /* Update max_rx_pkt_len */
+ rxmode->max_rx_pkt_len = mtu + ETHER_HDR_LEN;
nic->mtu = mtu;
for (i = 0; i < nic->sqs_count; i++)
@@ -939,7 +931,7 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
bool is_single_pool;
struct nicvf_txq *txq;
struct nicvf *nic = nicvf_pmd_priv(dev);
- uint64_t conf_offloads, offload_capa, unsupported_offloads;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
@@ -953,17 +945,6 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
socket_id, nic->node);
- conf_offloads = tx_conf->offloads;
- offload_capa = NICVF_TX_OFFLOAD_CAPA;
-
- unsupported_offloads = conf_offloads & ~offload_capa;
- if (unsupported_offloads) {
- PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
- "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
- unsupported_offloads, conf_offloads, offload_capa);
- return -ENOTSUP;
- }
-
/* Tx deferred start is not supported */
if (tx_conf->tx_deferred_start) {
PMD_INIT_LOG(ERR, "Tx deferred start not supported");
@@ -1015,9 +996,10 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
txq->tx_free_thresh = tx_free_thresh;
txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
- txq->offloads = conf_offloads;
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+ txq->offloads = offloads;
- is_single_pool = !!(conf_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
+ is_single_pool = !!(offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
/* Choose optimum free threshold value for multipool case */
if (!is_single_pool) {
@@ -1277,7 +1259,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
uint16_t rx_free_thresh;
struct nicvf_rxq *rxq;
struct nicvf *nic = nicvf_pmd_priv(dev);
- uint64_t conf_offloads, offload_capa, unsupported_offloads;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
@@ -1291,24 +1273,6 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
socket_id, nic->node);
-
- conf_offloads = rx_conf->offloads;
-
- if (conf_offloads & DEV_RX_OFFLOAD_CHECKSUM) {
- PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
- conf_offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
- }
-
- offload_capa = NICVF_RX_OFFLOAD_CAPA;
- unsupported_offloads = conf_offloads & ~offload_capa;
-
- if (unsupported_offloads) {
- PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
- "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
- unsupported_offloads, conf_offloads, offload_capa);
- return -ENOTSUP;
- }
-
/* Mempool memory must be contiguous, so must be one memory segment*/
if (mp->nb_mem_chunks != 1) {
PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
@@ -1316,7 +1280,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
}
/* Mempool memory must be physically contiguous */
- if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG) {
+ if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG) {
PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous");
return -EINVAL;
}
@@ -1389,10 +1353,11 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
nicvf_rx_queue_reset(rxq);
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
PMD_INIT_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d)"
" phy=0x%" PRIx64 " offloads=0x%" PRIx64,
nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
- rte_mempool_avail_count(mp), rxq->phys, conf_offloads);
+ rte_mempool_avail_count(mp), rxq->phys, offloads);
dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
@@ -1408,8 +1373,6 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
PMD_INIT_FUNC_TRACE();
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-
/* Autonegotiation may be disabled */
dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
@@ -1418,7 +1381,7 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->speed_capa |= ETH_LINK_SPEED_40G;
dev_info->min_rx_bufsize = ETHER_MIN_MTU;
- dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
+ dev_info->max_rx_pktlen = NIC_HW_MAX_MTU + ETHER_HDR_LEN;
dev_info->max_rx_queues =
(uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
dev_info->max_tx_queues =
@@ -1445,12 +1408,6 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->default_txconf = (struct rte_eth_txconf) {
.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
- .txq_flags =
- ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOREFCOUNT |
- ETH_TXQ_FLAGS_NOMULTMEMP |
- ETH_TXQ_FLAGS_NOVLANOFFL |
- ETH_TXQ_FLAGS_NOXSUMSCTP,
.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE |
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
@@ -1751,8 +1708,7 @@ nicvf_dev_start(struct rte_eth_dev *dev)
/* Setup MTU based on max_rx_pkt_len or default */
mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ?
dev->data->dev_conf.rxmode.max_rx_pkt_len
- - ETHER_HDR_LEN - ETHER_CRC_LEN
- : ETHER_MTU;
+ - ETHER_HDR_LEN : ETHER_MTU;
if (nicvf_dev_set_mtu(dev, mtu)) {
PMD_INIT_LOG(ERR, "Failed to set default mtu size");
@@ -1923,8 +1879,6 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
struct rte_eth_txmode *txmode = &conf->txmode;
struct nicvf *nic = nicvf_pmd_priv(dev);
uint8_t cqcount;
- uint64_t conf_rx_offloads, rx_offload_capa;
- uint64_t conf_tx_offloads, tx_offload_capa;
PMD_INIT_FUNC_TRACE();
@@ -1933,32 +1887,7 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
return -EINVAL;
}
- conf_tx_offloads = dev->data->dev_conf.txmode.offloads;
- tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
-
- if ((conf_tx_offloads & tx_offload_capa) != conf_tx_offloads) {
- PMD_INIT_LOG(ERR, "Some Tx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
- conf_tx_offloads, tx_offload_capa);
- return -ENOTSUP;
- }
-
- if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) {
- PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
- rxmode->offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
- }
-
- conf_rx_offloads = rxmode->offloads;
- rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
-
- if ((conf_rx_offloads & rx_offload_capa) != conf_rx_offloads) {
- PMD_INIT_LOG(ERR, "Some Rx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
- conf_rx_offloads, rx_offload_capa);
- return -ENOTSUP;
- }
-
- if ((conf_rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
+ if ((rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
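The driver-private cmpset loop is replaced by the generic ethdev helper: build the link state locally, then publish it with rte_eth_linkstatus_set(), which stores it atomically in dev->data->dev_link. A minimal sketch of that pattern, assuming the rte_ethdev_driver.h API used above:

#include <string.h>
#include <rte_ethdev_driver.h>

static int
demo_link_update(struct rte_eth_dev *dev, int link_up, uint32_t speed)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = speed;
	link.link_autoneg = ETH_LINK_AUTONEG;

	/* Atomically replaces dev->data->dev_link with the new state. */
	return rte_eth_linkstatus_set(dev, &link);
}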
diff --git a/drivers/net/vdev_netvsc/Makefile b/drivers/net/vdev_netvsc/Makefile
index 7be17137..690cb8f8 100644
--- a/drivers/net/vdev_netvsc/Makefile
+++ b/drivers/net/vdev_netvsc/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2017 6WIND S.A.
-# Copyright 2017 Mellanox Technologies, Ltd.
+# Copyright 2017 Mellanox Technologies, Ltd
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/net/vdev_netvsc/vdev_netvsc.c b/drivers/net/vdev_netvsc/vdev_netvsc.c
index cbf4d590..48717f2f 100644
--- a/drivers/net/vdev_netvsc/vdev_netvsc.c
+++ b/drivers/net/vdev_netvsc/vdev_netvsc.c
@@ -1,12 +1,14 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox Technologies, Ltd.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <linux/sockios.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <netinet/ip.h>
@@ -33,9 +35,11 @@
#include <rte_hypervisor.h>
#include <rte_kvargs.h>
#include <rte_log.h>
+#include <rte_string_fns.h>
#define VDEV_NETVSC_DRIVER net_vdev_netvsc
#define VDEV_NETVSC_DRIVER_NAME RTE_STR(VDEV_NETVSC_DRIVER)
+#define VDEV_NETVSC_DRIVER_NAME_LEN 15
#define VDEV_NETVSC_ARG_IFACE "iface"
#define VDEV_NETVSC_ARG_MAC "mac"
#define VDEV_NETVSC_ARG_FORCE "force"
@@ -96,6 +100,43 @@ vdev_netvsc_ctx_destroy(struct vdev_netvsc_ctx *ctx)
}
/**
+ * Determine if a network interface is NetVSC.
+ *
+ * @param[in] iface
+ * Pointer to netdevice description structure (name and index).
+ *
+ * @return
+ * A nonzero value when interface is detected as NetVSC. In case of error,
+ * rte_errno is updated and 0 returned.
+ */
+static int
+vdev_netvsc_iface_is_netvsc(const struct if_nameindex *iface)
+{
+ static const char temp[] = "/sys/class/net/%s/device/class_id";
+ char path[sizeof(temp) + IF_NAMESIZE];
+ FILE *f;
+ int ret;
+ int len = 0;
+
+ ret = snprintf(path, sizeof(path), temp, iface->if_name);
+ if (ret == -1 || (size_t)ret >= sizeof(path)) {
+ rte_errno = ENOBUFS;
+ return 0;
+ }
+ f = fopen(path, "r");
+ if (!f) {
+ rte_errno = errno;
+ return 0;
+ }
+ ret = fscanf(f, NETVSC_CLASS_ID "%n", &len);
+ if (ret == EOF)
+ rte_errno = errno;
+ ret = len == (int)strlen(NETVSC_CLASS_ID);
+ fclose(f);
+ return ret;
+}
+
+/**
* Iterate over system network interfaces.
*
* This function runs a given callback function for each netdevice found on
@@ -104,6 +145,8 @@ vdev_netvsc_ctx_destroy(struct vdev_netvsc_ctx *ctx)
* @param func
* Callback function pointer. List traversal is aborted when this function
* returns a nonzero value.
+ * @param is_netvsc
+ * Indicates the device type to iterate - netvsc or non-netvsc.
* @param ...
* Variable parameter list passed as @p va_list to @p func.
*
@@ -115,7 +158,7 @@ vdev_netvsc_ctx_destroy(struct vdev_netvsc_ctx *ctx)
static int
vdev_netvsc_foreach_iface(int (*func)(const struct if_nameindex *iface,
const struct ether_addr *eth_addr,
- va_list ap), ...)
+ va_list ap), int is_netvsc, ...)
{
struct if_nameindex *iface = if_nameindex();
int s = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
@@ -133,11 +176,15 @@ vdev_netvsc_foreach_iface(int (*func)(const struct if_nameindex *iface,
goto error;
}
for (i = 0; iface[i].if_name; ++i) {
+ int is_netvsc_ret;
struct ifreq req;
struct ether_addr eth_addr;
va_list ap;
- strncpy(req.ifr_name, iface[i].if_name, sizeof(req.ifr_name));
+ is_netvsc_ret = vdev_netvsc_iface_is_netvsc(&iface[i]) ? 1 : 0;
+ if (is_netvsc ^ is_netvsc_ret)
+ continue;
+ strlcpy(req.ifr_name, iface[i].if_name, sizeof(req.ifr_name));
if (ioctl(s, SIOCGIFHWADDR, &req) == -1) {
DRV_LOG(WARNING, "cannot retrieve information about"
" interface \"%s\": %s",
@@ -151,7 +198,7 @@ vdev_netvsc_foreach_iface(int (*func)(const struct if_nameindex *iface,
}
memcpy(eth_addr.addr_bytes, req.ifr_hwaddr.sa_data,
RTE_DIM(eth_addr.addr_bytes));
- va_start(ap, func);
+ va_start(ap, is_netvsc);
ret = func(&iface[i], &eth_addr, ap);
va_end(ap);
if (ret)
@@ -166,77 +213,100 @@ error:
}
/**
- * Determine if a network interface is NetVSC.
- *
- * @param[in] iface
- * Pointer to netdevice description structure (name and index).
- *
- * @return
- * A nonzero value when interface is detected as NetVSC. In case of error,
- * rte_errno is updated and 0 returned.
- */
-static int
-vdev_netvsc_iface_is_netvsc(const struct if_nameindex *iface)
-{
- static const char temp[] = "/sys/class/net/%s/device/class_id";
- char path[sizeof(temp) + IF_NAMESIZE];
- FILE *f;
- int ret;
- int len = 0;
-
- ret = snprintf(path, sizeof(path), temp, iface->if_name);
- if (ret == -1 || (size_t)ret >= sizeof(path)) {
- rte_errno = ENOBUFS;
- return 0;
- }
- f = fopen(path, "r");
- if (!f) {
- rte_errno = errno;
- return 0;
- }
- ret = fscanf(f, NETVSC_CLASS_ID "%n", &len);
- if (ret == EOF)
- rte_errno = errno;
- ret = len == (int)strlen(NETVSC_CLASS_ID);
- fclose(f);
- return ret;
-}
-
-/**
* Determine if a network interface has a route.
*
* @param[in] name
* Network device name.
+ * @param[in] family
+ * Address family: AF_INET for IPv4 or AF_INET6 for IPv6.
*
* @return
- * A nonzero value when interface has an route. In case of error,
- * rte_errno is updated and 0 returned.
+ * 1 when interface has a route, negative errno value in case of error and
+ * 0 otherwise.
*/
static int
-vdev_netvsc_has_route(const char *name)
+vdev_netvsc_has_route(const struct if_nameindex *iface,
+ const unsigned char family)
{
- FILE *fp;
+	/*
+	 * This could be implemented more simply with getifaddrs(), but that
+	 * function supports IPv6 only starting from glibc 2.3.3.
+	 */
+ char buf[4096];
+ int len;
int ret = 0;
- char route[NETVSC_MAX_ROUTE_LINE_SIZE];
- char *netdev;
-
- fp = fopen("/proc/net/route", "r");
- if (!fp) {
- rte_errno = errno;
- return 0;
+ int res;
+ int sock;
+ struct nlmsghdr *retmsg = (struct nlmsghdr *)buf;
+ struct sockaddr_nl sa;
+ struct {
+ struct nlmsghdr nlhdr;
+ struct ifaddrmsg addrmsg;
+ } msg;
+
+ if (!iface || (family != AF_INET && family != AF_INET6)) {
+ DRV_LOG(ERR, "%s", rte_strerror(EINVAL));
+ return -EINVAL;
}
- while (fgets(route, NETVSC_MAX_ROUTE_LINE_SIZE, fp) != NULL) {
- netdev = strtok(route, "\t");
- if (strcmp(netdev, name) == 0) {
- ret = 1;
- break;
+ sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
+ if (sock == -1) {
+ DRV_LOG(ERR, "cannot open socket: %s", rte_strerror(errno));
+ return -errno;
+ }
+ memset(&sa, 0, sizeof(sa));
+ sa.nl_family = AF_NETLINK;
+ sa.nl_groups = RTMGRP_LINK | RTMGRP_IPV4_IFADDR;
+ res = bind(sock, (struct sockaddr *)&sa, sizeof(sa));
+ if (res == -1) {
+ ret = -errno;
+ DRV_LOG(ERR, "cannot bind socket: %s", rte_strerror(errno));
+ goto close;
+ }
+ memset(&msg, 0, sizeof(msg));
+ msg.nlhdr.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifaddrmsg));
+ msg.nlhdr.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
+ msg.nlhdr.nlmsg_type = RTM_GETADDR;
+ msg.nlhdr.nlmsg_pid = getpid();
+ msg.addrmsg.ifa_family = family;
+ msg.addrmsg.ifa_index = iface->if_index;
+ res = send(sock, &msg, msg.nlhdr.nlmsg_len, 0);
+ if (res == -1) {
+ ret = -errno;
+ DRV_LOG(ERR, "cannot send socket message: %s",
+ rte_strerror(errno));
+ goto close;
+ }
+ memset(buf, 0, sizeof(buf));
+ len = recv(sock, buf, sizeof(buf), 0);
+ if (len == -1) {
+ ret = -errno;
+ DRV_LOG(ERR, "cannot receive socket message: %s",
+ rte_strerror(errno));
+ goto close;
+ }
+ while (NLMSG_OK(retmsg, (unsigned int)len)) {
+ struct ifaddrmsg *retaddr =
+ (struct ifaddrmsg *)NLMSG_DATA(retmsg);
+
+ if (retaddr->ifa_family == family &&
+ retaddr->ifa_index == iface->if_index) {
+ struct rtattr *retrta = IFA_RTA(retaddr);
+ int attlen = IFA_PAYLOAD(retmsg);
+
+ while (RTA_OK(retrta, attlen)) {
+ if (retrta->rta_type == IFA_ADDRESS) {
+ ret = 1;
+ DRV_LOG(DEBUG, "interface %s has IP",
+ iface->if_name);
+ goto close;
+ }
+ retrta = RTA_NEXT(retrta, attlen);
+ }
}
- /* Move file pointer to the next line. */
- while (strchr(route, '\n') == NULL &&
- fgets(route, NETVSC_MAX_ROUTE_LINE_SIZE, fp) != NULL)
- ;
+ retmsg = NLMSG_NEXT(retmsg, len);
}
- fclose(fp);
+close:
+ close(sock);
return ret;
}
@@ -259,12 +329,15 @@ static int
vdev_netvsc_sysfs_readlink(char *buf, size_t size, const char *if_name,
const char *relpath)
{
+ struct vdev_netvsc_ctx *ctx;
+ char in[RTE_MAX(sizeof(ctx->yield), 256u)];
int ret;
- ret = snprintf(buf, size, "/sys/class/net/%s/%s", if_name, relpath);
- if (ret == -1 || (size_t)ret >= size)
+ ret = snprintf(in, sizeof(in) - 1, "/sys/class/net/%s/%s",
+ if_name, relpath);
+ if (ret == -1 || (size_t)ret >= sizeof(in))
return -ENOBUFS;
- ret = readlink(buf, buf, size);
+ ret = readlink(in, buf, size);
if (ret == -1)
return -errno;
if ((size_t)ret >= size - 1)
@@ -314,11 +387,9 @@ vdev_netvsc_device_probe(const struct if_nameindex *iface,
DRV_LOG(DEBUG,
"NetVSC interface \"%s\" (index %u) renamed \"%s\"",
ctx->if_name, ctx->if_index, iface->if_name);
- strncpy(ctx->if_name, iface->if_name, sizeof(ctx->if_name));
+ strlcpy(ctx->if_name, iface->if_name, sizeof(ctx->if_name));
return 0;
}
- if (vdev_netvsc_iface_is_netvsc(iface))
- return 0;
if (!is_same_ether_addr(eth_addr, &ctx->if_addr))
return 0;
/* Look for associated PCI device. */
@@ -387,7 +458,8 @@ vdev_netvsc_alarm(__rte_unused void *arg)
int ret;
LIST_FOREACH(ctx, &vdev_netvsc_ctx_list, entry) {
- ret = vdev_netvsc_foreach_iface(vdev_netvsc_device_probe, ctx);
+ ret = vdev_netvsc_foreach_iface(vdev_netvsc_device_probe, 0,
+ ctx);
if (ret < 0)
break;
}
@@ -443,7 +515,6 @@ vdev_netvsc_netvsc_probe(const struct if_nameindex *iface,
{
const char *name = va_arg(ap, const char *);
struct rte_kvargs *kvargs = va_arg(ap, struct rte_kvargs *);
- int force = va_arg(ap, int);
unsigned int specified = va_arg(ap, unsigned int);
unsigned int *matched = va_arg(ap, unsigned int *);
unsigned int i;
@@ -497,18 +568,12 @@ vdev_netvsc_netvsc_probe(const struct if_nameindex *iface,
iface->if_name, iface->if_index);
return 0;
}
- if (!vdev_netvsc_iface_is_netvsc(iface)) {
- if (!specified || !force)
- return 0;
- DRV_LOG(WARNING,
- "using non-NetVSC interface \"%s\" (index %u)",
- iface->if_name, iface->if_index);
- }
/* Routed NetVSC should not be probed. */
- if (vdev_netvsc_has_route(iface->if_name)) {
- if (!specified || !force)
+ if (vdev_netvsc_has_route(iface, AF_INET) ||
+ vdev_netvsc_has_route(iface, AF_INET6)) {
+ if (!specified)
return 0;
- DRV_LOG(WARNING, "using routed NetVSC interface \"%s\""
+ DRV_LOG(WARNING, "probably using routed NetVSC interface \"%s\""
" (index %u)", iface->if_name, iface->if_index);
}
/* Create interface context. */
@@ -520,7 +585,7 @@ vdev_netvsc_netvsc_probe(const struct if_nameindex *iface,
goto error;
}
ctx->id = vdev_netvsc_ctx_count;
- strncpy(ctx->if_name, iface->if_name, sizeof(ctx->if_name));
+ strlcpy(ctx->if_name, iface->if_name, sizeof(ctx->if_name));
ctx->if_index = iface->if_index;
ctx->if_addr = *eth_addr;
ctx->pipe[0] = -1;
@@ -551,13 +616,13 @@ vdev_netvsc_netvsc_probe(const struct if_nameindex *iface,
name, ctx->id);
if (ret == -1 || (size_t)ret >= sizeof(ctx->name))
++i;
- ret = snprintf(ctx->devname, sizeof(ctx->devname), "net_failsafe_%s",
- ctx->name);
+ ret = snprintf(ctx->devname, sizeof(ctx->devname), "net_failsafe_vsc%u",
+ ctx->id);
if (ret == -1 || (size_t)ret >= sizeof(ctx->devname))
++i;
ret = snprintf(ctx->devargs, sizeof(ctx->devargs),
- "fd(%d),dev(net_tap_%s,remote=%s)",
- ctx->pipe[0], ctx->name, ctx->if_name);
+ "fd(%d),dev(net_tap_vsc%u,remote=%s)",
+ ctx->pipe[0], ctx->id, ctx->if_name);
if (ret == -1 || (size_t)ret >= sizeof(ctx->devargs))
++i;
if (i) {
@@ -569,7 +634,7 @@ vdev_netvsc_netvsc_probe(const struct if_nameindex *iface,
/* Request virtual device generation. */
DRV_LOG(DEBUG, "generating virtual device \"%s\" with arguments \"%s\"",
ctx->devname, ctx->devargs);
- vdev_netvsc_foreach_iface(vdev_netvsc_device_probe, ctx);
+ vdev_netvsc_foreach_iface(vdev_netvsc_device_probe, 0, ctx);
ret = rte_eal_hotplug_add("vdev", ctx->devname, ctx->devargs);
if (ret)
goto error;
@@ -639,16 +704,32 @@ vdev_netvsc_vdev_probe(struct rte_vdev_device *dev)
rte_kvargs_free(kvargs);
return 0;
}
+ if (specified > 1) {
+ DRV_LOG(ERR, "More than one way used to specify the netvsc"
+ " device.");
+ goto error;
+ }
rte_eal_alarm_cancel(vdev_netvsc_alarm, NULL);
/* Gather interfaces. */
- ret = vdev_netvsc_foreach_iface(vdev_netvsc_netvsc_probe, name, kvargs,
- force, specified, &matched);
+ ret = vdev_netvsc_foreach_iface(vdev_netvsc_netvsc_probe, 1, name,
+ kvargs, specified, &matched);
if (ret < 0)
goto error;
- if (matched < specified)
- DRV_LOG(WARNING,
- "some of the specified parameters did not match"
- " recognized network interfaces");
+ if (specified && matched < specified) {
+ if (!force) {
+ DRV_LOG(ERR, "Cannot find the specified netvsc device");
+ goto error;
+ }
+		/* Try to force probing on the specified non-NetVSC device. */
+ if (vdev_netvsc_foreach_iface(vdev_netvsc_netvsc_probe, 0, name,
+ kvargs, specified, &matched) < 0)
+ goto error;
+ if (matched < specified) {
+ DRV_LOG(ERR, "Cannot find the specified device");
+ goto error;
+ }
+ DRV_LOG(WARNING, "non-netvsc device was probed as netvsc");
+ }
ret = rte_eal_alarm_set(VDEV_NETVSC_PROBE_MS * 1000,
vdev_netvsc_alarm, NULL);
if (ret < 0) {
@@ -718,7 +799,8 @@ static int
vdev_netvsc_cmp_rte_device(const struct rte_device *dev1,
__rte_unused const void *_dev2)
{
- return strcmp(dev1->devargs->name, VDEV_NETVSC_DRIVER_NAME);
+ return strncmp(dev1->devargs->name, VDEV_NETVSC_DRIVER_NAME,
+ VDEV_NETVSC_DRIVER_NAME_LEN);
}
/**
@@ -733,14 +815,15 @@ vdev_netvsc_scan_callback(__rte_unused void *arg)
struct rte_devargs *devargs;
struct rte_bus *vbus = rte_bus_find_by_name("vdev");
- TAILQ_FOREACH(devargs, &devargs_list, next)
- if (!strcmp(devargs->name, VDEV_NETVSC_DRIVER_NAME))
+ RTE_EAL_DEVARGS_FOREACH("vdev", devargs)
+ if (!strncmp(devargs->name, VDEV_NETVSC_DRIVER_NAME,
+ VDEV_NETVSC_DRIVER_NAME_LEN))
return;
dev = (struct rte_vdev_device *)vbus->find_device(NULL,
vdev_netvsc_cmp_rte_device, VDEV_NETVSC_DRIVER_NAME);
if (dev)
return;
- if (rte_eal_devargs_add(RTE_DEVTYPE_VIRTUAL, VDEV_NETVSC_DRIVER_NAME))
+ if (rte_devargs_add(RTE_DEVTYPE_VIRTUAL, VDEV_NETVSC_DRIVER_NAME))
DRV_LOG(ERR, "unable to add netvsc devargs.");
}
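As a concrete example of the strings generated above: with ctx->pipe[0] == 7, ctx->id == 0 and a NetVSC netdevice named eth1 (values chosen purely for illustration), the driver would request a device named net_failsafe_vsc0 with devargs

    fd(7),dev(net_tap_vsc0,remote=eth1)

i.e. a fail-safe port whose first sub-device definition is supplied later through the pipe file descriptor and whose second is a TAP port mirroring the NetVSC interface through its remote= option.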
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 3aae01c3..ba9d768a 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2016 IGEL Co., Ltd.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of IGEL Co.,Ltd. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 IGEL Co., Ltd.
+ * Copyright(c) 2016-2018 Intel Corporation
*/
#include <unistd.h>
#include <pthread.h>
@@ -46,6 +18,11 @@
#include "rte_eth_vhost.h"
+static int vhost_logtype;
+
+#define VHOST_LOG(level, ...) \
+ rte_log(RTE_LOG_ ## level, vhost_logtype, __VA_ARGS__)
+
enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
#define ETH_VHOST_IFACE_ARG "iface"
@@ -117,7 +94,9 @@ struct pmd_internal {
char *dev_name;
char *iface_name;
uint16_t max_queues;
+ int vid;
rte_atomic32_t started;
+ uint8_t vlan_strip;
};
struct internal_list {
@@ -421,6 +400,11 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
for (i = 0; likely(i < nb_rx); i++) {
bufs[i]->port = r->port;
+ bufs[i]->vlan_tci = 0;
+
+ if (r->internal->vlan_strip)
+ rte_vlan_strip(bufs[i]);
+
r->stats.bytes += bufs[i]->pkt_len;
}
@@ -437,7 +421,7 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
struct vhost_queue *r = q;
uint16_t i, nb_tx = 0;
- uint16_t nb_send = nb_bufs;
+ uint16_t nb_send = 0;
if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
return 0;
@@ -447,6 +431,22 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
goto out;
+ for (i = 0; i < nb_bufs; i++) {
+ struct rte_mbuf *m = bufs[i];
+
+ /* Do VLAN tag insertion */
+ if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ int error = rte_vlan_insert(&m);
+ if (unlikely(error)) {
+ rte_pktmbuf_free(m);
+ continue;
+ }
+ }
+
+ bufs[nb_send] = m;
+ ++nb_send;
+ }
+
/* Enqueue packets to guest RX queue */
while (nb_send) {
uint16_t nb_pkts;
@@ -488,6 +488,11 @@ out:
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
+ struct pmd_internal *internal = dev->data->dev_private;
+ const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+
+ internal->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+
return 0;
}
@@ -519,6 +524,136 @@ find_internal_resource(char *ifname)
return list;
}
+static int
+eth_rxq_intr_enable(struct rte_eth_dev *dev, uint16_t qid)
+{
+ struct rte_vhost_vring vring;
+ struct vhost_queue *vq;
+ int ret = 0;
+
+ vq = dev->data->rx_queues[qid];
+ if (!vq) {
+ VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid);
+ return -1;
+ }
+
+ ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
+ if (ret < 0) {
+ VHOST_LOG(ERR, "Failed to get rxq%d's vring\n", qid);
+ return ret;
+ }
+ VHOST_LOG(INFO, "Enable interrupt for rxq%d\n", qid);
+ rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 1);
+ rte_wmb();
+
+ return ret;
+}
+
+static int
+eth_rxq_intr_disable(struct rte_eth_dev *dev, uint16_t qid)
+{
+ struct rte_vhost_vring vring;
+ struct vhost_queue *vq;
+ int ret = 0;
+
+ vq = dev->data->rx_queues[qid];
+ if (!vq) {
+ VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid);
+ return -1;
+ }
+
+ ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
+ if (ret < 0) {
+ VHOST_LOG(ERR, "Failed to get rxq%d's vring", qid);
+ return ret;
+ }
+ VHOST_LOG(INFO, "Disable interrupt for rxq%d\n", qid);
+ rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 0);
+ rte_wmb();
+
+ return 0;
+}
+
+static void
+eth_vhost_uninstall_intr(struct rte_eth_dev *dev)
+{
+ struct rte_intr_handle *intr_handle = dev->intr_handle;
+
+ if (intr_handle) {
+ if (intr_handle->intr_vec)
+ free(intr_handle->intr_vec);
+ free(intr_handle);
+ }
+
+ dev->intr_handle = NULL;
+}
+
+static int
+eth_vhost_install_intr(struct rte_eth_dev *dev)
+{
+ struct rte_vhost_vring vring;
+ struct vhost_queue *vq;
+ int count = 0;
+ int nb_rxq = dev->data->nb_rx_queues;
+ int i;
+ int ret;
+
+ /* uninstall first if we are reconnecting */
+ if (dev->intr_handle)
+ eth_vhost_uninstall_intr(dev);
+
+ dev->intr_handle = malloc(sizeof(*dev->intr_handle));
+ if (!dev->intr_handle) {
+ VHOST_LOG(ERR, "Failed to allocate intr_handle\n");
+ return -ENOMEM;
+ }
+ memset(dev->intr_handle, 0, sizeof(*dev->intr_handle));
+
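+ /* vhost kick/call fds are eventfds, so each interrupt read consumes a 64-bit counter */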
+ dev->intr_handle->efd_counter_size = sizeof(uint64_t);
+
+ dev->intr_handle->intr_vec =
+ malloc(nb_rxq * sizeof(dev->intr_handle->intr_vec[0]));
+
+ if (!dev->intr_handle->intr_vec) {
+ VHOST_LOG(ERR,
+ "Failed to allocate memory for interrupt vector\n");
+ free(dev->intr_handle);
+ return -ENOMEM;
+ }
+
+ VHOST_LOG(INFO, "Prepare intr vec\n");
+ for (i = 0; i < nb_rxq; i++) {
+ vq = dev->data->rx_queues[i];
+ if (!vq) {
+ VHOST_LOG(INFO, "rxq-%d not set up yet, skip!\n", i);
+ continue;
+ }
+
+ ret = rte_vhost_get_vhost_vring(vq->vid, (i << 1) + 1, &vring);
+ if (ret < 0) {
+ VHOST_LOG(INFO,
+ "Failed to get rxq-%d's vring, skip!\n", i);
+ continue;
+ }
+
+ if (vring.kickfd < 0) {
+ VHOST_LOG(INFO,
+ "rxq-%d's kickfd is invalid, skip!\n", i);
+ continue;
+ }
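+ /* the guest kicks this fd when it enqueues packets, so it doubles as the Rx interrupt source */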
+ dev->intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
+ dev->intr_handle->efds[i] = vring.kickfd;
+ count++;
+ VHOST_LOG(INFO, "Installed intr vec for rxq-%d\n", i);
+ }
+
+ dev->intr_handle->nb_efd = count;
+ dev->intr_handle->max_intr = count + 1;
+ dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
+
+ return 0;
+}
+
static void
update_queuing_status(struct rte_eth_dev *dev)
{
@@ -527,6 +662,9 @@ update_queuing_status(struct rte_eth_dev *dev)
unsigned int i;
int allow_queuing = 1;
+ if (!dev->data->rx_queues || !dev->data->tx_queues)
+ return;
+
if (rte_atomic32_read(&internal->started) == 0 ||
rte_atomic32_read(&internal->dev_attached) == 0)
allow_queuing = 0;
@@ -551,13 +689,37 @@ update_queuing_status(struct rte_eth_dev *dev)
}
}
+static void
+queue_setup(struct rte_eth_dev *eth_dev, struct pmd_internal *internal)
+{
+ struct vhost_queue *vq;
+ int i;
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ vq = eth_dev->data->rx_queues[i];
+ if (!vq)
+ continue;
+ vq->vid = internal->vid;
+ vq->internal = internal;
+ vq->port = eth_dev->data->port_id;
+ }
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ vq = eth_dev->data->tx_queues[i];
+ if (!vq)
+ continue;
+ vq->vid = internal->vid;
+ vq->internal = internal;
+ vq->port = eth_dev->data->port_id;
+ }
+}
+
static int
new_device(int vid)
{
struct rte_eth_dev *eth_dev;
struct internal_list *list;
struct pmd_internal *internal;
- struct vhost_queue *vq;
+ struct rte_eth_conf *dev_conf;
unsigned i;
char ifname[PATH_MAX];
#ifdef RTE_LIBRTE_VHOST_NUMA
@@ -567,12 +729,13 @@ new_device(int vid)
rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
list = find_internal_resource(ifname);
if (list == NULL) {
- RTE_LOG(INFO, PMD, "Invalid device name: %s\n", ifname);
+ VHOST_LOG(INFO, "Invalid device name: %s\n", ifname);
return -1;
}
eth_dev = list->eth_dev;
internal = eth_dev->data->dev_private;
+ dev_conf = &eth_dev->data->dev_conf;
#ifdef RTE_LIBRTE_VHOST_NUMA
newnode = rte_vhost_get_numa_node(vid);
@@ -580,21 +743,19 @@ new_device(int vid)
eth_dev->data->numa_node = newnode;
#endif
- for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
- vq = eth_dev->data->rx_queues[i];
- if (vq == NULL)
- continue;
- vq->vid = vid;
- vq->internal = internal;
- vq->port = eth_dev->data->port_id;
- }
- for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
- vq = eth_dev->data->tx_queues[i];
- if (vq == NULL)
- continue;
- vq->vid = vid;
- vq->internal = internal;
- vq->port = eth_dev->data->port_id;
+ internal->vid = vid;
+ if (rte_atomic32_read(&internal->started) == 1) {
+ queue_setup(eth_dev, internal);
+
+ if (dev_conf->intr_conf.rxq) {
+ if (eth_vhost_install_intr(eth_dev) < 0) {
+ VHOST_LOG(INFO,
+ "Failed to install interrupt handler.\n");
+ return -1;
+ }
+ }
+ } else {
+ VHOST_LOG(INFO, "RX/TX queues do not exist yet\n");
}
for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
@@ -607,7 +768,7 @@ new_device(int vid)
rte_atomic32_set(&internal->dev_attached, 1);
update_queuing_status(eth_dev);
- RTE_LOG(INFO, PMD, "Vhost device %d created\n", vid);
+ VHOST_LOG(INFO, "Vhost device %d created\n", vid);
_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
@@ -628,7 +789,7 @@ destroy_device(int vid)
rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
list = find_internal_resource(ifname);
if (list == NULL) {
- RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
+ VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
return;
}
eth_dev = list->eth_dev;
@@ -639,17 +800,19 @@ destroy_device(int vid)
eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
- for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
- vq = eth_dev->data->rx_queues[i];
- if (vq == NULL)
- continue;
- vq->vid = -1;
- }
- for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
- vq = eth_dev->data->tx_queues[i];
- if (vq == NULL)
- continue;
- vq->vid = -1;
+ if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ vq = eth_dev->data->rx_queues[i];
+ if (!vq)
+ continue;
+ vq->vid = -1;
+ }
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ vq = eth_dev->data->tx_queues[i];
+ if (!vq)
+ continue;
+ vq->vid = -1;
+ }
}
state = vring_states[eth_dev->data->port_id];
@@ -661,7 +824,8 @@ destroy_device(int vid)
state->max_vring = 0;
rte_spinlock_unlock(&state->lock);
- RTE_LOG(INFO, PMD, "Vhost device %d destroyed\n", vid);
+ VHOST_LOG(INFO, "Vhost device %d destroyed\n", vid);
+ eth_vhost_uninstall_intr(eth_dev);
_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
@@ -677,7 +841,7 @@ vring_state_changed(int vid, uint16_t vring, int enable)
rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
list = find_internal_resource(ifname);
if (list == NULL) {
- RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
+ VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
return -1;
}
@@ -689,7 +853,7 @@ vring_state_changed(int vid, uint16_t vring, int enable)
state->max_vring = RTE_MAX(vring, state->max_vring);
rte_spinlock_unlock(&state->lock);
- RTE_LOG(INFO, PMD, "vring%u is %s\n",
+ VHOST_LOG(INFO, "vring%u is %s\n",
vring, enable ? "enabled" : "disabled");
_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);
@@ -712,13 +876,13 @@ rte_eth_vhost_get_queue_event(uint16_t port_id,
int idx;
if (port_id >= RTE_MAX_ETHPORTS) {
- RTE_LOG(ERR, PMD, "Invalid port id\n");
+ VHOST_LOG(ERR, "Invalid port id\n");
return -1;
}
state = vring_states[port_id];
if (!state) {
- RTE_LOG(ERR, PMD, "Unused port\n");
+ VHOST_LOG(ERR, "Unused port\n");
return -1;
}
@@ -770,12 +934,25 @@ rte_eth_vhost_get_vid_from_port_id(uint16_t port_id)
}
static int
-eth_dev_start(struct rte_eth_dev *dev)
+eth_dev_start(struct rte_eth_dev *eth_dev)
{
- struct pmd_internal *internal = dev->data->dev_private;
+ struct pmd_internal *internal = eth_dev->data->dev_private;
+ struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
+
+ queue_setup(eth_dev, internal);
+
+ if (rte_atomic32_read(&internal->dev_attached) == 1) {
+ if (dev_conf->intr_conf.rxq) {
+ if (eth_vhost_install_intr(eth_dev) < 0) {
+ VHOST_LOG(INFO,
+ "Failed to install interrupt handler.\n");
+ return -1;
+ }
+ }
+ }
rte_atomic32_set(&internal->started, 1);
- update_queuing_status(dev);
+ update_queuing_status(eth_dev);
return 0;
}
@@ -813,10 +990,13 @@ eth_dev_close(struct rte_eth_dev *dev)
pthread_mutex_unlock(&internal_list_lock);
rte_free(list);
- for (i = 0; i < dev->data->nb_rx_queues; i++)
- rte_free(dev->data->rx_queues[i]);
- for (i = 0; i < dev->data->nb_tx_queues; i++)
- rte_free(dev->data->tx_queues[i]);
+ if (dev->data->rx_queues)
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ rte_free(dev->data->rx_queues[i]);
+
+ if (dev->data->tx_queues)
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ rte_free(dev->data->tx_queues[i]);
rte_free(dev->data->mac_addrs);
free(internal->dev_name);
@@ -838,7 +1018,7 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (vq == NULL) {
- RTE_LOG(ERR, PMD, "Failed to allocate memory for rx queue\n");
+ VHOST_LOG(ERR, "Failed to allocate memory for rx queue\n");
return -ENOMEM;
}
@@ -860,7 +1040,7 @@ eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (vq == NULL) {
- RTE_LOG(ERR, PMD, "Failed to allocate memory for tx queue\n");
+ VHOST_LOG(ERR, "Failed to allocate memory for tx queue\n");
return -ENOMEM;
}
@@ -878,7 +1058,7 @@ eth_dev_info(struct rte_eth_dev *dev,
internal = dev->data->dev_private;
if (internal == NULL) {
- RTE_LOG(ERR, PMD, "Invalid device specified\n");
+ VHOST_LOG(ERR, "Invalid device specified\n");
return;
}
@@ -887,6 +1067,10 @@ eth_dev_info(struct rte_eth_dev *dev,
dev_info->max_rx_queues = internal->max_queues;
dev_info->max_tx_queues = internal->max_queues;
dev_info->min_rx_bufsize = 0;
+
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_VLAN_INSERT;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
}
static int
@@ -1007,6 +1191,8 @@ static const struct eth_dev_ops ops = {
.xstats_reset = vhost_dev_xstats_reset,
.xstats_get = vhost_dev_xstats_get,
.xstats_get_names = vhost_dev_xstats_get_names,
+ .rx_queue_intr_enable = eth_rxq_intr_enable,
+ .rx_queue_intr_disable = eth_rxq_intr_disable,
};
static struct rte_vdev_driver pmd_vhost_drv;
@@ -1016,23 +1202,16 @@ eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
int16_t queues, const unsigned int numa_node, uint64_t flags)
{
const char *name = rte_vdev_device_name(dev);
- struct rte_eth_dev_data *data = NULL;
+ struct rte_eth_dev_data *data;
struct pmd_internal *internal = NULL;
struct rte_eth_dev *eth_dev = NULL;
struct ether_addr *eth_addr = NULL;
struct rte_vhost_vring_state *vring_state = NULL;
struct internal_list *list = NULL;
- RTE_LOG(INFO, PMD, "Creating VHOST-USER backend on numa socket %u\n",
+ VHOST_LOG(INFO, "Creating VHOST-USER backend on numa socket %u\n",
numa_node);
- /* now do all data allocation - for eth_dev structure and internal
- * (private) data
- */
- data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
- if (data == NULL)
- goto error;
-
list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
if (list == NULL)
goto error;
@@ -1074,15 +1253,11 @@ eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
rte_spinlock_init(&vring_state->lock);
vring_states[eth_dev->data->port_id] = vring_state;
- /* We'll replace the 'data' originally allocated by eth_dev. So the
- * vhost PMD resources won't be shared between multi processes.
- */
- rte_memcpy(data, eth_dev->data, sizeof(*data));
- eth_dev->data = data;
-
+ data = eth_dev->data;
data->nb_rx_queues = queues;
data->nb_tx_queues = queues;
internal->max_queues = queues;
+ internal->vid = -1;
data->dev_link = pmd_link;
data->mac_addrs = eth_addr;
data->dev_flags = RTE_ETH_DEV_INTR_LSC;
@@ -1097,16 +1272,17 @@ eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
goto error;
if (rte_vhost_driver_callback_register(iface_name, &vhost_ops) < 0) {
- RTE_LOG(ERR, PMD, "Can't register callbacks\n");
+ VHOST_LOG(ERR, "Can't register callbacks\n");
goto error;
}
if (rte_vhost_driver_start(iface_name) < 0) {
- RTE_LOG(ERR, PMD, "Failed to start driver for %s\n",
+ VHOST_LOG(ERR, "Failed to start driver for %s\n",
iface_name);
goto error;
}
+ rte_eth_dev_probing_finish(eth_dev);
return data->port_id;
error:
@@ -1120,7 +1296,6 @@ error:
rte_eth_dev_release_port(eth_dev);
rte_free(internal);
rte_free(list);
- rte_free(data);
return -1;
}
@@ -1164,9 +1339,23 @@ rte_pmd_vhost_probe(struct rte_vdev_device *dev)
int client_mode = 0;
int dequeue_zero_copy = 0;
int iommu_support = 0;
+ struct rte_eth_dev *eth_dev;
+ const char *name = rte_vdev_device_name(dev);
- RTE_LOG(INFO, PMD, "Initializing pmd_vhost for %s\n",
- rte_vdev_device_name(dev));
+ VHOST_LOG(INFO, "Initializing pmd_vhost for %s\n", name);
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(rte_vdev_device_args(dev)) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ VHOST_LOG(ERR, "Failed to probe %s\n", name);
+ return -1;
+ }
+ /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->dev_ops = &ops;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
if (kvlist == NULL)
@@ -1239,7 +1428,7 @@ rte_pmd_vhost_remove(struct rte_vdev_device *dev)
struct rte_eth_dev *eth_dev = NULL;
name = rte_vdev_device_name(dev);
- RTE_LOG(INFO, PMD, "Un-Initializing pmd_vhost for %s\n", name);
+ VHOST_LOG(INFO, "Un-Initializing pmd_vhost for %s\n", name);
/* find an ethdev entry */
eth_dev = rte_eth_dev_allocated(name);
@@ -1251,8 +1440,6 @@ rte_pmd_vhost_remove(struct rte_vdev_device *dev)
rte_free(vring_states[eth_dev->data->port_id]);
vring_states[eth_dev->data->port_id] = NULL;
- rte_free(eth_dev->data);
-
rte_eth_dev_release_port(eth_dev);
return 0;
@@ -1268,3 +1455,12 @@ RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
"iface=<ifc> "
"queues=<int>");
+
+RTE_INIT(vhost_init_log);
+static void
+vhost_init_log(void)
+{
+ vhost_logtype = rte_log_register("pmd.net.vhost");
+ if (vhost_logtype >= 0)
+ rte_log_set_level(vhost_logtype, RTE_LOG_NOTICE);
+}
diff --git a/drivers/net/vhost/rte_eth_vhost.h b/drivers/net/vhost/rte_eth_vhost.h
index 948f3c81..0e68b9f6 100644
--- a/drivers/net/vhost/rte_eth_vhost.h
+++ b/drivers/net/vhost/rte_eth_vhost.h
@@ -1,36 +1,7 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016 IGEL Co., Ltd.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of IGEL Co., Ltd. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 IGEL Co., Ltd.
+ * Copyright(c) 2016-2018 Intel Corporation
*/
-
#ifndef _RTE_ETH_VHOST_H_
#define _RTE_ETH_VHOST_H_
diff --git a/drivers/net/virtio/meson.build b/drivers/net/virtio/meson.build
new file mode 100644
index 00000000..e43ce6bb
--- /dev/null
+++ b/drivers/net/virtio/meson.build
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+allow_experimental_apis = true
+sources += files('virtio_ethdev.c',
+ 'virtio_pci.c',
+ 'virtio_rxtx.c',
+ 'virtio_rxtx_simple.c',
+ 'virtqueue.c')
+deps += ['kvargs', 'bus_pci']
+
+if arch_subdir == 'x86'
+ sources += files('virtio_rxtx_simple_sse.c')
+elif arch_subdir == 'arm' and host_machine.cpu_family().startswith('aarch64')
+ sources += files('virtio_rxtx_simple_neon.c')
+endif
+
+if host_machine.system() == 'linux'
+ dpdk_conf.set('RTE_VIRTIO_USER', 1)
+
+ sources += files('virtio_user_ethdev.c',
+ 'virtio_user/vhost_kernel.c',
+ 'virtio_user/vhost_kernel_tap.c',
+ 'virtio_user/vhost_user.c',
+ 'virtio_user/virtio_user_dev.c')
+ deps += ['bus_vdev']
+endif
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 884f74ad..df50a571 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -14,7 +14,6 @@
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
-#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
@@ -29,6 +28,7 @@
#include <rte_eal.h>
#include <rte_dev.h>
#include <rte_cycles.h>
+#include <rte_kvargs.h>
#include "virtio_ethdev.h"
#include "virtio_pci.h"
@@ -68,7 +68,7 @@ static int virtio_mac_addr_add(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
uint32_t index, uint32_t vmdq);
static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
-static void virtio_mac_addr_set(struct rte_eth_dev *dev,
+static int virtio_mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static int virtio_intr_enable(struct rte_eth_dev *dev);
@@ -392,8 +392,8 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
size, vq->vq_ring_size);
mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
- SOCKET_ID_ANY,
- 0, VIRTIO_PCI_VRING_ALIGN);
+ SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG,
+ VIRTIO_PCI_VRING_ALIGN);
if (mz == NULL) {
if (rte_errno == EEXIST)
mz = rte_memzone_lookup(vq_name);
@@ -418,8 +418,8 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr",
dev->data->port_id, vtpci_queue_idx);
hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
- SOCKET_ID_ANY, 0,
- RTE_CACHE_LINE_SIZE);
+ SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG,
+ RTE_CACHE_LINE_SIZE);
if (hdr_mz == NULL) {
if (rte_errno == EEXIST)
hdr_mz = rte_memzone_lookup(vq_hdr_name);
@@ -774,46 +774,6 @@ static const struct eth_dev_ops virtio_eth_dev_ops = {
.mac_addr_set = virtio_mac_addr_set,
};
-static inline int
-virtio_dev_atomic_read_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = link;
- struct rte_eth_link *src = &(dev->data->dev_link);
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
-/**
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-virtio_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &(dev->data->dev_link);
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
static void
virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
@@ -1097,7 +1057,7 @@ virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
virtio_mac_table_set(hw, uc, mc);
}
-static void
+static int
virtio_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
struct virtio_hw *hw = dev->data->dev_private;
@@ -1113,9 +1073,14 @@ virtio_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
memcpy(ctrl.data, mac_addr, ETHER_ADDR_LEN);
- virtio_send_command(hw->cvq, &ctrl, &len, 1);
- } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC))
- virtio_set_hwaddr(hw);
+ return virtio_send_command(hw->cvq, &ctrl, &len, 1);
+ }
+
+ if (!vtpci_with_feature(hw, VIRTIO_NET_F_MAC))
+ return -ENOTSUP;
+
+ virtio_set_hwaddr(hw);
+ return 0;
}
static int
@@ -1273,9 +1238,16 @@ static void
virtio_notify_peers(struct rte_eth_dev *dev)
{
struct virtio_hw *hw = dev->data->dev_private;
- struct virtnet_rx *rxvq = dev->data->rx_queues[0];
+ struct virtnet_rx *rxvq;
struct rte_mbuf *rarp_mbuf;
+ if (!dev->data->rx_queues)
+ return;
+
+ rxvq = dev->data->rx_queues[0];
+ if (!rxvq)
+ return;
+
rarp_mbuf = rte_net_make_rarp_packet(rxvq->mpool,
(struct ether_addr *)hw->mac_addr);
if (rarp_mbuf == NULL) {
@@ -1333,7 +1305,8 @@ virtio_interrupt_handler(void *param)
if (isr & VIRTIO_NET_S_ANNOUNCE) {
virtio_notify_peers(dev);
- virtio_ack_link_announce(dev);
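+ /* the announce ack is a control-queue command, so skip it when no cvq is present */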
+ if (hw->cvq)
+ virtio_ack_link_announce(dev);
}
}
@@ -1744,9 +1717,51 @@ eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
return 0;
}
+static int vdpa_check_handler(__rte_unused const char *key,
+ const char *value, __rte_unused void *opaque)
+{
+ if (strcmp(value, "1"))
+ return -1;
+
+ return 0;
+}
+
+static int
+vdpa_mode_selected(struct rte_devargs *devargs)
+{
+ struct rte_kvargs *kvlist;
+ const char *key = "vdpa";
+ int ret = 0;
+
+ if (devargs == NULL)
+ return 0;
+
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (kvlist == NULL)
+ return 0;
+
+ if (!rte_kvargs_count(kvlist, key))
+ goto exit;
+
+ /* vdpa mode selected when there's a key-value pair: vdpa=1 */
+ if (rte_kvargs_process(kvlist, key,
+ vdpa_check_handler, NULL) < 0) {
+ goto exit;
+ }
+ ret = 1;
+
+exit:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
static int eth_virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
+ /* virtio pmd skips probe if device needs to work in vdpa mode */
+ if (vdpa_mode_selected(pci_dev->device.devargs))
+ return 1;
+
return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct virtio_hw),
eth_virtio_dev_init);
}
@@ -1787,6 +1802,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)
{
const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct virtio_hw *hw = dev->data->dev_private;
+ uint64_t rx_offloads = rxmode->offloads;
uint64_t req_features;
int ret;
@@ -1799,14 +1815,11 @@ virtio_dev_configure(struct rte_eth_dev *dev)
return ret;
}
- /* The name hw_ip_checksum is a bit confusing since it can be
- * set by the application to request L3 and/or L4 checksums. In
- * case of virtio, only L4 checksum is supported.
- */
- if (rxmode->hw_ip_checksum)
+ if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM))
req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
- if (rxmode->enable_lro)
+ if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
req_features |=
(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
(1ULL << VIRTIO_NET_F_GUEST_TSO6);
@@ -1818,14 +1831,15 @@ virtio_dev_configure(struct rte_eth_dev *dev)
return ret;
}
- if (rxmode->hw_ip_checksum &&
+ if ((rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM)) &&
!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
PMD_DRV_LOG(ERR,
"rx checksum not available on this host");
return -ENOTSUP;
}
- if (rxmode->enable_lro &&
+ if ((rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) &&
(!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
PMD_DRV_LOG(ERR,
@@ -1837,9 +1851,10 @@ virtio_dev_configure(struct rte_eth_dev *dev)
if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
virtio_dev_cq_start(dev);
- hw->vlan_strip = rxmode->hw_vlan_strip;
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ hw->vlan_strip = 1;
- if (rxmode->hw_vlan_filter
+ if ((rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
&& !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
PMD_DRV_LOG(ERR,
"vlan filtering not available on this host");
@@ -1870,7 +1885,8 @@ virtio_dev_configure(struct rte_eth_dev *dev)
hw->use_simple_tx = 0;
}
- if (rxmode->hw_ip_checksum)
+ if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM))
hw->use_simple_rx = 0;
return 0;
@@ -2028,21 +2044,21 @@ virtio_dev_stop(struct rte_eth_dev *dev)
hw->started = 0;
memset(&link, 0, sizeof(link));
- virtio_dev_atomic_write_link_status(dev, &link);
+ rte_eth_linkstatus_set(dev, &link);
rte_spinlock_unlock(&hw->state_lock);
}
static int
virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
- struct rte_eth_link link, old;
+ struct rte_eth_link link;
uint16_t status;
struct virtio_hw *hw = dev->data->dev_private;
+
memset(&link, 0, sizeof(link));
- virtio_dev_atomic_read_link_status(dev, &link);
- old = link;
link.link_duplex = ETH_LINK_FULL_DUPLEX;
link.link_speed = ETH_SPEED_NUM_10G;
+ link.link_autoneg = ETH_LINK_FIXED;
if (hw->started == 0) {
link.link_status = ETH_LINK_DOWN;
@@ -2063,9 +2079,8 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
} else {
link.link_status = ETH_LINK_UP;
}
- virtio_dev_atomic_write_link_status(dev, &link);
- return (old.link_status == link.link_status) ? -1 : 0;
+ return rte_eth_linkstatus_set(dev, &link);
}
static int
@@ -2073,9 +2088,10 @@ virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct virtio_hw *hw = dev->data->dev_private;
+ uint64_t offloads = rxmode->offloads;
if (mask & ETH_VLAN_FILTER_MASK) {
- if (rxmode->hw_vlan_filter &&
+ if ((offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
PMD_DRV_LOG(NOTICE,
@@ -2086,7 +2102,7 @@ virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
}
if (mask & ETH_VLAN_STRIP_MASK)
- hw->vlan_strip = rxmode->hw_vlan_strip;
+ hw->vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
return 0;
}
@@ -2099,7 +2115,6 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->speed_capa = ETH_LINK_SPEED_10G; /* fake value */
- dev_info->pci_dev = dev->device ? RTE_ETH_DEV_TO_PCI(dev) : NULL;
dev_info->max_rx_queues =
RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
dev_info->max_tx_queues =
@@ -2112,18 +2127,21 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
};
host_features = VTPCI_OPS(hw)->get_features(hw);
- dev_info->rx_offload_capa = 0;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
dev_info->rx_offload_capa |=
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM;
}
+ if (host_features & (1ULL << VIRTIO_NET_F_CTRL_VLAN))
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_FILTER;
tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
(1ULL << VIRTIO_NET_F_GUEST_TSO6);
if ((host_features & tso_mask) == tso_mask)
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
- dev_info->tx_offload_capa = 0;
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_VLAN_INSERT;
if (hw->guest_features & (1ULL << VIRTIO_NET_F_CSUM)) {
dev_info->tx_offload_capa |=
DEV_TX_OFFLOAD_UDP_CKSUM |
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index 4539d2e4..bb40064e 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -43,6 +43,14 @@
1u << VIRTIO_NET_F_GUEST_CSUM | \
1u << VIRTIO_NET_F_GUEST_TSO4 | \
1u << VIRTIO_NET_F_GUEST_TSO6)
+
+#define VIRTIO_PMD_PER_DEVICE_RX_OFFLOADS \
+ (DEV_RX_OFFLOAD_TCP_CKSUM | \
+ DEV_RX_OFFLOAD_UDP_CKSUM | \
+ DEV_RX_OFFLOAD_TCP_LRO | \
+ DEV_RX_OFFLOAD_VLAN_FILTER | \
+ DEV_RX_OFFLOAD_VLAN_STRIP)
+
/*
* CQ function prototype
*/
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 8dbf2a30..92fab217 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -38,10 +38,6 @@
#define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
#endif
-
-#define VIRTIO_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
- ETH_TXQ_FLAGS_NOOFFLOADS)
-
int
virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
{
@@ -389,7 +385,7 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id __rte_unused,
- __rte_unused const struct rte_eth_rxconf *rx_conf,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
struct rte_mempool *mp)
{
uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
@@ -410,6 +406,7 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
rte_exit(EXIT_FAILURE,
"Cannot allocate mbufs for rx virtqueue");
}
+
dev->data->rx_queues[queue_idx] = rxvq;
return 0;
@@ -502,7 +499,7 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
/* cannot use simple rxtx funcs with multisegs or offloads */
- if ((tx_conf->txq_flags & VIRTIO_SIMPLE_FLAGS) != VIRTIO_SIMPLE_FLAGS)
+ if (dev->data->dev_conf.txmode.offloads)
hw->use_simple_tx = 0;
if (nb_desc == 0 || nb_desc > vq->vq_nentries)
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel.c b/drivers/net/virtio/virtio_user/vhost_kernel.c
index 8d0a1ab2..b2444096 100644
--- a/drivers/net/virtio/virtio_user/vhost_kernel.c
+++ b/drivers/net/virtio/virtio_user/vhost_kernel.c
@@ -70,6 +70,32 @@ static uint64_t vhost_req_user_to_kernel[] = {
[VHOST_USER_SET_MEM_TABLE] = VHOST_SET_MEM_TABLE,
};
+struct walk_arg {
+ struct vhost_memory_kernel *vm;
+ uint32_t region_nr;
+};
+static int
+add_memory_region(const struct rte_memseg_list *msl __rte_unused,
+ const struct rte_memseg *ms, size_t len, void *arg)
+{
+ struct walk_arg *wa = arg;
+ struct vhost_memory_region *mr;
+ void *start_addr;
+
+ if (wa->region_nr >= max_regions)
+ return -1;
+
+ mr = &wa->vm->regions[wa->region_nr++];
+ start_addr = ms->addr;
+
+ mr->guest_phys_addr = (uint64_t)(uintptr_t)start_addr;
+ mr->userspace_addr = (uint64_t)(uintptr_t)start_addr;
+ mr->memory_size = len;
+ mr->mmap_offset = 0;
+
+ return 0;
+}
+
/* By default, vhost kernel module allows 64 regions, but DPDK allows
* 256 segments. As a relief, below function merges those virtually
* adjacent memsegs into one region.
@@ -77,63 +103,24 @@ static uint64_t vhost_req_user_to_kernel[] = {
static struct vhost_memory_kernel *
prepare_vhost_memory_kernel(void)
{
- uint32_t i, j, k = 0;
- struct rte_memseg *seg;
- struct vhost_memory_region *mr;
struct vhost_memory_kernel *vm;
+ struct walk_arg wa;
vm = malloc(sizeof(struct vhost_memory_kernel) +
- max_regions *
- sizeof(struct vhost_memory_region));
+ max_regions *
+ sizeof(struct vhost_memory_region));
if (!vm)
return NULL;
- for (i = 0; i < RTE_MAX_MEMSEG; ++i) {
- seg = &rte_eal_get_configuration()->mem_config->memseg[i];
- if (!seg->addr)
- break;
-
- int new_region = 1;
-
- for (j = 0; j < k; ++j) {
- mr = &vm->regions[j];
-
- if (mr->userspace_addr + mr->memory_size ==
- (uint64_t)(uintptr_t)seg->addr) {
- mr->memory_size += seg->len;
- new_region = 0;
- break;
- }
-
- if ((uint64_t)(uintptr_t)seg->addr + seg->len ==
- mr->userspace_addr) {
- mr->guest_phys_addr =
- (uint64_t)(uintptr_t)seg->addr;
- mr->userspace_addr =
- (uint64_t)(uintptr_t)seg->addr;
- mr->memory_size += seg->len;
- new_region = 0;
- break;
- }
- }
-
- if (new_region == 0)
- continue;
-
- mr = &vm->regions[k++];
- /* use vaddr here! */
- mr->guest_phys_addr = (uint64_t)(uintptr_t)seg->addr;
- mr->userspace_addr = (uint64_t)(uintptr_t)seg->addr;
- mr->memory_size = seg->len;
- mr->mmap_offset = 0;
+ wa.region_nr = 0;
+ wa.vm = vm;
- if (k >= max_regions) {
- free(vm);
- return NULL;
- }
+ if (rte_memseg_contig_walk(add_memory_region, &wa) < 0) {
+ free(vm);
+ return NULL;
}
- vm->nregions = k;
+ vm->nregions = wa.region_nr;
vm->padding = 0;
return vm;
}
@@ -351,7 +338,8 @@ vhost_kernel_enable_queue_pair(struct virtio_user_dev *dev,
else
hdr_size = sizeof(struct virtio_net_hdr);
- tapfd = vhost_kernel_open_tap(&dev->ifname, hdr_size, req_mq);
+ tapfd = vhost_kernel_open_tap(&dev->ifname, hdr_size, req_mq,
+ (char *)dev->mac_addr);
if (tapfd < 0) {
PMD_DRV_LOG(ERR, "fail to open tap for vhost kernel");
return -1;
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
index 1a47a348..9ea7ade7 100644
--- a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
+++ b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
@@ -7,15 +7,19 @@
#include <sys/stat.h>
#include <fcntl.h>
#include <net/if.h>
+#include <net/if_arp.h>
#include <errno.h>
#include <string.h>
#include <limits.h>
+#include <rte_ether.h>
+
#include "vhost_kernel_tap.h"
#include "../virtio_logs.h"
int
-vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq)
+vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq,
+ const char *mac)
{
unsigned int tap_features;
int sndbuf = INT_MAX;
@@ -94,6 +98,14 @@ vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq)
PMD_DRV_LOG(ERR, "TUNSETOFFLOAD ioctl() failed: %s",
strerror(errno));
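+ /* program the requested MAC address on the tap interface */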
+ memset(&ifr, 0, sizeof(ifr));
+ ifr.ifr_hwaddr.sa_family = ARPHRD_ETHER;
+ memcpy(ifr.ifr_hwaddr.sa_data, mac, ETHER_ADDR_LEN);
+ if (ioctl(tapfd, SIOCSIFHWADDR, (void *)&ifr) == -1) {
+ PMD_DRV_LOG(ERR, "SIOCSIFHWADDR failed: %s", strerror(errno));
+ goto error;
+ }
+
if (!(*p_ifname))
*p_ifname = strdup(ifr.ifr_name);
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel_tap.h b/drivers/net/virtio/virtio_user/vhost_kernel_tap.h
index 7d52e6b7..01a026f5 100644
--- a/drivers/net/virtio/virtio_user/vhost_kernel_tap.h
+++ b/drivers/net/virtio/virtio_user/vhost_kernel_tap.h
@@ -35,4 +35,5 @@
/* Constants */
#define PATH_NET_TUN "/dev/net/tun"
-int vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq);
+int vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq,
+ const char *mac);
diff --git a/drivers/net/virtio/virtio_user/vhost_user.c b/drivers/net/virtio/virtio_user/vhost_user.c
index 91c6449b..ef6e43df 100644
--- a/drivers/net/virtio/virtio_user/vhost_user.c
+++ b/drivers/net/virtio/virtio_user/vhost_user.c
@@ -138,12 +138,13 @@ struct hugepage_file_info {
static int
get_hugepage_file_info(struct hugepage_file_info huges[], int max)
{
- int idx;
+ int idx, k, exist;
FILE *f;
char buf[BUFSIZ], *tmp, *tail;
char *str_underline, *str_start;
int huge_index;
uint64_t v_start, v_end;
+ struct stat stats;
f = fopen("/proc/self/maps", "r");
if (!f) {
@@ -183,16 +184,39 @@ get_hugepage_file_info(struct hugepage_file_info huges[], int max)
if (sscanf(str_start, "map_%d", &huge_index) != 1)
continue;
+ /* skip duplicated files which are mapped to different regions */
+ for (k = 0, exist = -1; k < idx; ++k) {
+ if (!strcmp(huges[k].path, tmp)) {
+ exist = k;
+ break;
+ }
+ }
+ if (exist >= 0)
+ continue;
+
if (idx >= max) {
PMD_DRV_LOG(ERR, "Exceed maximum of %d", max);
goto error;
}
+
huges[idx].addr = v_start;
- huges[idx].size = v_end - v_start;
+ huges[idx].size = v_end - v_start; /* To be corrected later */
snprintf(huges[idx].path, PATH_MAX, "%s", tmp);
idx++;
}
+ /* correct the size for files that have many regions */
+ for (k = 0; k < idx; ++k) {
+ if (stat(huges[k].path, &stats) < 0) {
+ PMD_DRV_LOG(ERR, "Failed to stat %s, %s\n",
+ huges[k].path, strerror(errno));
+ continue;
+ }
+ huges[k].size = stats.st_size;
+ PMD_DRV_LOG(INFO, "file %s, size %zx\n",
+ huges[k].path, huges[k].size);
+ }
+
fclose(f);
return idx;
@@ -263,6 +287,9 @@ vhost_user_sock(struct virtio_user_dev *dev,
PMD_DRV_LOG(INFO, "%s", vhost_msg_strings[req]);
+ if (dev->is_server && vhostfd < 0)
+ return -1;
+
msg.request = req;
msg.flags = VHOST_USER_VERSION;
msg.size = 0;
@@ -378,6 +405,30 @@ vhost_user_sock(struct virtio_user_dev *dev,
return 0;
}
+#define MAX_VIRTIO_USER_BACKLOG 1
+static int
+virtio_user_start_server(struct virtio_user_dev *dev, struct sockaddr_un *un)
+{
+ int ret;
+ int flag;
+ int fd = dev->listenfd;
+
+ ret = bind(fd, (struct sockaddr *)un, sizeof(*un));
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "failed to bind to %s: %s; remove it and try again\n",
+ dev->path, strerror(errno));
+ return -1;
+ }
+ ret = listen(fd, MAX_VIRTIO_USER_BACKLOG);
+ if (ret < 0)
+ return -1;
+
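+ /* reconnections are accepted from the interrupt path, so the listen fd must not block */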
+ flag = fcntl(fd, F_GETFL);
+ fcntl(fd, F_SETFL, flag | O_NONBLOCK);
+
+ return 0;
+}
+
/**
* Set up environment to talk with a vhost user backend.
*
@@ -405,13 +456,24 @@ vhost_user_setup(struct virtio_user_dev *dev)
memset(&un, 0, sizeof(un));
un.sun_family = AF_UNIX;
snprintf(un.sun_path, sizeof(un.sun_path), "%s", dev->path);
- if (connect(fd, (struct sockaddr *)&un, sizeof(un)) < 0) {
- PMD_DRV_LOG(ERR, "connect error, %s", strerror(errno));
- close(fd);
- return -1;
+
+ if (dev->is_server) {
+ dev->listenfd = fd;
+ if (virtio_user_start_server(dev, &un) < 0) {
+ PMD_DRV_LOG(ERR, "virtio-user failed to start in server mode");
+ close(fd);
+ return -1;
+ }
+ dev->vhostfd = -1;
+ } else {
+ if (connect(fd, (struct sockaddr *)&un, sizeof(un)) < 0) {
+ PMD_DRV_LOG(ERR, "connect error, %s", strerror(errno));
+ close(fd);
+ return -1;
+ }
+ dev->vhostfd = fd;
}
- dev->vhostfd = fd;
return 0;
}
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index f90fee9e..4322527f 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -17,6 +17,8 @@
#include "virtio_user_dev.h"
#include "../virtio_ethdev.h"
+#define VIRTIO_USER_MEM_EVENT_CLB_NAME "virtio_user_mem_event_clb"
+
static int
virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
@@ -94,11 +96,27 @@ virtio_user_queue_setup(struct virtio_user_dev *dev,
}
int
+is_vhost_user_by_type(const char *path)
+{
+ struct stat sb;
+
+ if (stat(path, &sb) == -1)
+ return 0;
+
+ return S_ISSOCK(sb.st_mode);
+}
+
+int
virtio_user_start_device(struct virtio_user_dev *dev)
{
uint64_t features;
int ret;
+ pthread_mutex_lock(&dev->mutex);
+
+ if (is_vhost_user_by_type(dev->path) && dev->vhostfd < 0)
+ goto error;
+
/* Do not check return as already done in init, or reset in stop */
dev->ops->send_request(dev, VHOST_USER_SET_OWNER, NULL);
@@ -132,8 +150,12 @@ virtio_user_start_device(struct virtio_user_dev *dev)
*/
dev->ops->enable_qp(dev, 0, 1);
+ dev->started = true;
+ pthread_mutex_unlock(&dev->mutex);
+
return 0;
error:
+ pthread_mutex_unlock(&dev->mutex);
/* TODO: free resource here or caller to check */
return -1;
}
@@ -142,13 +164,17 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
{
uint32_t i;
+ pthread_mutex_lock(&dev->mutex);
for (i = 0; i < dev->max_queue_pairs; ++i)
dev->ops->enable_qp(dev, i, 0);
if (dev->ops->send_request(dev, VHOST_USER_RESET_OWNER, NULL) < 0) {
PMD_DRV_LOG(INFO, "Failed to reset the device\n");
+ pthread_mutex_unlock(&dev->mutex);
return -1;
}
+ dev->started = false;
+ pthread_mutex_unlock(&dev->mutex);
return 0;
}
@@ -174,17 +200,6 @@ parse_mac(struct virtio_user_dev *dev, const char *mac)
}
}
-int
-is_vhost_user_by_type(const char *path)
-{
- struct stat sb;
-
- if (stat(path, &sb) == -1)
- return 0;
-
- return S_ISSOCK(sb.st_mode);
-}
-
static int
virtio_user_dev_init_notify(struct virtio_user_dev *dev)
{
@@ -254,10 +269,41 @@ virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
eth_dev->intr_handle->fd = -1;
if (dev->vhostfd >= 0)
eth_dev->intr_handle->fd = dev->vhostfd;
+ else if (dev->is_server)
+ eth_dev->intr_handle->fd = dev->listenfd;
return 0;
}
+static void
+virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
+ const void *addr __rte_unused,
+ size_t len __rte_unused,
+ void *arg)
+{
+ struct virtio_user_dev *dev = arg;
+ uint16_t i;
+
+ pthread_mutex_lock(&dev->mutex);
+
+ if (dev->started == false)
+ goto exit;
+
+ /* Step 1: pause the active queues */
+ for (i = 0; i < dev->queue_pairs; i++)
+ dev->ops->enable_qp(dev, i, 0);
+
+ /* Step 2: update memory regions */
+ dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
+
+ /* Step 3: resume the active queues */
+ for (i = 0; i < dev->queue_pairs; i++)
+ dev->ops->enable_qp(dev, i, 1);
+
+exit:
+ pthread_mutex_unlock(&dev->mutex);
+}
+
static int
virtio_user_dev_setup(struct virtio_user_dev *dev)
{
@@ -267,21 +313,32 @@ virtio_user_dev_setup(struct virtio_user_dev *dev)
dev->vhostfds = NULL;
dev->tapfds = NULL;
- if (is_vhost_user_by_type(dev->path)) {
- dev->ops = &ops_user;
- } else {
- dev->ops = &ops_kernel;
-
- dev->vhostfds = malloc(dev->max_queue_pairs * sizeof(int));
- dev->tapfds = malloc(dev->max_queue_pairs * sizeof(int));
- if (!dev->vhostfds || !dev->tapfds) {
- PMD_INIT_LOG(ERR, "Failed to malloc");
+ if (dev->is_server) {
+ if (access(dev->path, F_OK) == 0 &&
+ !is_vhost_user_by_type(dev->path)) {
+ PMD_DRV_LOG(ERR, "Server mode doesn't support vhost-kernel!");
return -1;
}
-
- for (q = 0; q < dev->max_queue_pairs; ++q) {
- dev->vhostfds[q] = -1;
- dev->tapfds[q] = -1;
+ dev->ops = &ops_user;
+ } else {
+ if (is_vhost_user_by_type(dev->path)) {
+ dev->ops = &ops_user;
+ } else {
+ dev->ops = &ops_kernel;
+
+ dev->vhostfds = malloc(dev->max_queue_pairs *
+ sizeof(int));
+ dev->tapfds = malloc(dev->max_queue_pairs *
+ sizeof(int));
+ if (!dev->vhostfds || !dev->tapfds) {
+ PMD_INIT_LOG(ERR, "Failed to malloc");
+ return -1;
+ }
+
+ for (q = 0; q < dev->max_queue_pairs; ++q) {
+ dev->vhostfds[q] = -1;
+ dev->tapfds[q] = -1;
+ }
}
}
@@ -320,7 +377,9 @@ int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
int cq, int queue_size, const char *mac, char **ifname)
{
+ pthread_mutex_init(&dev->mutex, NULL);
snprintf(dev->path, PATH_MAX, "%s", path);
+ dev->started = 0;
dev->max_queue_pairs = queues;
dev->queue_pairs = 1; /* mq disabled by default */
dev->queue_size = queue_size;
@@ -337,18 +396,33 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
return -1;
}
- if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER, NULL) < 0) {
- PMD_INIT_LOG(ERR, "set_owner fails: %s", strerror(errno));
- return -1;
- }
+ if (!dev->is_server) {
+ if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER,
+ NULL) < 0) {
+ PMD_INIT_LOG(ERR, "set_owner fails: %s",
+ strerror(errno));
+ return -1;
+ }
- if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
- &dev->device_features) < 0) {
- PMD_INIT_LOG(ERR, "get_features failed: %s", strerror(errno));
- return -1;
+ if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
+ &dev->device_features) < 0) {
+ PMD_INIT_LOG(ERR, "get_features failed: %s",
+ strerror(errno));
+ return -1;
+ }
+ } else {
+ /* We just pretend vhost-user can support all these features.
+ * Note that this could be problematic if some feature is
+ * negotiated but not supported by the vhost-user backend that
+ * connects later.
+ */
+ dev->device_features = VIRTIO_USER_SUPPORTED_FEATURES;
}
+
if (dev->mac_specified)
dev->device_features |= (1ull << VIRTIO_NET_F_MAC);
+ else
+ dev->device_features &= ~(1ull << VIRTIO_NET_F_MAC);
if (cq) {
/* device does not really need to know anything about CQ,
@@ -371,6 +445,15 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
dev->device_features &= VIRTIO_USER_SUPPORTED_FEATURES;
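+ /* keep the backend's memory table in sync when DPDK maps or unmaps memory at runtime */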
+ if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME,
+ virtio_user_mem_event_cb, dev)) {
+ if (rte_errno != ENOTSUP) {
+ PMD_INIT_LOG(ERR, "Failed to register mem event"
+ " callback\n");
+ return -1;
+ }
+ }
+
return 0;
}
@@ -381,6 +464,8 @@ virtio_user_dev_uninit(struct virtio_user_dev *dev)
virtio_user_stop_device(dev);
+ rte_mem_event_callback_unregister(VIRTIO_USER_MEM_EVENT_CLB_NAME, dev);
+
for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
close(dev->callfds[i]);
close(dev->kickfds[i]);
@@ -388,6 +473,11 @@ virtio_user_dev_uninit(struct virtio_user_dev *dev)
close(dev->vhostfd);
+ if (dev->is_server && dev->listenfd >= 0) {
+ close(dev->listenfd);
+ dev->listenfd = -1;
+ }
+
if (dev->vhostfds) {
for (i = 0; i < dev->max_queue_pairs; ++i)
close(dev->vhostfds[i]);
@@ -396,9 +486,12 @@ virtio_user_dev_uninit(struct virtio_user_dev *dev)
}
free(dev->ifname);
+
+ if (dev->is_server)
+ unlink(dev->path);
}
-static uint8_t
+uint8_t
virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
{
uint16_t i;
@@ -410,11 +503,17 @@ virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
return -1;
}
- for (i = 0; i < q_pairs; ++i)
- ret |= dev->ops->enable_qp(dev, i, 1);
- for (i = q_pairs; i < dev->max_queue_pairs; ++i)
- ret |= dev->ops->enable_qp(dev, i, 0);
-
+ /* Server mode can't enable queue pairs if vhostfd is invalid,
+ * always return 0 in this case.
+ */
+ if (dev->vhostfd >= 0) {
+ for (i = 0; i < q_pairs; ++i)
+ ret |= dev->ops->enable_qp(dev, i, 1);
+ for (i = q_pairs; i < dev->max_queue_pairs; ++i)
+ ret |= dev->ops->enable_qp(dev, i, 0);
+ } else if (!dev->is_server) {
+ ret = ~0;
+ }
dev->queue_pairs = q_pairs;
return ret;
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h
index 64467b4f..d2d4cb82 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.h
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h
@@ -6,6 +6,7 @@
#define _VIRTIO_USER_DEV_H
#include <limits.h>
+#include <stdbool.h>
#include "../virtio_pci.h"
#include "../virtio_ring.h"
#include "vhost.h"
@@ -13,6 +14,8 @@
struct virtio_user_dev {
/* for vhost_user backend */
int vhostfd;
+ int listenfd; /* listening fd */
+ bool is_server; /* server or client mode */
/* for vhost_kernel backend */
char *ifname;
@@ -31,11 +34,13 @@ struct virtio_user_dev {
*/
uint64_t device_features; /* supported features by device */
uint8_t status;
- uint8_t port_id;
+ uint16_t port_id;
uint8_t mac_addr[ETHER_ADDR_LEN];
char path[PATH_MAX];
struct vring vrings[VIRTIO_MAX_VIRTQUEUES];
struct virtio_user_backend_ops *ops;
+ pthread_mutex_t mutex;
+ bool started;
};
int is_vhost_user_by_type(const char *path);
@@ -45,4 +50,5 @@ int virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
int cq, int queue_size, const char *mac, char **ifname);
void virtio_user_dev_uninit(struct virtio_user_dev *dev);
void virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx);
+uint8_t virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs);
#endif
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index 26364900..1c102ca7 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -24,15 +24,100 @@
#define virtio_user_get_dev(hw) \
((struct virtio_user_dev *)(hw)->virtio_user_dev)
+static int
+virtio_user_server_reconnect(struct virtio_user_dev *dev)
+{
+ int ret;
+ int flag;
+ int connectfd;
+ uint64_t features = dev->device_features;
+ struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
+
+ connectfd = accept(dev->listenfd, NULL, NULL);
+ if (connectfd < 0)
+ return -1;
+
+ dev->vhostfd = connectfd;
+ if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
+ &dev->device_features) < 0) {
+ PMD_INIT_LOG(ERR, "get_features failed: %s",
+ strerror(errno));
+ return -1;
+ }
+
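+ /* find the previously negotiated features that the new backend no longer offers */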
+ features &= ~dev->device_features;
+ /* For following bits, vhost-user doesn't really need to know */
+ features &= ~(1ull << VIRTIO_NET_F_MAC);
+ features &= ~(1ull << VIRTIO_NET_F_CTRL_VLAN);
+ features &= ~(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
+ features &= ~(1ull << VIRTIO_NET_F_STATUS);
+ if (features)
+ PMD_INIT_LOG(ERR, "WARNING: Some features 0x%" PRIx64 " are not supported by vhost-user!",
+ features);
+
+ dev->features &= dev->device_features;
+
+ flag = fcntl(connectfd, F_GETFD);
+ fcntl(connectfd, F_SETFL, flag | O_NONBLOCK);
+
+ ret = virtio_user_start_device(dev);
+ if (ret < 0)
+ return -1;
+
+ if (dev->queue_pairs > 1) {
+ ret = virtio_user_handle_mq(dev, dev->queue_pairs);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to enable multi-queue pairs!");
+ return -1;
+ }
+ }
+ if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
+ if (rte_intr_disable(eth_dev->intr_handle) < 0) {
+ PMD_DRV_LOG(ERR, "interrupt disable failed");
+ return -1;
+ }
+ rte_intr_callback_unregister(eth_dev->intr_handle,
+ virtio_interrupt_handler,
+ eth_dev);
+ eth_dev->intr_handle->fd = connectfd;
+ rte_intr_callback_register(eth_dev->intr_handle,
+ virtio_interrupt_handler, eth_dev);
+
+ if (rte_intr_enable(eth_dev->intr_handle) < 0) {
+ PMD_DRV_LOG(ERR, "interrupt enable failed");
+ return -1;
+ }
+ }
+ PMD_INIT_LOG(NOTICE, "server mode virtio-user reconnection succeeded!");
+ return 0;
+}
+
static void
virtio_user_delayed_handler(void *param)
{
struct virtio_hw *hw = (struct virtio_hw *)param;
- struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
+ struct rte_eth_dev *eth_dev = &rte_eth_devices[hw->port_id];
+ struct virtio_user_dev *dev = virtio_user_get_dev(hw);
- rte_intr_callback_unregister(dev->intr_handle,
- virtio_interrupt_handler,
- dev);
+ if (rte_intr_disable(eth_dev->intr_handle) < 0) {
+ PMD_DRV_LOG(ERR, "interrupt disable failed");
+ return;
+ }
+ rte_intr_callback_unregister(eth_dev->intr_handle,
+ virtio_interrupt_handler, eth_dev);
+ if (dev->is_server) {
+ if (dev->vhostfd >= 0) {
+ close(dev->vhostfd);
+ dev->vhostfd = -1;
+ }
+ eth_dev->intr_handle->fd = dev->listenfd;
+ rte_intr_callback_register(eth_dev->intr_handle,
+ virtio_interrupt_handler, eth_dev);
+ if (rte_intr_enable(eth_dev->intr_handle) < 0) {
+ PMD_DRV_LOG(ERR, "interrupt enable failed");
+ return;
+ }
+ }
}
static void
@@ -67,12 +152,10 @@ virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
dev->status &= (~VIRTIO_NET_S_LINK_UP);
PMD_DRV_LOG(ERR, "virtio-user port %u is down",
hw->port_id);
- /* Only client mode is available now. Once the
- * connection is broken, it can never be up
- * again. Besides, this function could be called
- * in the process of interrupt handling,
- * callback cannot be unregistered here, set an
- * alarm to do it.
+
+ /* This function could be called in the process
+ * of interrupt handling, callback cannot be
+ * unregistered here, set an alarm to do it.
*/
rte_eal_alarm_set(1,
virtio_user_delayed_handler,
@@ -85,7 +168,12 @@ virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
PMD_DRV_LOG(ERR, "error clearing O_NONBLOCK flag");
return;
}
+ } else if (dev->is_server) {
+ dev->status &= (~VIRTIO_NET_S_LINK_UP);
+ if (virtio_user_server_reconnect(dev) >= 0)
+ dev->status |= VIRTIO_NET_S_LINK_UP;
}
+
*(uint16_t *)dst = dev->status;
}
@@ -278,12 +366,15 @@ static const char *valid_args[] = {
VIRTIO_USER_ARG_QUEUE_SIZE,
#define VIRTIO_USER_ARG_INTERFACE_NAME "iface"
VIRTIO_USER_ARG_INTERFACE_NAME,
+#define VIRTIO_USER_ARG_SERVER_MODE "server"
+ VIRTIO_USER_ARG_SERVER_MODE,
NULL
};
#define VIRTIO_USER_DEF_CQ_EN 0
#define VIRTIO_USER_DEF_Q_NUM 1
#define VIRTIO_USER_DEF_Q_SZ 256
+#define VIRTIO_USER_DEF_SERVER_MODE 0
static int
get_string_arg(const char *key __rte_unused,
@@ -378,6 +469,7 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
uint64_t queues = VIRTIO_USER_DEF_Q_NUM;
uint64_t cq = VIRTIO_USER_DEF_CQ_EN;
uint64_t queue_size = VIRTIO_USER_DEF_Q_SZ;
+ uint64_t server_mode = VIRTIO_USER_DEF_SERVER_MODE;
char *path = NULL;
char *ifname = NULL;
char *mac_addr = NULL;
@@ -445,6 +537,15 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
}
}
+ if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_SERVER_MODE) == 1) {
+ if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_SERVER_MODE,
+ &get_integer_arg, &server_mode) < 0) {
+ PMD_INIT_LOG(ERR, "error to parse %s",
+ VIRTIO_USER_ARG_SERVER_MODE);
+ goto end;
+ }
+ }
+
if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_CQ_NUM) == 1) {
if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_CQ_NUM,
&get_integer_arg, &cq) < 0) {
@@ -469,6 +570,8 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
}
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ struct virtio_user_dev *vu_dev;
+
eth_dev = virtio_user_eth_dev_alloc(dev);
if (!eth_dev) {
PMD_INIT_LOG(ERR, "virtio_user fails to alloc device");
@@ -476,12 +579,18 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
}
hw = eth_dev->data->dev_private;
+ vu_dev = virtio_user_get_dev(hw);
+ if (server_mode == 1)
+ vu_dev->is_server = true;
+ else
+ vu_dev->is_server = false;
if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq,
queue_size, mac_addr, &ifname) < 0) {
PMD_INIT_LOG(ERR, "virtio_user_dev_init fails");
virtio_user_eth_dev_free(eth_dev);
goto end;
}
+
} else {
eth_dev = rte_eth_dev_attach_secondary(rte_vdev_device_name(dev));
if (!eth_dev)
@@ -494,6 +603,8 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
virtio_user_eth_dev_free(eth_dev);
goto end;
}
+
+ rte_eth_dev_probing_finish(eth_dev);
ret = 0;
end:
diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile
index 6bfbf019..f1141da6 100644
--- a/drivers/net/vmxnet3/Makefile
+++ b/drivers/net/vmxnet3/Makefile
@@ -8,6 +8,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
#
LIB = librte_pmd_vmxnet3_uio.a
+CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
@@ -15,7 +16,8 @@ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
#
# CFLAGS for icc
#
-CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259
+CFLAGS_BASE_DRIVER = -diag-disable 174 -diag-disable 593 -diag-disable 869
+CFLAGS_BASE_DRIVER += -diag-disable 981 -diag-disable 2259
else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
#
diff --git a/drivers/net/vmxnet3/base/upt1_defs.h b/drivers/net/vmxnet3/base/upt1_defs.h
index cf9141b2..5fd7a397 100644
--- a/drivers/net/vmxnet3/base/upt1_defs.h
+++ b/drivers/net/vmxnet3/base/upt1_defs.h
@@ -1,9 +1,6 @@
-/*********************************************************
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2007 VMware, Inc. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- *
- *********************************************************/
+ */
/* upt1_defs.h
*
diff --git a/drivers/net/vmxnet3/base/vmxnet3_defs.h b/drivers/net/vmxnet3/base/vmxnet3_defs.h
index a455e270..bbec708c 100644
--- a/drivers/net/vmxnet3/base/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/base/vmxnet3_defs.h
@@ -1,9 +1,6 @@
-/*********************************************************
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2007 VMware, Inc. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- *
- *********************************************************/
+ */
/*
* vmxnet3_defs.h --
@@ -327,7 +324,32 @@ struct Vmxnet3_RxCompDescExt {
uint8 segCnt; /* Number of aggregated packets */
uint8 dupAckCnt; /* Number of duplicate Acks */
__le16 tsDelta; /* TCP timestamp difference */
- __le32 dword2[2];
+ __le32 dword2;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32 gen : 1; /* generation bit */
+ uint32 type : 7; /* completion type */
+ uint32 fcs : 1; /* Frame CRC correct */
+ uint32 frg : 1; /* IP Fragment */
+ uint32 v4 : 1; /* IPv4 */
+ uint32 v6 : 1; /* IPv6 */
+ uint32 ipc : 1; /* IP Checksum Correct */
+ uint32 tcp : 1; /* TCP packet */
+ uint32 udp : 1; /* UDP packet */
+ uint32 tuc : 1; /* TCP/UDP Checksum Correct */
+ uint32 mss : 16;
+#else
+ uint32 mss : 16;
+ uint32 tuc : 1; /* TCP/UDP Checksum Correct */
+ uint32 udp : 1; /* UDP packet */
+ uint32 tcp : 1; /* TCP packet */
+ uint32 ipc : 1; /* IP Checksum Correct */
+ uint32 v6 : 1; /* IPv6 */
+ uint32 v4 : 1; /* IPv4 */
+ uint32 frg : 1; /* IP Fragment */
+ uint32 fcs : 1; /* Frame CRC correct */
+ uint32 type : 7; /* completion type */
+ uint32 gen : 1; /* generation bit */
+#endif /* __BIG_ENDIAN_BITFIELD */
}
#include "vmware_pack_end.h"
Vmxnet3_RxCompDescExt;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index 4e68aae6..ba932ff2 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -20,7 +20,6 @@
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
-#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
@@ -43,6 +42,23 @@
#define VMXNET3_TX_MAX_SEG UINT8_MAX
+#define VMXNET3_TX_OFFLOAD_CAP \
+ (DEV_TX_OFFLOAD_VLAN_INSERT | \
+ DEV_TX_OFFLOAD_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_TSO | \
+ DEV_TX_OFFLOAD_MULTI_SEGS)
+
+#define VMXNET3_RX_OFFLOAD_CAP \
+ (DEV_RX_OFFLOAD_VLAN_STRIP | \
+ DEV_RX_OFFLOAD_SCATTER | \
+ DEV_RX_OFFLOAD_IPV4_CKSUM | \
+ DEV_RX_OFFLOAD_UDP_CKSUM | \
+ DEV_RX_OFFLOAD_TCP_CKSUM | \
+ DEV_RX_OFFLOAD_TCP_LRO | \
+ DEV_RX_OFFLOAD_JUMBO_FRAME)
+
static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev);
static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev);
static int vmxnet3_dev_configure(struct rte_eth_dev *dev);
@@ -73,7 +89,7 @@ vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vid, int on);
static int vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
-static void vmxnet3_mac_addr_set(struct rte_eth_dev *dev,
+static int vmxnet3_mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static void vmxnet3_interrupt_handler(void *param);
@@ -151,66 +167,14 @@ gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
if (mz)
rte_memzone_free(mz);
return rte_memzone_reserve_aligned(z_name, size, socket_id,
- 0, align);
+ RTE_MEMZONE_IOVA_CONTIG, align);
}
if (mz)
return mz;
- return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align);
-}
-
-/**
- * Atomically reads the link status information from global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-
-static int
-vmxnet3_dev_atomic_read_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = link;
- struct rte_eth_link *src = &(dev->data->dev_link);
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
-/**
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to write to.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static int
-vmxnet3_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &(dev->data->dev_link);
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
+ return rte_memzone_reserve_aligned(z_name, size, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, align);
}
/*
@@ -267,6 +231,7 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
struct rte_pci_device *pci_dev;
struct vmxnet3_hw *hw = eth_dev->data->dev_private;
uint32_t mac_hi, mac_lo, ver;
+ struct rte_eth_link link;
PMD_INIT_FUNC_TRACE();
@@ -369,6 +334,13 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
memset(hw->saved_tx_stats, 0, sizeof(hw->saved_tx_stats));
memset(hw->saved_rx_stats, 0, sizeof(hw->saved_rx_stats));
+ /* set the initial link status */
+ memset(&link, 0, sizeof(link));
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = ETH_SPEED_NUM_10G;
+ link.link_autoneg = ETH_LINK_FIXED;
+ rte_eth_linkstatus_set(eth_dev, &link);
+
return 0;
}
@@ -612,9 +584,12 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
uint32_t mtu = dev->data->mtu;
Vmxnet3_DriverShared *shared = hw->shared;
Vmxnet3_DSDevRead *devRead = &shared->devRead;
+ uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
uint32_t i;
int ret;
+ hw->mtu = mtu;
+
shared->magic = VMXNET3_REV1_MAGIC;
devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;
@@ -644,6 +619,8 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
Vmxnet3_TxQueueDesc *tqd = &hw->tqd_start[i];
vmxnet3_tx_queue_t *txq = dev->data->tx_queues[i];
+ txq->shared = &hw->tqd_start[i];
+
tqd->ctrl.txNumDeferred = 0;
tqd->ctrl.txThreshold = 1;
tqd->conf.txRingBasePA = txq->cmd_ring.basePA;
@@ -664,6 +641,8 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
Vmxnet3_RxQueueDesc *rqd = &hw->rqd_start[i];
vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];
+ rxq->shared = &hw->rqd_start[i];
+
rqd->conf.rxRingBasePA[0] = rxq->cmd_ring[0].basePA;
rqd->conf.rxRingBasePA[1] = rxq->cmd_ring[1].basePA;
rqd->conf.compRingBasePA = rxq->comp_ring.basePA;
@@ -685,10 +664,10 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
devRead->rxFilterConf.rxMode = 0;
/* Setting up feature flags */
- if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+ if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;
- if (dev->data->dev_conf.rxmode.enable_lro) {
+ if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
devRead->misc.uptFeatures |= VMXNET3_F_LRO;
devRead->misc.maxNumRxSG = 0;
}
@@ -853,7 +832,10 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev)
/* Clear recorded link status */
memset(&link, 0, sizeof(link));
- vmxnet3_dev_atomic_write_link_status(dev, &link);
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = ETH_SPEED_NUM_10G;
+ link.link_autoneg = ETH_LINK_FIXED;
+ rte_eth_linkstatus_set(dev, &link);
}
/*
@@ -1054,18 +1036,16 @@ vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
stats->q_errors[i] = rxStats.pktsRxError;
stats->ierrors += rxStats.pktsRxError;
- stats->rx_nombuf += rxStats.pktsRxOutOfBuf;
+ stats->imissed += rxStats.pktsRxOutOfBuf;
}
return 0;
}
static void
-vmxnet3_dev_info_get(struct rte_eth_dev *dev,
+vmxnet3_dev_info_get(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_dev_info *dev_info)
{
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-
dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
@@ -1090,17 +1070,10 @@ vmxnet3_dev_info_get(struct rte_eth_dev *dev,
.nb_mtu_seg_max = VMXNET3_MAX_TXD_PER_PKT,
};
- dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_TCP_LRO;
-
- dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ dev_info->rx_offload_capa = VMXNET3_RX_OFFLOAD_CAP;
+ dev_info->rx_queue_offload_capa = 0;
+ dev_info->tx_offload_capa = VMXNET3_TX_OFFLOAD_CAP;
+ dev_info->tx_queue_offload_capa = 0;
}
static const uint32_t *
@@ -1117,13 +1090,14 @@ vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
-static void
+static int
vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
ether_addr_copy(mac_addr, (struct ether_addr *)(hw->perm_addr));
vmxnet3_write_mac(hw, mac_addr->addr_bytes);
+ return 0;
}
/* return 0 means link status changed, -1 means not changed */
@@ -1132,25 +1106,21 @@ __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
- struct rte_eth_link old = { 0 }, link;
+ struct rte_eth_link link;
uint32_t ret;
memset(&link, 0, sizeof(link));
- vmxnet3_dev_atomic_read_link_status(dev, &old);
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
- if (ret & 0x1) {
+ if (ret & 0x1)
link.link_status = ETH_LINK_UP;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
- link.link_speed = ETH_SPEED_NUM_10G;
- link.link_autoneg = ETH_LINK_AUTONEG;
- }
-
- vmxnet3_dev_atomic_write_link_status(dev, &link);
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = ETH_SPEED_NUM_10G;
+ link.link_autoneg = ETH_LINK_FIXED;
- return (old.link_status == link.link_status) ? -1 : 0;
+ return rte_eth_linkstatus_set(dev, &link);
}
static int
@@ -1197,8 +1167,9 @@ vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
+ uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
else
memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
@@ -1260,9 +1231,10 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
struct vmxnet3_hw *hw = dev->data->dev_private;
Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
uint32_t *vf_table = devRead->rxFilterConf.vfTable;
+ uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
if (mask & ETH_VLAN_STRIP_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
else
devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
@@ -1272,7 +1244,7 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
}
if (mask & ETH_VLAN_FILTER_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
else
memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
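Side note (not part of the patch): the hunks above move the PMD from the old rxmode.hw_* bit flags to the per-port offloads bitmask. A minimal, hedged sketch of how an application would request the offloads this driver now advertises, using the generic rte_ethdev API of this release; queue counts and the selected flags are arbitrary example values:

#include <string.h>
#include <rte_ethdev.h>

static int
example_configure(uint16_t port_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));

	/* Per-port Rx/Tx offload requests, matching VMXNET3_*_OFFLOAD_CAP */
	conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM |
			       DEV_RX_OFFLOAD_VLAN_STRIP |
			       DEV_RX_OFFLOAD_TCP_LRO;
	conf.txmode.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
			       DEV_TX_OFFLOAD_TCP_CKSUM |
			       DEV_TX_OFFLOAD_TCP_TSO;

	/* One Rx and one Tx queue is enough for the example */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}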
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h
index b2a8cf35..d3f2b352 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.h
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h
@@ -87,6 +87,7 @@ struct vmxnet3_hw {
uint64_t queueDescPA;
uint16_t queue_desc_len;
+ uint16_t mtu;
VMXNET3_RSSConf *rss_conf;
uint64_t rss_confPA;
diff --git a/drivers/net/vmxnet3/vmxnet3_ring.h b/drivers/net/vmxnet3/vmxnet3_ring.h
index 552180e8..50992349 100644
--- a/drivers/net/vmxnet3/vmxnet3_ring.h
+++ b/drivers/net/vmxnet3/vmxnet3_ring.h
@@ -14,7 +14,7 @@
#define VMXNET3_DEF_RX_RING_SIZE 128
/* Default rx data ring desc size */
-#define VMXNET3_DEF_RXDATA_DESC_SIZE 128
+#define VMXNET3_DEF_RXDATA_DESC_SIZE 256
#define VMXNET3_SUCCESS 0
#define VMXNET3_FAIL -1
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index 3a8c62fc..cf85f3d6 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -457,6 +457,14 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
rte_pktmbuf_pkt_len(txm) <= txq->txdata_desc_size) {
struct Vmxnet3_TxDataDesc *tdd;
+ /* Skip empty packets */
+ if (unlikely(rte_pktmbuf_pkt_len(txm) == 0)) {
+ txq->stats.drop_total++;
+ rte_pktmbuf_free(txm);
+ nb_tx++;
+ continue;
+ }
+
tdd = (struct Vmxnet3_TxDataDesc *)
((uint8 *)txq->data_ring.base +
txq->cmd_ring.next2fill *
@@ -477,6 +485,11 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* maximum size of mbuf segment size.
*/
gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
+
+ /* Skip empty segments */
+ if (unlikely(m_seg->data_len == 0))
+ continue;
+
if (copy_size) {
uint64 offset =
(uint64)txq->cmd_ring.next2fill *
@@ -646,37 +659,154 @@ vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
return i;
}
-
-/* Receive side checksum and other offloads */
-static void
-vmxnet3_rx_offload(const Vmxnet3_RxCompDesc *rcd, struct rte_mbuf *rxm)
+/* MSS not provided by vmxnet3, guess one with available information */
+static uint16_t
+vmxnet3_guess_mss(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
+ struct rte_mbuf *rxm)
{
- /* Check for RSS */
- if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) {
- rxm->ol_flags |= PKT_RX_RSS_HASH;
- rxm->hash.rss = rcd->rssHash;
- }
+ uint32_t hlen, slen;
+ struct ipv4_hdr *ipv4_hdr;
+ struct ipv6_hdr *ipv6_hdr;
+ struct tcp_hdr *tcp_hdr;
+ char *ptr;
+
+ RTE_ASSERT(rcd->tcp);
+
+ ptr = rte_pktmbuf_mtod(rxm, char *);
+ slen = rte_pktmbuf_data_len(rxm);
+ hlen = sizeof(struct ether_hdr);
- /* Check packet type, checksum errors, etc. Only support IPv4 for now. */
if (rcd->v4) {
- struct ether_hdr *eth = rte_pktmbuf_mtod(rxm, struct ether_hdr *);
- struct ipv4_hdr *ip = (struct ipv4_hdr *)(eth + 1);
+ if (unlikely(slen < hlen + sizeof(struct ipv4_hdr)))
+ return hw->mtu - sizeof(struct ipv4_hdr)
+ - sizeof(struct tcp_hdr);
+
+ ipv4_hdr = (struct ipv4_hdr *)(ptr + hlen);
+ hlen += (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
+ IPV4_IHL_MULTIPLIER;
+ } else if (rcd->v6) {
+ if (unlikely(slen < hlen + sizeof(struct ipv6_hdr)))
+ return hw->mtu - sizeof(struct ipv6_hdr) -
+ sizeof(struct tcp_hdr);
+
+ ipv6_hdr = (struct ipv6_hdr *)(ptr + hlen);
+ hlen += sizeof(struct ipv6_hdr);
+ if (unlikely(ipv6_hdr->proto != IPPROTO_TCP)) {
+ int frag;
+
+ rte_net_skip_ip6_ext(ipv6_hdr->proto, rxm,
+ &hlen, &frag);
+ }
+ }
+
+ if (unlikely(slen < hlen + sizeof(struct tcp_hdr)))
+ return hw->mtu - hlen - sizeof(struct tcp_hdr) +
+ sizeof(struct ether_hdr);
- if (((ip->version_ihl & 0xf) << 2) > (int)sizeof(struct ipv4_hdr))
- rxm->packet_type = RTE_PTYPE_L3_IPV4_EXT;
- else
- rxm->packet_type = RTE_PTYPE_L3_IPV4;
+ tcp_hdr = (struct tcp_hdr *)(ptr + hlen);
+ hlen += (tcp_hdr->data_off & 0xf0) >> 2;
- if (!rcd->cnc) {
- if (!rcd->ipc)
- rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ if (rxm->udata64 > 1)
+ return (rte_pktmbuf_pkt_len(rxm) - hlen +
+ rxm->udata64 - 1) / rxm->udata64;
+ else
+ return hw->mtu - hlen + sizeof(struct ether_hdr);
+}
- if ((rcd->tcp || rcd->udp) && !rcd->tuc)
- rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+/* Receive side checksum and other offloads */
+static inline void
+vmxnet3_rx_offload(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
+ struct rte_mbuf *rxm, const uint8_t sop)
+{
+ uint64_t ol_flags = rxm->ol_flags;
+ uint32_t packet_type = rxm->packet_type;
+
+ /* Offloads set in sop */
+ if (sop) {
+ /* Set packet type */
+ packet_type |= RTE_PTYPE_L2_ETHER;
+
+ /* Check large packet receive */
+ if (VMXNET3_VERSION_GE_2(hw) &&
+ rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
+ const Vmxnet3_RxCompDescExt *rcde =
+ (const Vmxnet3_RxCompDescExt *)rcd;
+
+ rxm->tso_segsz = rcde->mss;
+ rxm->udata64 = rcde->segCnt;
+ ol_flags |= PKT_RX_LRO;
+ }
+ } else { /* Offloads set in eop */
+ /* Check for RSS */
+ if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) {
+ ol_flags |= PKT_RX_RSS_HASH;
+ rxm->hash.rss = rcd->rssHash;
+ }
+
+ /* Check for hardware stripped VLAN tag */
+ if (rcd->ts) {
+ ol_flags |= (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+ rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
+ }
+
+ /* Check packet type, checksum errors, etc. */
+ if (rcd->cnc) {
+ ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+ } else {
+ if (rcd->v4) {
+ packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+
+ if (rcd->ipc)
+ ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ else
+ ol_flags |= PKT_RX_IP_CKSUM_BAD;
+
+ if (rcd->tuc) {
+ ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ if (rcd->tcp)
+ packet_type |= RTE_PTYPE_L4_TCP;
+ else
+ packet_type |= RTE_PTYPE_L4_UDP;
+ } else {
+ if (rcd->tcp) {
+ packet_type |= RTE_PTYPE_L4_TCP;
+ ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ } else if (rcd->udp) {
+ packet_type |= RTE_PTYPE_L4_UDP;
+ ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
+ }
+ } else if (rcd->v6) {
+ packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+
+ if (rcd->tuc) {
+ ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ if (rcd->tcp)
+ packet_type |= RTE_PTYPE_L4_TCP;
+ else
+ packet_type |= RTE_PTYPE_L4_UDP;
+ } else {
+ if (rcd->tcp) {
+ packet_type |= RTE_PTYPE_L4_TCP;
+ ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ } else if (rcd->udp) {
+ packet_type |= RTE_PTYPE_L4_UDP;
+ ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
+ }
+ } else {
+ packet_type |= RTE_PTYPE_UNKNOWN;
+ }
+
+ /* Old variants of vmxnet3 do not provide MSS */
+ if ((ol_flags & PKT_RX_LRO) && rxm->tso_segsz == 0)
+ rxm->tso_segsz = vmxnet3_guess_mss(hw,
+ rcd, rxm);
}
- } else {
- rxm->packet_type = RTE_PTYPE_UNKNOWN;
}
+
+ rxm->ol_flags = ol_flags;
+ rxm->packet_type = packet_type;
}
/*
@@ -776,6 +906,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxm->data_off = RTE_PKTMBUF_HEADROOM;
rxm->ol_flags = 0;
rxm->vlan_tci = 0;
+ rxm->packet_type = 0;
/*
* If this is the first buffer of the received packet,
@@ -807,29 +938,28 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
}
rxq->start_seg = rxm;
- vmxnet3_rx_offload(rcd, rxm);
+ rxq->last_seg = rxm;
+ vmxnet3_rx_offload(hw, rcd, rxm, 1);
} else {
struct rte_mbuf *start = rxq->start_seg;
RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY);
- start->pkt_len += rxm->data_len;
- start->nb_segs++;
+ if (rxm->data_len) {
+ start->pkt_len += rxm->data_len;
+ start->nb_segs++;
- rxq->last_seg->next = rxm;
+ rxq->last_seg->next = rxm;
+ rxq->last_seg = rxm;
+ } else {
+ rte_pktmbuf_free_seg(rxm);
+ }
}
- rxq->last_seg = rxm;
if (rcd->eop) {
struct rte_mbuf *start = rxq->start_seg;
- /* Check for hardware stripped VLAN tag */
- if (rcd->ts) {
- start->ol_flags |= (PKT_RX_VLAN |
- PKT_RX_VLAN_STRIPPED);
- start->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
- }
-
+ vmxnet3_rx_offload(hw, rcd, start, 0);
rx_pkts[nb_rx++] = start;
rxq->start_seg = NULL;
}
@@ -883,7 +1013,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id,
- const struct rte_eth_txconf *tx_conf)
+ const struct rte_eth_txconf *tx_conf __rte_unused)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
const struct rte_memzone *mz;
@@ -895,12 +1025,6 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
- if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP) !=
- ETH_TXQ_FLAGS_NOXSUMSCTP) {
- PMD_INIT_LOG(ERR, "SCTP checksum offload not supported");
- return -EINVAL;
- }
-
txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue),
RTE_CACHE_LINE_SIZE);
if (txq == NULL) {
@@ -910,7 +1034,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->queue_id = queue_idx;
txq->port_id = dev->data->port_id;
- txq->shared = &hw->tqd_start[queue_idx];
+ txq->shared = NULL; /* set in vmxnet3_setup_driver_shared() */
txq->hw = hw;
txq->qid = queue_idx;
txq->stopped = TRUE;
@@ -1013,7 +1137,7 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->mp = mp;
rxq->queue_id = queue_idx;
rxq->port_id = dev->data->port_id;
- rxq->shared = &hw->rqd_start[queue_idx];
+ rxq->shared = NULL; /* set in vmxnet3_setup_driver_shared() */
rxq->hw = hw;
rxq->qid1 = queue_idx;
rxq->qid2 = queue_idx + hw->num_rx_queues;
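Side note (not part of the patch): when an LRO completion arrives without an MSS, vmxnet3_guess_mss() above estimates it as the aggregated payload divided by the segment count (segCnt, stashed in udata64), rounded up. An illustrative sketch of that arithmetic with made-up example numbers:

#include <stdint.h>

static uint16_t
example_mss_estimate(void)
{
	uint32_t pkt_len = 29200;	/* bytes in the aggregated packet */
	uint32_t hlen = 66;		/* Ethernet + IPv4 + TCP headers */
	uint32_t seg_cnt = 20;		/* segCnt from the completion */

	/* ceil((29200 - 66) / 20) = 1457 bytes per coalesced segment */
	return (pkt_len - hlen + seg_cnt - 1) / seg_cnt;
}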
diff --git a/drivers/raw/Makefile b/drivers/raw/Makefile
index da7c8b44..8e29b4a5 100644
--- a/drivers/raw/Makefile
+++ b/drivers/raw/Makefile
@@ -5,5 +5,10 @@ include $(RTE_SDK)/mk/rte.vars.mk
# DIRS-$(<configuration>) += <directory>
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_RAWDEV) += skeleton_rawdev
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV) += dpaa2_cmdif
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_QDMA_RAWDEV) += dpaa2_qdma
+endif
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_IFPGA_RAWDEV) += ifpga_rawdev
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/raw/dpaa2_cmdif/Makefile b/drivers/raw/dpaa2_cmdif/Makefile
new file mode 100644
index 00000000..9b863dda
--- /dev/null
+++ b/drivers/raw/dpaa2_cmdif/Makefile
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa2_cmdif.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+
+LDLIBS += -lrte_bus_fslmc
+LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_eal
+LDLIBS += -lrte_kvargs
+LDLIBS += -lrte_mempool_dpaa2
+LDLIBS += -lrte_rawdev
+
+EXPORT_MAP := rte_pmd_dpaa2_cmdif_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV) += dpaa2_cmdif.c
+
+SYMLINK-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV)-include += rte_pmd_dpaa2_cmdif.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
new file mode 100644
index 00000000..095a34b2
--- /dev/null
+++ b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
@@ -0,0 +1,300 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_atomic.h>
+#include <rte_interrupts.h>
+#include <rte_branch_prediction.h>
+#include <rte_lcore.h>
+
+#include <rte_rawdev.h>
+#include <rte_rawdev_pmd.h>
+
+#include <portal/dpaa2_hw_pvt.h>
+#include <portal/dpaa2_hw_dpio.h>
+#include "dpaa2_cmdif_logs.h"
+#include "rte_pmd_dpaa2_cmdif.h"
+
+/* Dynamic log type identifier */
+int dpaa2_cmdif_logtype;
+
+/* CMDIF driver name */
+#define DPAA2_CMDIF_PMD_NAME dpaa2_dpci
+
+/* CMDIF driver object */
+static struct rte_vdev_driver dpaa2_cmdif_drv;
+
+/*
+ * This API provides the DPCI device ID in 'attr_value'.
+ * The device ID shall be passed by GPP to the AIOP using CMDIF commands.
+ */
+static int
+dpaa2_cmdif_get_attr(struct rte_rawdev *dev,
+ const char *attr_name,
+ uint64_t *attr_value)
+{
+ struct dpaa2_dpci_dev *cidev = dev->dev_private;
+
+ DPAA2_CMDIF_FUNC_TRACE();
+
+ RTE_SET_USED(attr_name);
+
+ if (!attr_value) {
+ DPAA2_CMDIF_ERR("Invalid arguments for getting attributes");
+ return -EINVAL;
+ }
+ *attr_value = cidev->dpci_id;
+
+ return 0;
+}
+
+static int
+dpaa2_cmdif_enqueue_bufs(struct rte_rawdev *dev,
+ struct rte_rawdev_buf **buffers,
+ unsigned int count,
+ rte_rawdev_obj_t context)
+{
+ struct dpaa2_dpci_dev *cidev = dev->dev_private;
+ struct rte_dpaa2_cmdif_context *cmdif_send_cnxt;
+ struct dpaa2_queue *txq;
+ struct qbman_fd fd;
+ struct qbman_eq_desc eqdesc;
+ struct qbman_swp *swp;
+ int ret;
+
+ DPAA2_CMDIF_FUNC_TRACE();
+
+ RTE_SET_USED(count);
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_CMDIF_ERR("Failure in affining portal\n");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ cmdif_send_cnxt = (struct rte_dpaa2_cmdif_context *)(context);
+ txq = &(cidev->tx_queue[cmdif_send_cnxt->priority]);
+
+ /* Prepare enqueue descriptor */
+ qbman_eq_desc_clear(&eqdesc);
+ qbman_eq_desc_set_fq(&eqdesc, txq->fqid);
+ qbman_eq_desc_set_no_orp(&eqdesc, 0);
+ qbman_eq_desc_set_response(&eqdesc, 0, 0);
+
+	/* Set some of the FD parameters to 0.
+	 * For performance reasons do not memset the whole FD.

+ */
+ fd.simple.bpid_offset = 0;
+ fd.simple.ctrl = 0;
+
+ DPAA2_SET_FD_ADDR(&fd, DPAA2_VADDR_TO_IOVA(buffers[0]->buf_addr));
+ DPAA2_SET_FD_LEN(&fd, cmdif_send_cnxt->size);
+ DPAA2_SET_FD_FRC(&fd, cmdif_send_cnxt->frc);
+ DPAA2_SET_FD_FLC(&fd, cmdif_send_cnxt->flc);
+
+ /* Enqueue a packet to the QBMAN */
+ do {
+ ret = qbman_swp_enqueue_multiple(swp, &eqdesc, &fd, NULL, 1);
+ if (ret < 0 && ret != -EBUSY)
+ DPAA2_CMDIF_ERR("Transmit failure with err: %d\n", ret);
+ } while (ret == -EBUSY);
+
+ DPAA2_CMDIF_DP_DEBUG("Successfully transmitted a packet\n");
+
+ return 0;
+}
+
+static int
+dpaa2_cmdif_dequeue_bufs(struct rte_rawdev *dev,
+ struct rte_rawdev_buf **buffers,
+ unsigned int count,
+ rte_rawdev_obj_t context)
+{
+ struct dpaa2_dpci_dev *cidev = dev->dev_private;
+ struct rte_dpaa2_cmdif_context *cmdif_rcv_cnxt;
+ struct dpaa2_queue *rxq;
+ struct qbman_swp *swp;
+ struct qbman_result *dq_storage;
+ const struct qbman_fd *fd;
+ struct qbman_pull_desc pulldesc;
+ uint8_t status;
+ int ret;
+
+ DPAA2_CMDIF_FUNC_TRACE();
+
+ RTE_SET_USED(count);
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_CMDIF_ERR("Failure in affining portal\n");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ cmdif_rcv_cnxt = (struct rte_dpaa2_cmdif_context *)(context);
+ rxq = &(cidev->rx_queue[cmdif_rcv_cnxt->priority]);
+ dq_storage = rxq->q_storage->dq_storage[0];
+
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_fq(&pulldesc, rxq->fqid);
+ qbman_pull_desc_set_numframes(&pulldesc, 1);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+ (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+ DPAA2_CMDIF_DP_WARN("VDQ cmd not issued. QBMAN is busy\n");
+ /* Portal was busy, try again */
+ continue;
+ }
+ break;
+ }
+
+ /* Check if previous issued command is completed. */
+ while (!qbman_check_command_complete(dq_storage))
+ ;
+ /* Loop until the dq_storage is updated with new token by QBMAN */
+ while (!qbman_result_has_new_result(swp, dq_storage))
+ ;
+
+ /* Check for valid frame. */
+ status = (uint8_t)qbman_result_DQ_flags(dq_storage);
+ if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
+ DPAA2_CMDIF_DP_DEBUG("No frame is delivered\n");
+ return 0;
+ }
+
+ fd = qbman_result_DQ_fd(dq_storage);
+
+ buffers[0]->buf_addr = (void *)DPAA2_IOVA_TO_VADDR(
+ DPAA2_GET_FD_ADDR(fd) + DPAA2_GET_FD_OFFSET(fd));
+ cmdif_rcv_cnxt->size = DPAA2_GET_FD_LEN(fd);
+ cmdif_rcv_cnxt->flc = DPAA2_GET_FD_FLC(fd);
+ cmdif_rcv_cnxt->frc = DPAA2_GET_FD_FRC(fd);
+
+ DPAA2_CMDIF_DP_DEBUG("packet received\n");
+
+ return 1;
+}
+
+static const struct rte_rawdev_ops dpaa2_cmdif_ops = {
+ .attr_get = dpaa2_cmdif_get_attr,
+ .enqueue_bufs = dpaa2_cmdif_enqueue_bufs,
+ .dequeue_bufs = dpaa2_cmdif_dequeue_bufs,
+};
+
+static int
+dpaa2_cmdif_create(const char *name,
+ struct rte_vdev_device *vdev,
+ int socket_id)
+{
+ struct rte_rawdev *rawdev;
+ struct dpaa2_dpci_dev *cidev;
+
+ /* Allocate device structure */
+ rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct dpaa2_dpci_dev),
+ socket_id);
+ if (!rawdev) {
+ DPAA2_CMDIF_ERR("Unable to allocate rawdevice");
+ return -EINVAL;
+ }
+
+ rawdev->dev_ops = &dpaa2_cmdif_ops;
+ rawdev->device = &vdev->device;
+ rawdev->driver_name = vdev->device.driver->name;
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ cidev = rte_dpaa2_alloc_dpci_dev();
+ if (!cidev) {
+ DPAA2_CMDIF_ERR("Unable to allocate CI device");
+ rte_rawdev_pmd_release(rawdev);
+ return -ENODEV;
+ }
+
+ rawdev->dev_private = cidev;
+
+ return 0;
+}
+
+static int
+dpaa2_cmdif_destroy(const char *name)
+{
+ int ret;
+ struct rte_rawdev *rdev;
+
+ rdev = rte_rawdev_pmd_get_named_dev(name);
+ if (!rdev) {
+ DPAA2_CMDIF_ERR("Invalid device name (%s)", name);
+ return -EINVAL;
+ }
+
+ /* The primary process will only free the DPCI device */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_dpaa2_free_dpci_dev(rdev->dev_private);
+
+ ret = rte_rawdev_pmd_release(rdev);
+ if (ret)
+ DPAA2_CMDIF_DEBUG("Device cleanup failed");
+
+ return 0;
+}
+
+static int
+dpaa2_cmdif_probe(struct rte_vdev_device *vdev)
+{
+ const char *name;
+ int ret = 0;
+
+ name = rte_vdev_device_name(vdev);
+
+ DPAA2_CMDIF_INFO("Init %s on NUMA node %d", name, rte_socket_id());
+
+ ret = dpaa2_cmdif_create(name, vdev, rte_socket_id());
+
+ return ret;
+}
+
+static int
+dpaa2_cmdif_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+ int ret;
+
+ name = rte_vdev_device_name(vdev);
+
+ DPAA2_CMDIF_INFO("Closing %s on NUMA node %d", name, rte_socket_id());
+
+ ret = dpaa2_cmdif_destroy(name);
+
+ return ret;
+}
+
+static struct rte_vdev_driver dpaa2_cmdif_drv = {
+ .probe = dpaa2_cmdif_probe,
+ .remove = dpaa2_cmdif_remove
+};
+
+RTE_PMD_REGISTER_VDEV(DPAA2_CMDIF_PMD_NAME, dpaa2_cmdif_drv);
+
+RTE_INIT(dpaa2_cmdif_init_log);
+
+static void
+dpaa2_cmdif_init_log(void)
+{
+ dpaa2_cmdif_logtype = rte_log_register("pmd.raw.dpaa2.cmdif");
+ if (dpaa2_cmdif_logtype >= 0)
+ rte_log_set_level(dpaa2_cmdif_logtype, RTE_LOG_INFO);
+}
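Side note (not part of the patch): the driver above registers a virtual device named "dpaa2_dpci" (DPAA2_CMDIF_PMD_NAME), so an application would typically instantiate it with the EAL option --vdev=dpaa2_dpci, or programmatically as sketched below; the rawdev lookup call is the generic librte_rawdev one and the snippet is only a hedged illustration:

#include <rte_bus_vdev.h>
#include <rte_rawdev.h>

static int
example_cmdif_probe(void)
{
	/* Create the vdev; probing invokes dpaa2_cmdif_probe() above */
	if (rte_vdev_init("dpaa2_dpci", NULL) != 0)
		return -1;

	/* The resulting rawdev can be looked up by the same name */
	return rte_rawdev_get_dev_id("dpaa2_dpci");
}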
diff --git a/drivers/raw/dpaa2_cmdif/dpaa2_cmdif_logs.h b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif_logs.h
new file mode 100644
index 00000000..598a621c
--- /dev/null
+++ b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif_logs.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __DPAA2_CMDIF_LOGS_H__
+#define __DPAA2_CMDIF_LOGS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern int dpaa2_cmdif_logtype;
+
+#define DPAA2_CMDIF_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_cmdif_logtype, "dpaa2_cmdif: " \
+ fmt "\n", ## args)
+
+#define DPAA2_CMDIF_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_cmdif_logtype, "dpaa2_cmdif: %s(): " \
+ fmt "\n", __func__, ## args)
+
+#define DPAA2_CMDIF_FUNC_TRACE() DPAA2_CMDIF_LOG(DEBUG, ">>")
+
+#define DPAA2_CMDIF_INFO(fmt, args...) \
+ DPAA2_CMDIF_LOG(INFO, fmt, ## args)
+#define DPAA2_CMDIF_ERR(fmt, args...) \
+ DPAA2_CMDIF_LOG(ERR, fmt, ## args)
+#define DPAA2_CMDIF_WARN(fmt, args...) \
+ DPAA2_CMDIF_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, toggled out at compile time if level lower than current level */
+#define DPAA2_CMDIF_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, "dpaa2_cmdif: " fmt "\n", ## args)
+
+#define DPAA2_CMDIF_DP_DEBUG(fmt, args...) \
+ DPAA2_CMDIF_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_CMDIF_DP_INFO(fmt, args...) \
+ DPAA2_CMDIF_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_CMDIF_DP_WARN(fmt, args...) \
+ DPAA2_CMDIF_DP_LOG(WARNING, fmt, ## args)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __DPAA2_CMDIF_LOGS_H__ */
diff --git a/drivers/raw/dpaa2_cmdif/meson.build b/drivers/raw/dpaa2_cmdif/meson.build
new file mode 100644
index 00000000..8c909438
--- /dev/null
+++ b/drivers/raw/dpaa2_cmdif/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+deps += ['rawdev', 'mempool_dpaa2', 'bus_vdev']
+sources = files('dpaa2_cmdif.c')
+
+allow_experimental_apis = true
+
+install_headers('rte_pmd_dpaa2_cmdif.h')
diff --git a/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif.h b/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif.h
new file mode 100644
index 00000000..483b66ea
--- /dev/null
+++ b/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __RTE_PMD_DPAA2_CMDIF_H__
+#define __RTE_PMD_DPAA2_CMDIF_H__
+
+/**
+ * @file
+ *
+ * NXP dpaa2 AIOP CMDIF PMD specific structures.
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** The context required in the I/O path for DPAA2 AIOP Command Interface */
+struct rte_dpaa2_cmdif_context {
+ /** Size to populate in QBMAN FD */
+ uint32_t size;
+ /** FRC to populate in QBMAN FD */
+ uint32_t frc;
+ /** FLC to populate in QBMAN FD */
+ uint64_t flc;
+	/** Priority of the command. This priority determines the DPCI queue. */
+ uint8_t priority;
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_PMD_DPAA2_CMDIF_H__ */
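Side note (not part of the patch): rte_dpaa2_cmdif_context is passed as the opaque context of the rawdev enqueue/dequeue calls and is translated into the QBMAN frame descriptor by the driver above. A hedged usage sketch; dev_id, the buffer and the FRC/FLC values are placeholder examples, and the rawdev calls are the generic librte_rawdev ones:

#include <rte_rawdev.h>
#include <rte_pmd_dpaa2_cmdif.h>

static int
example_cmdif_send(uint16_t dev_id, void *buf, uint32_t size)
{
	struct rte_dpaa2_cmdif_context ctx = {
		.size = size,
		.frc = 0,		/* command-specific frame context */
		.flc = 0,		/* command-specific flow context */
		.priority = 0,		/* selects the DPCI Tx queue */
	};
	struct rte_rawdev_buf rbuf = { .buf_addr = buf };
	struct rte_rawdev_buf *bufs[1] = { &rbuf };

	return rte_rawdev_enqueue_buffers(dev_id, bufs, 1, &ctx);
}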
diff --git a/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif_version.map b/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif_version.map
new file mode 100644
index 00000000..9b9ab1a4
--- /dev/null
+++ b/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif_version.map
@@ -0,0 +1,4 @@
+DPDK_18.05 {
+
+ local: *;
+};
diff --git a/drivers/raw/dpaa2_qdma/Makefile b/drivers/raw/dpaa2_qdma/Makefile
new file mode 100644
index 00000000..d88809ea
--- /dev/null
+++ b/drivers/raw/dpaa2_qdma/Makefile
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa2_qdma.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+
+LDLIBS += -lrte_bus_fslmc
+LDLIBS += -lrte_eal
+LDLIBS += -lrte_mempool
+LDLIBS += -lrte_mempool_dpaa2
+LDLIBS += -lrte_rawdev
+LDLIBS += -lrte_ring
+
+EXPORT_MAP := rte_pmd_dpaa2_qdma_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_QDMA_RAWDEV) += dpaa2_qdma.c
+
+SYMLINK-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_QDMA_RAWDEV)-include += rte_pmd_dpaa2_qdma.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
new file mode 100644
index 00000000..1d15c302
--- /dev/null
+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
@@ -0,0 +1,1002 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#include <string.h>
+
+#include <rte_eal.h>
+#include <rte_fslmc.h>
+#include <rte_atomic.h>
+#include <rte_lcore.h>
+#include <rte_rawdev.h>
+#include <rte_rawdev_pmd.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+
+#include <mc/fsl_dpdmai.h>
+#include <portal/dpaa2_hw_pvt.h>
+#include <portal/dpaa2_hw_dpio.h>
+
+#include "dpaa2_qdma.h"
+#include "dpaa2_qdma_logs.h"
+#include "rte_pmd_dpaa2_qdma.h"
+
+/* Dynamic log type identifier */
+int dpaa2_qdma_logtype;
+
+/* QDMA device */
+static struct qdma_device qdma_dev;
+
+/* QDMA H/W queues list */
+TAILQ_HEAD(qdma_hw_queue_list, qdma_hw_queue);
+static struct qdma_hw_queue_list qdma_queue_list
+ = TAILQ_HEAD_INITIALIZER(qdma_queue_list);
+
+/* QDMA Virtual Queues */
+struct qdma_virt_queue *qdma_vqs;
+
+/* QDMA per core data */
+struct qdma_per_core_info qdma_core_info[RTE_MAX_LCORE];
+
+static struct qdma_hw_queue *
+alloc_hw_queue(uint32_t lcore_id)
+{
+ struct qdma_hw_queue *queue = NULL;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* Get a free queue from the list */
+ TAILQ_FOREACH(queue, &qdma_queue_list, next) {
+ if (queue->num_users == 0) {
+ queue->lcore_id = lcore_id;
+ queue->num_users++;
+ break;
+ }
+ }
+
+ return queue;
+}
+
+static void
+free_hw_queue(struct qdma_hw_queue *queue)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ queue->num_users--;
+}
+
+
+static struct qdma_hw_queue *
+get_hw_queue(uint32_t lcore_id)
+{
+ struct qdma_per_core_info *core_info;
+ struct qdma_hw_queue *queue, *temp;
+ uint32_t least_num_users;
+ int num_hw_queues, i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ core_info = &qdma_core_info[lcore_id];
+ num_hw_queues = core_info->num_hw_queues;
+
+ /*
+ * Allocate a HW queue if there are less queues
+ * than maximum per core queues configured
+ */
+ if (num_hw_queues < qdma_dev.max_hw_queues_per_core) {
+ queue = alloc_hw_queue(lcore_id);
+ if (queue) {
+ core_info->hw_queues[num_hw_queues] = queue;
+ core_info->num_hw_queues++;
+ return queue;
+ }
+ }
+
+ queue = core_info->hw_queues[0];
+ /* In case there is no queue associated with the core return NULL */
+ if (!queue)
+ return NULL;
+
+ /* Fetch the least loaded H/W queue */
+ least_num_users = core_info->hw_queues[0]->num_users;
+ for (i = 0; i < num_hw_queues; i++) {
+ temp = core_info->hw_queues[i];
+		if (temp->num_users < least_num_users) {
+			least_num_users = temp->num_users;
+			queue = temp;
+		}
+ }
+
+ if (queue)
+ queue->num_users++;
+
+ return queue;
+}
+
+static void
+put_hw_queue(struct qdma_hw_queue *queue)
+{
+ struct qdma_per_core_info *core_info;
+ int lcore_id, num_hw_queues, i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /*
+ * If this is the last user of the queue free it.
+ * Also remove it from QDMA core info.
+ */
+ if (queue->num_users == 1) {
+ free_hw_queue(queue);
+
+ /* Remove the physical queue from core info */
+ lcore_id = queue->lcore_id;
+ core_info = &qdma_core_info[lcore_id];
+ num_hw_queues = core_info->num_hw_queues;
+ for (i = 0; i < num_hw_queues; i++) {
+ if (queue == core_info->hw_queues[i])
+ break;
+ }
+ for (; i < num_hw_queues - 1; i++)
+ core_info->hw_queues[i] = core_info->hw_queues[i + 1];
+ core_info->hw_queues[i] = NULL;
+ } else {
+ queue->num_users--;
+ }
+}
+
+int __rte_experimental
+rte_qdma_init(void)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ rte_spinlock_init(&qdma_dev.lock);
+
+ return 0;
+}
+
+void __rte_experimental
+rte_qdma_attr_get(struct rte_qdma_attr *qdma_attr)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ qdma_attr->num_hw_queues = qdma_dev.num_hw_queues;
+}
+
+int __rte_experimental
+rte_qdma_reset(void)
+{
+ struct qdma_hw_queue *queue;
+ int i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* In case QDMA device is not in stopped state, return -EBUSY */
+ if (qdma_dev.state == 1) {
+ DPAA2_QDMA_ERR(
+ "Device is in running state. Stop before reset.");
+ return -EBUSY;
+ }
+
+ /* In case there are pending jobs on any VQ, return -EBUSY */
+ for (i = 0; i < qdma_dev.max_vqs; i++) {
+		if (qdma_vqs[i].in_use && (qdma_vqs[i].num_enqueues !=
+		    qdma_vqs[i].num_dequeues)) {
+			DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
+			return -EBUSY;
+		}
+	}
+
+ /* Reset HW queues */
+ TAILQ_FOREACH(queue, &qdma_queue_list, next)
+ queue->num_users = 0;
+
+ /* Reset and free virtual queues */
+ for (i = 0; i < qdma_dev.max_vqs; i++) {
+ if (qdma_vqs[i].status_ring)
+ rte_ring_free(qdma_vqs[i].status_ring);
+ }
+ if (qdma_vqs)
+ rte_free(qdma_vqs);
+ qdma_vqs = NULL;
+
+ /* Reset per core info */
+ memset(&qdma_core_info, 0,
+ sizeof(struct qdma_per_core_info) * RTE_MAX_LCORE);
+
+ /* Free the FLE pool */
+ if (qdma_dev.fle_pool)
+ rte_mempool_free(qdma_dev.fle_pool);
+
+ /* Reset QDMA device structure */
+ qdma_dev.mode = RTE_QDMA_MODE_HW;
+ qdma_dev.max_hw_queues_per_core = 0;
+ qdma_dev.fle_pool = NULL;
+ qdma_dev.fle_pool_count = 0;
+ qdma_dev.max_vqs = 0;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_qdma_configure(struct rte_qdma_config *qdma_config)
+{
+ int ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* In case QDMA device is not in stopped state, return -EBUSY */
+ if (qdma_dev.state == 1) {
+ DPAA2_QDMA_ERR(
+ "Device is in running state. Stop before config.");
+ return -1;
+ }
+
+ /* Reset the QDMA device */
+ ret = rte_qdma_reset();
+ if (ret) {
+ DPAA2_QDMA_ERR("Resetting QDMA failed");
+ return ret;
+ }
+
+ /* Set mode */
+ qdma_dev.mode = qdma_config->mode;
+
+ /* Set max HW queue per core */
+ if (qdma_config->max_hw_queues_per_core > MAX_HW_QUEUE_PER_CORE) {
+ DPAA2_QDMA_ERR("H/W queues per core is more than: %d",
+ MAX_HW_QUEUE_PER_CORE);
+ return -EINVAL;
+ }
+ qdma_dev.max_hw_queues_per_core =
+ qdma_config->max_hw_queues_per_core;
+
+ /* Allocate Virtual Queues */
+ qdma_vqs = rte_malloc("qdma_virtual_queues",
+ (sizeof(struct qdma_virt_queue) * qdma_config->max_vqs),
+ RTE_CACHE_LINE_SIZE);
+ if (!qdma_vqs) {
+ DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
+ return -ENOMEM;
+ }
+ qdma_dev.max_vqs = qdma_config->max_vqs;
+
+ /* Allocate FLE pool */
+ qdma_dev.fle_pool = rte_mempool_create("qdma_fle_pool",
+ qdma_config->fle_pool_count, QDMA_FLE_POOL_SIZE,
+ QDMA_FLE_CACHE_SIZE(qdma_config->fle_pool_count), 0,
+ NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
+ if (!qdma_dev.fle_pool) {
+ DPAA2_QDMA_ERR("qdma_fle_pool create failed");
+ rte_free(qdma_vqs);
+ qdma_vqs = NULL;
+ return -ENOMEM;
+ }
+ qdma_dev.fle_pool_count = qdma_config->fle_pool_count;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_qdma_start(void)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ qdma_dev.state = 1;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags)
+{
+ char ring_name[32];
+ int i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ rte_spinlock_lock(&qdma_dev.lock);
+
+ /* Get a free Virtual Queue */
+ for (i = 0; i < qdma_dev.max_vqs; i++) {
+ if (qdma_vqs[i].in_use == 0)
+ break;
+ }
+
+ /* Return in case no VQ is free */
+ if (i == qdma_dev.max_vqs) {
+ rte_spinlock_unlock(&qdma_dev.lock);
+ return -ENODEV;
+ }
+
+ if (qdma_dev.mode == RTE_QDMA_MODE_HW ||
+ (flags & RTE_QDMA_VQ_EXCLUSIVE_PQ)) {
+ /* Allocate HW queue for a VQ */
+ qdma_vqs[i].hw_queue = alloc_hw_queue(lcore_id);
+ qdma_vqs[i].exclusive_hw_queue = 1;
+ } else {
+		/* Allocate a Ring for Virtual Queue in VQ mode */
+ sprintf(ring_name, "status ring %d", i);
+ qdma_vqs[i].status_ring = rte_ring_create(ring_name,
+ qdma_dev.fle_pool_count, rte_socket_id(), 0);
+ if (!qdma_vqs[i].status_ring) {
+ DPAA2_QDMA_ERR("Status ring creation failed for vq");
+ rte_spinlock_unlock(&qdma_dev.lock);
+ return rte_errno;
+ }
+
+ /* Get a HW queue (shared) for a VQ */
+ qdma_vqs[i].hw_queue = get_hw_queue(lcore_id);
+ qdma_vqs[i].exclusive_hw_queue = 0;
+ }
+
+ if (qdma_vqs[i].hw_queue == NULL) {
+ DPAA2_QDMA_ERR("No H/W queue available for VQ");
+ if (qdma_vqs[i].status_ring)
+ rte_ring_free(qdma_vqs[i].status_ring);
+ qdma_vqs[i].status_ring = NULL;
+ rte_spinlock_unlock(&qdma_dev.lock);
+ return -ENODEV;
+ }
+
+ qdma_vqs[i].in_use = 1;
+ qdma_vqs[i].lcore_id = lcore_id;
+
+ rte_spinlock_unlock(&qdma_dev.lock);
+
+ return i;
+}
+
+static void
+dpaa2_qdma_populate_fle(struct qbman_fle *fle,
+ uint64_t src, uint64_t dest,
+ size_t len, uint32_t flags)
+{
+ struct qdma_sdd *sdd;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ sdd = (struct qdma_sdd *)((uint8_t *)(fle) +
+ (DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle)));
+
+ /* first frame list to source descriptor */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sdd));
+ DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));
+
+ /* source and destination descriptor */
+ DPAA2_SET_SDD_RD_COHERENT(sdd); /* source descriptor CMD */
+ sdd++;
+ DPAA2_SET_SDD_WR_COHERENT(sdd); /* dest descriptor CMD */
+
+ fle++;
+ /* source frame list to source buffer */
+ if (flags & RTE_QDMA_JOB_SRC_PHY) {
+ DPAA2_SET_FLE_ADDR(fle, src);
+ DPAA2_SET_FLE_BMT(fle);
+ } else {
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));
+ }
+ DPAA2_SET_FLE_LEN(fle, len);
+
+ fle++;
+ /* destination frame list to destination buffer */
+ if (flags & RTE_QDMA_JOB_DEST_PHY) {
+ DPAA2_SET_FLE_BMT(fle);
+ DPAA2_SET_FLE_ADDR(fle, dest);
+ } else {
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));
+ }
+ DPAA2_SET_FLE_LEN(fle, len);
+
+ /* Final bit: 1, for last frame list */
+ DPAA2_SET_FLE_FIN(fle);
+}
+
+static int
+dpdmai_dev_enqueue(struct dpaa2_dpdmai_dev *dpdmai_dev,
+ uint16_t txq_id,
+ uint16_t vq_id,
+ struct rte_qdma_job *job)
+{
+ struct qdma_io_meta *io_meta;
+ struct qbman_fd fd;
+ struct dpaa2_queue *txq;
+ struct qbman_fle *fle;
+ struct qbman_eq_desc eqdesc;
+ struct qbman_swp *swp;
+ int ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_QDMA_ERR("Failure in affining portal");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ txq = &(dpdmai_dev->tx_queue[txq_id]);
+
+ /* Prepare enqueue descriptor */
+ qbman_eq_desc_clear(&eqdesc);
+ qbman_eq_desc_set_fq(&eqdesc, txq->fqid);
+ qbman_eq_desc_set_no_orp(&eqdesc, 0);
+ qbman_eq_desc_set_response(&eqdesc, 0, 0);
+
+ /*
+ * Get an FLE/SDD from FLE pool.
+ * Note: IO metadata is before the FLE and SDD memory.
+ */
+ ret = rte_mempool_get(qdma_dev.fle_pool, (void **)(&io_meta));
+ if (ret) {
+ DPAA2_QDMA_DP_WARN("Memory alloc failed for FLE");
+ return ret;
+ }
+
+ /* Set the metadata */
+ io_meta->cnxt = (size_t)job;
+ io_meta->id = vq_id;
+
+ fle = (struct qbman_fle *)(io_meta + 1);
+
+ /* populate Frame descriptor */
+ memset(&fd, 0, sizeof(struct qbman_fd));
+ DPAA2_SET_FD_ADDR(&fd, DPAA2_VADDR_TO_IOVA(fle));
+ DPAA2_SET_FD_COMPOUND_FMT(&fd);
+ DPAA2_SET_FD_FRC(&fd, QDMA_SER_CTX);
+
+ /* Populate FLE */
+ memset(fle, 0, QDMA_FLE_POOL_SIZE);
+ dpaa2_qdma_populate_fle(fle, job->src, job->dest, job->len, job->flags);
+
+ /* Enqueue the packet to the QBMAN */
+ do {
+ ret = qbman_swp_enqueue_multiple(swp, &eqdesc, &fd, NULL, 1);
+ if (ret < 0 && ret != -EBUSY)
+ DPAA2_QDMA_ERR("Transmit failure with err: %d", ret);
+ } while (ret == -EBUSY);
+
+ DPAA2_QDMA_DP_DEBUG("Successfully transmitted a packet");
+
+ return ret;
+}
+
+int __rte_experimental
+rte_qdma_vq_enqueue_multi(uint16_t vq_id,
+ struct rte_qdma_job **job,
+ uint16_t nb_jobs)
+{
+ int i, ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ for (i = 0; i < nb_jobs; i++) {
+ ret = rte_qdma_vq_enqueue(vq_id, job[i]);
+ if (ret < 0)
+ break;
+ }
+
+ return i;
+}
+
+int __rte_experimental
+rte_qdma_vq_enqueue(uint16_t vq_id,
+ struct rte_qdma_job *job)
+{
+ struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
+ struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
+ struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
+ int ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* Return error in case of wrong lcore_id */
+ if (rte_lcore_id() != qdma_vq->lcore_id) {
+ DPAA2_QDMA_ERR("QDMA enqueue for vqid %d on wrong core",
+ vq_id);
+ return -EINVAL;
+ }
+
+ ret = dpdmai_dev_enqueue(dpdmai_dev, qdma_pq->queue_id, vq_id, job);
+ if (ret < 0) {
+ DPAA2_QDMA_ERR("DPDMAI device enqueue failed: %d", ret);
+ return ret;
+ }
+
+ qdma_vq->num_enqueues++;
+
+ return 1;
+}
+
+/* Function to receive a QDMA job for a given device and queue */
+static int
+dpdmai_dev_dequeue(struct dpaa2_dpdmai_dev *dpdmai_dev,
+ uint16_t rxq_id,
+ uint16_t *vq_id,
+ struct rte_qdma_job **job)
+{
+ struct qdma_io_meta *io_meta;
+ struct dpaa2_queue *rxq;
+ struct qbman_result *dq_storage;
+ struct qbman_pull_desc pulldesc;
+ const struct qbman_fd *fd;
+ struct qbman_swp *swp;
+ struct qbman_fle *fle;
+ uint32_t fqid;
+ uint8_t status;
+ int ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_QDMA_ERR("Failure in affining portal");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ rxq = &(dpdmai_dev->rx_queue[rxq_id]);
+ dq_storage = rxq->q_storage->dq_storage[0];
+ fqid = rxq->fqid;
+
+ /* Prepare dequeue descriptor */
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_fq(&pulldesc, fqid);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+ (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+ qbman_pull_desc_set_numframes(&pulldesc, 1);
+
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+ DPAA2_QDMA_DP_WARN("VDQ command not issued. QBMAN busy");
+ continue;
+ }
+ break;
+ }
+
+ /* Check if previous issued command is completed. */
+ while (!qbman_check_command_complete(dq_storage))
+ ;
+ /* Loop until dq_storage is updated with new token by QBMAN */
+ while (!qbman_check_new_result(dq_storage))
+ ;
+
+ /* Check for valid frame. */
+ status = qbman_result_DQ_flags(dq_storage);
+ if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
+ DPAA2_QDMA_DP_DEBUG("No frame is delivered");
+ return 0;
+ }
+
+ /* Get the FD */
+ fd = qbman_result_DQ_fd(dq_storage);
+
+ /*
+ * Fetch metadata from FLE. job and vq_id were set
+ * in metadata in the enqueue operation.
+ */
+ fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+ io_meta = (struct qdma_io_meta *)(fle) - 1;
+ if (vq_id)
+ *vq_id = io_meta->id;
+
+ *job = (struct rte_qdma_job *)(size_t)io_meta->cnxt;
+ (*job)->status = DPAA2_GET_FD_ERR(fd);
+
+ /* Free FLE to the pool */
+ rte_mempool_put(qdma_dev.fle_pool, io_meta);
+
+ DPAA2_QDMA_DP_DEBUG("packet received");
+
+ return 1;
+}
+
+int __rte_experimental
+rte_qdma_vq_dequeue_multi(uint16_t vq_id,
+ struct rte_qdma_job **job,
+ uint16_t nb_jobs)
+{
+ int i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ for (i = 0; i < nb_jobs; i++) {
+ job[i] = rte_qdma_vq_dequeue(vq_id);
+ if (!job[i])
+ break;
+ }
+
+ return i;
+}
+
+struct rte_qdma_job * __rte_experimental
+rte_qdma_vq_dequeue(uint16_t vq_id)
+{
+ struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
+ struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
+ struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
+ struct rte_qdma_job *job = NULL;
+ struct qdma_virt_queue *temp_qdma_vq;
+ int dequeue_budget = QDMA_DEQUEUE_BUDGET;
+ int ring_count, ret, i;
+ uint16_t temp_vq_id;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* Return error in case of wrong lcore_id */
+ if (rte_lcore_id() != (unsigned int)(qdma_vq->lcore_id)) {
+ DPAA2_QDMA_ERR("QDMA dequeue for vqid %d on wrong core",
+ vq_id);
+ return NULL;
+ }
+
+ /* Only dequeue when there are pending jobs on VQ */
+ if (qdma_vq->num_enqueues == qdma_vq->num_dequeues)
+ return NULL;
+
+ if (qdma_vq->exclusive_hw_queue) {
+ /* In case of exclusive queue directly fetch from HW queue */
+ ret = dpdmai_dev_dequeue(dpdmai_dev, qdma_pq->queue_id,
+ NULL, &job);
+ if (ret < 0) {
+ DPAA2_QDMA_ERR(
+ "Dequeue from DPDMAI device failed: %d", ret);
+ return NULL;
+ }
+ } else {
+ /*
+ * Get the QDMA completed jobs from the software ring.
+ * In case they are not available on the ring poke the HW
+ * to fetch completed jobs from corresponding HW queues
+ */
+ ring_count = rte_ring_count(qdma_vq->status_ring);
+ if (ring_count == 0) {
+ /* TODO - How to have right budget */
+ for (i = 0; i < dequeue_budget; i++) {
+ ret = dpdmai_dev_dequeue(dpdmai_dev,
+ qdma_pq->queue_id, &temp_vq_id, &job);
+ if (ret == 0)
+ break;
+ temp_qdma_vq = &qdma_vqs[temp_vq_id];
+ rte_ring_enqueue(temp_qdma_vq->status_ring,
+ (void *)(job));
+ ring_count = rte_ring_count(
+ qdma_vq->status_ring);
+ if (ring_count)
+ break;
+ }
+ }
+
+ /* Dequeue job from the software ring to provide to the user */
+ rte_ring_dequeue(qdma_vq->status_ring, (void **)&job);
+ if (job)
+ qdma_vq->num_dequeues++;
+ }
+
+ return job;
+}
+
+void __rte_experimental
+rte_qdma_vq_stats(uint16_t vq_id,
+ struct rte_qdma_vq_stats *vq_status)
+{
+ struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ if (qdma_vq->in_use) {
+ vq_status->exclusive_hw_queue = qdma_vq->exclusive_hw_queue;
+ vq_status->lcore_id = qdma_vq->lcore_id;
+ vq_status->num_enqueues = qdma_vq->num_enqueues;
+ vq_status->num_dequeues = qdma_vq->num_dequeues;
+ vq_status->num_pending_jobs = vq_status->num_enqueues -
+ vq_status->num_dequeues;
+ }
+}
+
+int __rte_experimental
+rte_qdma_vq_destroy(uint16_t vq_id)
+{
+ struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* In case there are pending jobs on any VQ, return -EBUSY */
+ if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)
+ return -EBUSY;
+
+ rte_spinlock_lock(&qdma_dev.lock);
+
+ if (qdma_vq->exclusive_hw_queue)
+ free_hw_queue(qdma_vq->hw_queue);
+ else {
+		if (qdma_vq->status_ring)
+			rte_ring_free(qdma_vq->status_ring);
+
+ put_hw_queue(qdma_vq->hw_queue);
+ }
+
+ memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));
+
+	rte_spinlock_unlock(&qdma_dev.lock);
+
+ return 0;
+}
+
+void __rte_experimental
+rte_qdma_stop(void)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ qdma_dev.state = 0;
+}
+
+void __rte_experimental
+rte_qdma_destroy(void)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ rte_qdma_reset();
+}
+
+static const struct rte_rawdev_ops dpaa2_qdma_ops;
+
+static int
+add_hw_queues_to_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
+{
+ struct qdma_hw_queue *queue;
+ int i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ for (i = 0; i < dpdmai_dev->num_queues; i++) {
+ queue = rte_zmalloc(NULL, sizeof(struct qdma_hw_queue), 0);
+ if (!queue) {
+ DPAA2_QDMA_ERR(
+ "Memory allocation failed for QDMA queue");
+ return -ENOMEM;
+ }
+
+ queue->dpdmai_dev = dpdmai_dev;
+ queue->queue_id = i;
+
+ TAILQ_INSERT_TAIL(&qdma_queue_list, queue, next);
+ qdma_dev.num_hw_queues++;
+ }
+
+ return 0;
+}
+
+static void
+remove_hw_queues_from_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
+{
+ struct qdma_hw_queue *queue = NULL;
+ struct qdma_hw_queue *tqueue = NULL;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ TAILQ_FOREACH_SAFE(queue, &qdma_queue_list, next, tqueue) {
+ if (queue->dpdmai_dev == dpdmai_dev) {
+ TAILQ_REMOVE(&qdma_queue_list, queue, next);
+ rte_free(queue);
+ queue = NULL;
+ }
+ }
+}
+
+static int
+dpaa2_dpdmai_dev_uninit(struct rte_rawdev *rawdev)
+{
+ struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
+ int ret, i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ /* Remove HW queues from global list */
+ remove_hw_queues_from_list(dpdmai_dev);
+
+ ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->token);
+ if (ret)
+ DPAA2_QDMA_ERR("dmdmai disable failed");
+
+	/* Free up the DQ storage used by the Rx queues */
+ for (i = 0; i < DPDMAI_PRIO_NUM; i++) {
+ struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[i]);
+
+ if (rxq->q_storage) {
+ dpaa2_free_dq_storage(rxq->q_storage);
+ rte_free(rxq->q_storage);
+ }
+ }
+
+ /* Close the device at underlying layer*/
+ ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
+ if (ret)
+ DPAA2_QDMA_ERR("Failure closing dpdmai device");
+
+ return 0;
+}
+
+static int
+dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)
+{
+ struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
+ struct dpdmai_rx_queue_cfg rx_queue_cfg;
+ struct dpdmai_attr attr;
+ struct dpdmai_rx_queue_attr rx_attr;
+ struct dpdmai_tx_queue_attr tx_attr;
+ int ret, i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ /* Open DPDMAI device */
+ dpdmai_dev->dpdmai_id = dpdmai_id;
+ dpdmai_dev->dpdmai.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX];
+ ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
+ if (ret) {
+ DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
+ return ret;
+ }
+
+ /* Get DPDMAI attributes */
+ ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->token, &attr);
+ if (ret) {
+ DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
+ ret);
+ goto init_err;
+ }
+ dpdmai_dev->num_queues = attr.num_of_priorities;
+
+ /* Set up Rx Queues */
+ for (i = 0; i < attr.num_of_priorities; i++) {
+ struct dpaa2_queue *rxq;
+
+ memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
+ ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
+ CMD_PRI_LOW,
+ dpdmai_dev->token,
+ i, &rx_queue_cfg);
+ if (ret) {
+ DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
+ ret);
+ goto init_err;
+ }
+
+ /* Allocate DQ storage for the DPDMAI Rx queues */
+ rxq = &(dpdmai_dev->rx_queue[i]);
+ rxq->q_storage = rte_malloc("dq_storage",
+ sizeof(struct queue_storage_info_t),
+ RTE_CACHE_LINE_SIZE);
+ if (!rxq->q_storage) {
+ DPAA2_QDMA_ERR("q_storage allocation failed");
+ ret = -ENOMEM;
+ goto init_err;
+ }
+
+ memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
+ ret = dpaa2_alloc_dq_storage(rxq->q_storage);
+ if (ret) {
+ DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
+ goto init_err;
+ }
+ }
+
+ /* Get Rx and Tx queues FQID's */
+ for (i = 0; i < DPDMAI_PRIO_NUM; i++) {
+ ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->token, i, &rx_attr);
+ if (ret) {
+ DPAA2_QDMA_ERR("Reading device failed with err: %d",
+ ret);
+ goto init_err;
+ }
+ dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;
+
+ ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->token, i, &tx_attr);
+ if (ret) {
+ DPAA2_QDMA_ERR("Reading device failed with err: %d",
+ ret);
+ goto init_err;
+ }
+ dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
+ }
+
+ /* Enable the device */
+ ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->token);
+ if (ret) {
+ DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
+ goto init_err;
+ }
+
+ /* Add the HW queue to the global list */
+ ret = add_hw_queues_to_list(dpdmai_dev);
+ if (ret) {
+ DPAA2_QDMA_ERR("Adding H/W queue to list failed");
+ goto init_err;
+ }
+ DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");
+
+ return 0;
+init_err:
+ dpaa2_dpdmai_dev_uninit(rawdev);
+ return ret;
+}
+
+static int
+rte_dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
+ struct rte_dpaa2_device *dpaa2_dev)
+{
+ struct rte_rawdev *rawdev;
+ int ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ rawdev = rte_rawdev_pmd_allocate(dpaa2_dev->device.name,
+ sizeof(struct dpaa2_dpdmai_dev),
+ rte_socket_id());
+ if (!rawdev) {
+ DPAA2_QDMA_ERR("Unable to allocate rawdevice");
+ return -EINVAL;
+ }
+
+ dpaa2_dev->rawdev = rawdev;
+ rawdev->dev_ops = &dpaa2_qdma_ops;
+ rawdev->device = &dpaa2_dev->device;
+ rawdev->driver_name = dpaa2_drv->driver.name;
+
+ /* Invoke PMD device initialization function */
+ ret = dpaa2_dpdmai_dev_init(rawdev, dpaa2_dev->object_id);
+ if (ret) {
+ rte_rawdev_pmd_release(rawdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+rte_dpaa2_qdma_remove(struct rte_dpaa2_device *dpaa2_dev)
+{
+ struct rte_rawdev *rawdev = dpaa2_dev->rawdev;
+ int ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ dpaa2_dpdmai_dev_uninit(rawdev);
+
+ ret = rte_rawdev_pmd_release(rawdev);
+ if (ret)
+ DPAA2_QDMA_ERR("Device cleanup failed");
+
+ return 0;
+}
+
+static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
+ .drv_type = DPAA2_QDMA,
+ .probe = rte_dpaa2_qdma_probe,
+ .remove = rte_dpaa2_qdma_remove,
+};
+
+RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
+
+RTE_INIT(dpaa2_qdma_init_log);
+static void
+dpaa2_qdma_init_log(void)
+{
+ dpaa2_qdma_logtype = rte_log_register("pmd.raw.dpaa2.qdma");
+ if (dpaa2_qdma_logtype >= 0)
+ rte_log_set_level(dpaa2_qdma_logtype, RTE_LOG_INFO);
+}
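Side note (not part of the patch): a hedged sketch of how an application would drive the experimental rte_qdma_* API implemented above. The rte_qdma_config and rte_qdma_job field names and the RTE_QDMA_* flags are inferred from the driver code above (the public header is rte_pmd_dpaa2_qdma.h, not shown here); pool sizes and addresses are example values:

#include <rte_lcore.h>
#include <rte_pmd_dpaa2_qdma.h>

static int
example_qdma_copy(uint64_t src_phys, uint64_t dst_phys, uint32_t len)
{
	struct rte_qdma_config cfg = {
		.max_hw_queues_per_core = 1,
		.mode = RTE_QDMA_MODE_HW,
		.max_vqs = 1,
		.fle_pool_count = 4096,
	};
	struct rte_qdma_job job = {
		.src = src_phys,
		.dest = dst_phys,
		.len = len,
		.flags = RTE_QDMA_JOB_SRC_PHY | RTE_QDMA_JOB_DEST_PHY,
	};
	struct rte_qdma_job *done;
	int vq_id;

	rte_qdma_init();
	if (rte_qdma_configure(&cfg))
		return -1;
	rte_qdma_start();

	/* VQs are bound to the creating lcore; enqueue/dequeue must run there */
	vq_id = rte_qdma_vq_create(rte_lcore_id(), 0);
	if (vq_id < 0)
		return vq_id;

	if (rte_qdma_vq_enqueue(vq_id, &job) < 0)
		return -1;

	/* Poll for completion; a real caller would bound this loop */
	while ((done = rte_qdma_vq_dequeue(vq_id)) == NULL)
		;

	return done->status;
}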
diff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.h b/drivers/raw/dpaa2_qdma/dpaa2_qdma.h
new file mode 100644
index 00000000..c6a05780
--- /dev/null
+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __DPAA2_QDMA_H__
+#define __DPAA2_QDMA_H__
+
+struct qdma_sdd;
+struct qdma_io_meta;
+
+#define DPAA2_QDMA_MAX_FLE 3
+#define DPAA2_QDMA_MAX_SDD 2
+
+/** FLE pool size: 3 Frame list + 2 source/destination descriptor */
+#define QDMA_FLE_POOL_SIZE (sizeof(struct qdma_io_meta) + \
+ sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
+ sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)
+/** FLE pool cache size */
+#define QDMA_FLE_CACHE_SIZE(_num) (_num/(RTE_MAX_LCORE * 2))
+
+/** Notification by FQD_CTX[fqid] */
+#define QDMA_SER_CTX (1 << 8)
+
+/**
+ * Source descriptor command read transaction type for RBP=0:
+ * coherent copy of cacheable memory
+ */
+#define DPAA2_SET_SDD_RD_COHERENT(sdd) ((sdd)->cmd = (0xb << 28))
+/**
+ * Destination descriptor command write transaction type for RBP=0:
+ * coherent copy of cacheable memory
+ */
+#define DPAA2_SET_SDD_WR_COHERENT(sdd) ((sdd)->cmd = (0x6 << 28))
+
+/** Maximum possible H/W Queues on each core */
+#define MAX_HW_QUEUE_PER_CORE 64
+
+/**
+ * In Virtual Queue mode, this specifies the number of dequeues the
+ * 'rte_qdma_vq_dequeue/_multi' API does from the H/W queue when there
+ * is no job present on the Virtual Queue ring.
+ */
+#define QDMA_DEQUEUE_BUDGET 64
+
+/**
+ * Represents a QDMA device.
+ * A single QDMA device exists, which is a combination of multiple DPDMAI rawdevs.
+ */
+struct qdma_device {
+ /** total number of hw queues. */
+ uint16_t num_hw_queues;
+ /**
+	 * Maximum number of hw queues to be allocated per core.
+ * This is limited by MAX_HW_QUEUE_PER_CORE
+ */
+ uint16_t max_hw_queues_per_core;
+ /** Maximum number of VQ's */
+ uint16_t max_vqs;
+ /** mode of operation - physical(h/w) or virtual */
+ uint8_t mode;
+ /** Device state - started or stopped */
+ uint8_t state;
+ /** FLE pool for the device */
+ struct rte_mempool *fle_pool;
+ /** FLE pool size */
+ int fle_pool_count;
+ /** Lock to protect the QDMA device whenever required */
+ rte_spinlock_t lock;
+};
+
+/** Represents a QDMA H/W queue */
+struct qdma_hw_queue {
+ /** Pointer to Next instance */
+ TAILQ_ENTRY(qdma_hw_queue) next;
+ /** DPDMAI device to communicate with HW */
+ struct dpaa2_dpdmai_dev *dpdmai_dev;
+ /** queue ID to communicate with HW */
+ uint16_t queue_id;
+ /** Associated lcore id */
+ uint32_t lcore_id;
+ /** Number of users of this hw queue */
+ uint32_t num_users;
+};
+
+/** Represents a QDMA virtual queue */
+struct qdma_virt_queue {
+ /** Status ring of the virtual queue */
+ struct rte_ring *status_ring;
+ /** Associated hw queue */
+ struct qdma_hw_queue *hw_queue;
+ /** Associated lcore id */
+ uint32_t lcore_id;
+ /** States if this vq is in use or not */
+ uint8_t in_use;
+ /** States if this vq has exclusively associated hw queue */
+ uint8_t exclusive_hw_queue;
+ /* Total number of enqueues on this VQ */
+ uint64_t num_enqueues;
+ /* Total number of dequeues from this VQ */
+ uint64_t num_dequeues;
+};
+
+/** Represents a QDMA per core hw queues allocation in virtual mode */
+struct qdma_per_core_info {
+ /** list for allocated hw queues */
+ struct qdma_hw_queue *hw_queues[MAX_HW_QUEUE_PER_CORE];
+ /* Number of hw queues allocated for this core */
+ uint16_t num_hw_queues;
+};
+
+/** Metadata which is stored with each operation */
+struct qdma_io_meta {
+ /**
+ * Context which is stored in the FLE pool (just before the FLEs).
+ * The QDMA job is stored as this context, as a part of the metadata.
+ */
+ uint64_t cnxt;
+ /** VQ ID is stored as a part of metadata of the enqueue command */
+ uint64_t id;
+};
+
+/** Source/Destination Descriptor */
+struct qdma_sdd {
+ uint32_t rsv;
+ /** Stride configuration */
+ uint32_t stride;
+ /** Route-by-port command */
+ uint32_t rbpcmd;
+ uint32_t cmd;
+} __attribute__((__packed__));
+
+/** Represents a DPDMAI raw device */
+struct dpaa2_dpdmai_dev {
+ /** Pointer to Next device instance */
+ TAILQ_ENTRY(dpaa2_qdma_device) next;
+ /** handle to DPDMAI object */
+ struct fsl_mc_io dpdmai;
+ /** HW ID for DPDMAI object */
+ uint32_t dpdmai_id;
+ /** Token of this device */
+ uint16_t token;
+ /** Number of queues in this DPDMAI device */
+ uint8_t num_queues;
+ /** RX queues */
+ struct dpaa2_queue rx_queue[DPDMAI_PRIO_NUM];
+ /** TX queues */
+ struct dpaa2_queue tx_queue[DPDMAI_PRIO_NUM];
+};
+
+#endif /* __DPAA2_QDMA_H__ */
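
A minimal sketch of how the FLE-pool sizing macros above could be used to
create the per-device mempool. This is illustrative only: the pool name is a
placeholder, and compiling it assumes the DPAA2/QBMAN headers that define
struct qbman_fle are on the include path.

#include <rte_mempool.h>

/* One pool element per in-flight job: metadata + 3 FLEs + 2 SDDs. */
static struct rte_mempool *
example_fle_pool_create(int fle_pool_count)
{
	return rte_mempool_create("qdma_fle_pool_example",
				  fle_pool_count,
				  QDMA_FLE_POOL_SIZE,
				  QDMA_FLE_CACHE_SIZE(fle_pool_count),
				  0, NULL, NULL, NULL, NULL,
				  SOCKET_ID_ANY, 0);
}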
diff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma_logs.h b/drivers/raw/dpaa2_qdma/dpaa2_qdma_logs.h
new file mode 100644
index 00000000..fafe352b
--- /dev/null
+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma_logs.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __DPAA2_QDMA_LOGS_H__
+#define __DPAA2_QDMA_LOGS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern int dpaa2_qdma_logtype;
+
+#define DPAA2_QDMA_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_qdma_logtype, "dpaa2_qdma: " \
+ fmt "\n", ## args)
+
+#define DPAA2_QDMA_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_qdma_logtype, "dpaa2_qdma: %s(): " \
+ fmt "\n", __func__, ## args)
+
+#define DPAA2_QDMA_FUNC_TRACE() DPAA2_QDMA_LOG(DEBUG, ">>")
+
+#define DPAA2_QDMA_INFO(fmt, args...) \
+ DPAA2_QDMA_LOG(INFO, fmt, ## args)
+#define DPAA2_QDMA_ERR(fmt, args...) \
+ DPAA2_QDMA_LOG(ERR, fmt, ## args)
+#define DPAA2_QDMA_WARN(fmt, args...) \
+ DPAA2_QDMA_LOG(WARNING, fmt, ## args)
+
+/* DP logs, compiled out when more verbose than the configured datapath log level */
+#define DPAA2_QDMA_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, "dpaa2_qdma: " fmt "\n", ## args)
+
+#define DPAA2_QDMA_DP_DEBUG(fmt, args...) \
+ DPAA2_QDMA_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_QDMA_DP_INFO(fmt, args...) \
+ DPAA2_QDMA_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_QDMA_DP_WARN(fmt, args...) \
+ DPAA2_QDMA_DP_LOG(WARNING, fmt, ## args)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __DPAA2_QDMA_LOGS_H__ */
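
A brief, hypothetical illustration of how the helpers above are meant to be
used in driver code, assuming <rte_log.h> is already included (as the driver
sources do): control-path messages go through the dynamically registered
logtype, while per-job datapath messages use the RTE_LOG_DP-based wrappers
that can be compiled out.

static void example_log_usage(int vq_id, int err)
{
	if (err)
		DPAA2_QDMA_ERR("VQ %d setup failed: %d", vq_id, err);
	else
		DPAA2_QDMA_INFO("VQ %d created", vq_id);

	/* Per-job messages belong on the datapath macros. */
	DPAA2_QDMA_DP_DEBUG("enqueue on vq %d", vq_id);
}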
diff --git a/drivers/raw/dpaa2_qdma/meson.build b/drivers/raw/dpaa2_qdma/meson.build
new file mode 100644
index 00000000..a2eb1d2f
--- /dev/null
+++ b/drivers/raw/dpaa2_qdma/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+deps += ['rawdev', 'mempool_dpaa2', 'ring']
+sources = files('dpaa2_qdma.c')
+
+allow_experimental_apis = true
+
+install_headers('rte_pmd_dpaa2_qdma.h')
diff --git a/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h
new file mode 100644
index 00000000..17fffcb7
--- /dev/null
+++ b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h
@@ -0,0 +1,286 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __RTE_PMD_DPAA2_QDMA_H__
+#define __RTE_PMD_DPAA2_QDMA_H__
+
+/**
+ * @file
+ *
+ * NXP dpaa2 QDMA specific structures.
+ *
+ */
+
+/** Determines the mode of operation */
+enum {
+ /**
+ * Allocate a H/W queue per VQ, i.e. an exclusive hardware queue for a VQ.
+ * This mode gives the best performance.
+ */
+ RTE_QDMA_MODE_HW,
+ /**
+ * A VQ does not have an exclusive associated H/W queue.
+ * Rather, a H/W Queue is shared by multiple Virtual Queues.
+ * This mode uses intermediate data structures to support
+ * multi VQ to PQ mappings, and thus has some performance implications.
+ * Note: Even in this mode there is an option to allocate a H/W
+ * queue for a VQ. Please see 'RTE_QDMA_VQ_EXCLUSIVE_PQ' flag.
+ */
+ RTE_QDMA_MODE_VIRTUAL
+};
+
+/**
+ * If the user has configured Virtual Queue mode, but needs an exclusive
+ * H/W queue for some particular VQ (for better performance on that
+ * particular VQ), the user can pass this flag while creating that
+ * Virtual Queue. A H/W queue will then be allocated exclusively for the
+ * VQ which uses this flag.
+ */
+#define RTE_QDMA_VQ_EXCLUSIVE_PQ (1ULL)
+
+/** States if the source address is physical. */
+#define RTE_QDMA_JOB_SRC_PHY (1ULL)
+
+/** States if the destination address is physical. */
+#define RTE_QDMA_JOB_DEST_PHY (1ULL << 1)
+
+/** Provides QDMA device attributes */
+struct rte_qdma_attr {
+ /** total number of hw QDMA queues present */
+ uint16_t num_hw_queues;
+};
+
+/** QDMA device configuration structure */
+struct rte_qdma_config {
+ /** Number of maximum hw queues to allocate per core. */
+ uint16_t max_hw_queues_per_core;
+ /** Maximum number of VQ's to be used. */
+ uint16_t max_vqs;
+ /** mode of operation - physical(h/w) or virtual */
+ uint8_t mode;
+ /**
+ * Size of the FLE pool; the user provides this as input to the driver.
+ * FLEs (and corresponding source/destination descriptors) are
+ * allocated by the driver at enqueue time to store src/dest and
+ * other data, and are freed at dequeue time. This determines the
+ * maximum number of in-flight jobs on the QDMA device. This should
+ * be a power of 2.
+ */
+ int fle_pool_count;
+};
+
+/** Provides QDMA device statistics */
+struct rte_qdma_vq_stats {
+ /** States if this vq has exclusively associated hw queue */
+ uint8_t exclusive_hw_queue;
+ /** Associated lcore id */
+ uint32_t lcore_id;
+ /* Total number of enqueues on this VQ */
+ uint64_t num_enqueues;
+ /* Total number of dequeues from this VQ */
+ uint64_t num_dequeues;
+ /* total number of pending jobs in this VQ */
+ uint64_t num_pending_jobs;
+};
+
+/** Determines a QDMA job */
+struct rte_qdma_job {
+ /** Source Address from where DMA is (to be) performed */
+ uint64_t src;
+ /** Destination Address where DMA is (to be) done */
+ uint64_t dest;
+ /** Length of the DMA operation in bytes. */
+ uint32_t len;
+ /** See RTE_QDMA_JOB_ flags */
+ uint32_t flags;
+ /**
+ * User can specify a context which will be maintained
+ * on the dequeue operation.
+ */
+ uint64_t cnxt;
+ /**
+ * Status of the transaction.
+ * This is filled in the dequeue operation by the driver.
+ */
+ uint8_t status;
+};
+
+/**
+ * Initialize the QDMA device.
+ *
+ * @returns
+ * - 0: Success.
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_init(void);
+
+/**
+ * Get the QDMA attributes.
+ *
+ * @param qdma_attr
+ * QDMA attributes providing total number of hw queues etc.
+ */
+void __rte_experimental
+rte_qdma_attr_get(struct rte_qdma_attr *qdma_attr);
+
+/**
+ * Reset the QDMA device. This API will completely reset the QDMA
+ * device, bringing it back to its original state as if only the rte_qdma_init() API
+ * has been called.
+ *
+ * @returns
+ * - 0: Success.
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_reset(void);
+
+/**
+ * Configure the QDMA device.
+ *
+ * @returns
+ * - 0: Success.
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_configure(struct rte_qdma_config *qdma_config);
+
+/**
+ * Start the QDMA device.
+ *
+ * @returns
+ * - 0: Success.
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_start(void);
+
+/**
+ * Create a Virtual Queue on a particular lcore id.
+ * This API can be called from any thread/core. The user can create/destroy
+ * VQs at runtime.
+ *
+ * @param lcore_id
+ * lcore ID with which this particular queue will be associated.
+ * @param flags
+ * RTE_QDMA_VQ_ flags. See macro definitions.
+ *
+ * @returns
+ * - >= 0: Virtual queue ID.
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags);
+
+/**
+ * Enqueue multiple jobs to a Virtual Queue.
+ * If the enqueue is successful, the H/W will perform DMA operations
+ * on the basis of the QDMA jobs provided.
+ *
+ * @param vq_id
+ * Virtual Queue ID.
+ * @param job
+ * List of QDMA Jobs containing relevant information related to DMA.
+ * @param nb_jobs
+ * Number of QDMA jobs provided by the user.
+ *
+ * @returns
+ * - >=0: Number of jobs successfully submitted
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_vq_enqueue_multi(uint16_t vq_id,
+ struct rte_qdma_job **job,
+ uint16_t nb_jobs);
+
+/**
+ * Enqueue a single job to a Virtual Queue.
+ * If the enqueue is successful, the H/W will perform DMA operations
+ * on the basis of the QDMA job provided.
+ *
+ * @param vq_id
+ * Virtual Queue ID.
+ * @param job
+ * A QDMA Job containing relevant information related to DMA.
+ *
+ * @returns
+ * - >=0: Number of jobs successfully submitted
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_vq_enqueue(uint16_t vq_id,
+ struct rte_qdma_job *job);
+
+/**
+ * Dequeue multiple completed jobs from a Virtual Queue.
+ * Provides the list of completed jobs capped by nb_jobs.
+ *
+ * @param vq_id
+ * Virtual Queue ID.
+ * @param job
+ * List of QDMA Jobs returned from the API.
+ * @param nb_jobs
+ * Number of QDMA jobs requested for dequeue by the user.
+ *
+ * @returns
+ * Number of jobs actually dequeued.
+ */
+int __rte_experimental
+rte_qdma_vq_dequeue_multi(uint16_t vq_id,
+ struct rte_qdma_job **job,
+ uint16_t nb_jobs);
+
+/**
+ * Dequeue a single completed jobs from a Virtual Queue.
+ *
+ * @param vq_id
+ * Virtual Queue ID.
+ *
+ * @returns
+ * - A completed job, or NULL if no job is available.
+ */
+struct rte_qdma_job * __rte_experimental
+rte_qdma_vq_dequeue(uint16_t vq_id);
+
+/**
+ * Get statistics of a Virtual Queue.
+ *
+ * @param vq_id
+ * Virtual Queue ID.
+ * @param vq_stats
+ * VQ statistics structure which will be filled in by the driver.
+ */
+void __rte_experimental
+rte_qdma_vq_stats(uint16_t vq_id,
+ struct rte_qdma_vq_stats *vq_stats);
+
+/**
+ * Destroy the Virtual Queue specified by vq_id.
+ * This API can be called from any thread/core. The user can create/destroy
+ * VQs at runtime.
+ *
+ * @param vq_id
+ * Virtual Queue ID which needs to be uninitialized.
+ *
+ * @returns
+ * - 0: Success.
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_vq_destroy(uint16_t vq_id);
+
+/**
+ * Stop QDMA device.
+ */
+void __rte_experimental
+rte_qdma_stop(void);
+
+/**
+ * Destroy the QDMA device.
+ */
+void __rte_experimental
+rte_qdma_destroy(void);
+
+#endif /* __RTE_PMD_DPAA2_QDMA_H__*/
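
A minimal usage sketch of the API declared above, from device bring-up to a
single DMA copy. It is illustrative only: error handling is trimmed, the
configuration values are placeholders, and src/dst must be addresses valid
for the flags used (physical here, per the RTE_QDMA_JOB_*_PHY flags).

#include <rte_lcore.h>
#include <rte_pmd_dpaa2_qdma.h>

static int
example_qdma_copy(uint64_t src, uint64_t dst, uint32_t len)
{
	struct rte_qdma_config cfg = {
		.max_hw_queues_per_core = 1,
		.max_vqs = 1,
		.mode = RTE_QDMA_MODE_VIRTUAL,
		.fle_pool_count = 1024,	/* power of 2: max in-flight jobs */
	};
	struct rte_qdma_job job = {
		.src = src,
		.dest = dst,
		.len = len,
		.flags = RTE_QDMA_JOB_SRC_PHY | RTE_QDMA_JOB_DEST_PHY,
	};
	struct rte_qdma_job *done;
	int vq_id;

	if (rte_qdma_init() || rte_qdma_configure(&cfg) || rte_qdma_start())
		return -1;

	vq_id = rte_qdma_vq_create(rte_lcore_id(), 0);
	if (vq_id < 0)
		return vq_id;

	if (rte_qdma_vq_enqueue((uint16_t)vq_id, &job) < 0)
		return -1;

	/* Busy-poll for completion. */
	do {
		done = rte_qdma_vq_dequeue((uint16_t)vq_id);
	} while (done == NULL);

	/* Treat a non-zero status (filled by the driver at dequeue) as failure. */
	return done->status ? -1 : 0;
}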
diff --git a/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma_version.map b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma_version.map
new file mode 100644
index 00000000..fe42a227
--- /dev/null
+++ b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma_version.map
@@ -0,0 +1,20 @@
+EXPERIMENTAL {
+ global:
+
+ rte_qdma_attr_get;
+ rte_qdma_configure;
+ rte_qdma_destroy;
+ rte_qdma_init;
+ rte_qdma_reset;
+ rte_qdma_start;
+ rte_qdma_stop;
+ rte_qdma_vq_create;
+ rte_qdma_vq_destroy;
+ rte_qdma_vq_dequeue;
+ rte_qdma_vq_dequeue_multi;
+ rte_qdma_vq_enqueue;
+ rte_qdma_vq_enqueue_multi;
+ rte_qdma_vq_stats;
+
+ local: *;
+};
diff --git a/drivers/raw/ifpga_rawdev/Makefile b/drivers/raw/ifpga_rawdev/Makefile
new file mode 100644
index 00000000..f3b9d5e6
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/Makefile
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_ifpga_rawdev.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/bus/ifpga
+CFLAGS += -I$(RTE_SDK)/drivers/raw/ifpga_rawdev
+LDLIBS += -lrte_eal
+LDLIBS += -lrte_rawdev
+LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_kvargs
+LDLIBS += -lrte_bus_pci
+LDLIBS += -lrte_bus_ifpga
+
+EXPORT_MAP := rte_pmd_ifpga_rawdev_version.map
+
+LIBABIVER := 1
+
+VPATH += $(SRCDIR)/base
+
+include $(RTE_SDK)/drivers/raw/ifpga_rawdev/base/Makefile
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_IFPGA_RAWDEV) += ifpga_rawdev.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/raw/ifpga_rawdev/base/Makefile b/drivers/raw/ifpga_rawdev/base/Makefile
new file mode 100644
index 00000000..d79da72b
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/Makefile
@@ -0,0 +1,26 @@
+#SPDX-License-Identifier: BSD-3-Clause
+#Copyright(c) 2010-2018 Intel Corporation
+
+ifneq ($(CONFIG_RTE_LIBRTE_EAL),)
+OSDEP := osdep_rte
+else
+OSDEP := osdep_raw
+endif
+
+CFLAGS += -I$(RTE_SDK)/drivers/raw/ifpga_rawdev/base/$(OSDEP)
+
+SRCS-y += ifpga_api.c
+SRCS-y += ifpga_enumerate.c
+SRCS-y += ifpga_feature_dev.c
+SRCS-y += ifpga_fme.c
+SRCS-y += ifpga_fme_iperf.c
+SRCS-y += ifpga_fme_dperf.c
+SRCS-y += ifpga_fme_error.c
+SRCS-y += ifpga_port.c
+SRCS-y += ifpga_port_error.c
+SRCS-y += opae_hw_api.c
+SRCS-y += opae_ifpga_hw_api.c
+SRCS-y += opae_debug.c
+SRCS-y += ifpga_fme_pr.c
+
+SRCS-y += $(wildcard $(SRCDIR)/base/$(OSDEP)/*.c)
diff --git a/drivers/raw/ifpga_rawdev/base/README b/drivers/raw/ifpga_rawdev/base/README
new file mode 100644
index 00000000..5bc2ed0f
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/README
@@ -0,0 +1,31 @@
+..
+
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+Intel iFPGA driver
+==================
+
+This directory contains the source code of the Intel FPGA driver released by
+the team which develops the Intel FPGA Open Programmable Acceleration Engine
+(OPAE). The base/ directory contains the original source package. The base
+code currently supports Intel FPGA solutions, including the integrated
+solution (Intel(R) Xeon(R) CPU with FPGAs) and the discrete solution (Intel(R)
+Programmable Acceleration Card with Intel(R) Arria(R) 10 FPGA), and it can be
+extended to support more FPGA devices in the future.
+
+Please refer to [1][2] for more information on OPAE and Intel FPGAs.
+
+[1] https://01.org/OPAE
+[2] https://www.altera.com/solutions/acceleration-hub/overview.html
+
+
+Updating the driver
+===================
+
+NOTE: The source code in this directory should not be modified apart from
+the following file(s):
+
+ osdep_raw/osdep_generic.h
+ osdep_rte/osdep_generic.h
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_api.c b/drivers/raw/ifpga_rawdev/base/ifpga_api.c
new file mode 100644
index 00000000..540e171a
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_api.c
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_api.h"
+#include "ifpga_enumerate.h"
+#include "ifpga_feature_dev.h"
+
+#include "opae_hw_api.h"
+
+/* Accelerator APIs */
+static int ifpga_acc_get_uuid(struct opae_accelerator *acc,
+ struct uuid *uuid)
+{
+ struct opae_bridge *br = acc->br;
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data)
+ return -EINVAL;
+
+ port = br->data;
+
+ return fpga_get_afu_uuid(port, uuid);
+}
+
+static int ifpga_acc_set_irq(struct opae_accelerator *acc,
+ u32 start, u32 count, s32 evtfds[])
+{
+ struct ifpga_afu_info *afu_info = acc->data;
+ struct opae_bridge *br = acc->br;
+ struct ifpga_port_hw *port;
+ struct fpga_uafu_irq_set irq_set;
+
+ if (!br || !br->data)
+ return -EINVAL;
+
+ if (start >= afu_info->num_irqs || start + count > afu_info->num_irqs)
+ return -EINVAL;
+
+ port = br->data;
+
+ irq_set.start = start;
+ irq_set.count = count;
+ irq_set.evtfds = evtfds;
+
+ return ifpga_set_irq(port->parent, FEATURE_FIU_ID_PORT, port->port_id,
+ IFPGA_PORT_FEATURE_ID_UINT, &irq_set);
+}
+
+static int ifpga_acc_get_info(struct opae_accelerator *acc,
+ struct opae_acc_info *info)
+{
+ struct ifpga_afu_info *afu_info = acc->data;
+
+ if (!afu_info)
+ return -ENODEV;
+
+ info->num_regions = afu_info->num_regions;
+ info->num_irqs = afu_info->num_irqs;
+
+ return 0;
+}
+
+static int ifpga_acc_get_region_info(struct opae_accelerator *acc,
+ struct opae_acc_region_info *info)
+{
+ struct ifpga_afu_info *afu_info = acc->data;
+
+ if (!afu_info)
+ return -EINVAL;
+
+ if (info->index >= afu_info->num_regions)
+ return -EINVAL;
+
+ /* always one RW region only for AFU now */
+ info->flags = ACC_REGION_READ | ACC_REGION_WRITE | ACC_REGION_MMIO;
+ info->len = afu_info->region[info->index].len;
+ info->addr = afu_info->region[info->index].addr;
+
+ return 0;
+}
+
+static int ifpga_acc_read(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data)
+{
+ struct ifpga_afu_info *afu_info = acc->data;
+ struct opae_reg_region *region;
+
+ if (!afu_info)
+ return -EINVAL;
+
+ if (offset + byte <= offset)
+ return -EINVAL;
+
+ if (region_idx >= afu_info->num_regions)
+ return -EINVAL;
+
+ region = &afu_info->region[region_idx];
+ if (offset + byte > region->len)
+ return -EINVAL;
+
+ switch (byte) {
+ case 8:
+ *(u64 *)data = opae_readq(region->addr + offset);
+ break;
+ case 4:
+ *(u32 *)data = opae_readl(region->addr + offset);
+ break;
+ case 2:
+ *(u16 *)data = opae_readw(region->addr + offset);
+ break;
+ case 1:
+ *(u8 *)data = opae_readb(region->addr + offset);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ifpga_acc_write(struct opae_accelerator *acc,
+ unsigned int region_idx, u64 offset,
+ unsigned int byte, void *data)
+{
+ struct ifpga_afu_info *afu_info = acc->data;
+ struct opae_reg_region *region;
+
+ if (!afu_info)
+ return -EINVAL;
+
+ if (offset + byte <= offset)
+ return -EINVAL;
+
+ if (region_idx >= afu_info->num_regions)
+ return -EINVAL;
+
+ region = &afu_info->region[region_idx];
+ if (offset + byte > region->len)
+ return -EINVAL;
+
+ /* normal mmio case */
+ switch (byte) {
+ case 8:
+ opae_writeq(*(u64 *)data, region->addr + offset);
+ break;
+ case 4:
+ opae_writel(*(u32 *)data, region->addr + offset);
+ break;
+ case 2:
+ opae_writew(*(u16 *)data, region->addr + offset);
+ break;
+ case 1:
+ opae_writeb(*(u8 *)data, region->addr + offset);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct opae_accelerator_ops ifpga_acc_ops = {
+ .read = ifpga_acc_read,
+ .write = ifpga_acc_write,
+ .set_irq = ifpga_acc_set_irq,
+ .get_info = ifpga_acc_get_info,
+ .get_region_info = ifpga_acc_get_region_info,
+ .get_uuid = ifpga_acc_get_uuid,
+};
+
+/* Bridge APIs */
+
+static int ifpga_br_reset(struct opae_bridge *br)
+{
+ struct ifpga_port_hw *port = br->data;
+
+ return fpga_port_reset(port);
+}
+
+struct opae_bridge_ops ifpga_br_ops = {
+ .reset = ifpga_br_reset,
+};
+
+/* Manager APIs */
+static int ifpga_mgr_flash(struct opae_manager *mgr, int id, void *buf,
+ u32 size, u64 *status)
+{
+ struct ifpga_fme_hw *fme = mgr->data;
+ struct ifpga_hw *hw = fme->parent;
+
+ return ifpga_pr(hw, id, buf, size, status);
+}
+
+struct opae_manager_ops ifpga_mgr_ops = {
+ .flash = ifpga_mgr_flash,
+};
+
+/* Adapter APIs */
+static int ifpga_adapter_enumerate(struct opae_adapter *adapter)
+{
+ struct ifpga_hw *hw = malloc(sizeof(*hw));
+
+ if (hw) {
+ memset(hw, 0, sizeof(*hw));
+ hw->pci_data = adapter->data;
+ hw->adapter = adapter;
+ if (ifpga_bus_enumerate(hw))
+ goto error;
+ return ifpga_bus_init(hw);
+ }
+
+error:
+ return -ENOMEM;
+}
+
+struct opae_adapter_ops ifpga_adapter_ops = {
+ .enumerate = ifpga_adapter_enumerate,
+};
+
+/**
+ * ifpga_pr - do the partial reconfiguration for a given port device
+ * @hw: pointer to the HW structure
+ * @port_id: the port device id
+ * @buffer: the buffer of the bitstream
+ * @size: the size of the bitstream
+ * @status: hardware status including PR error code if return -EIO.
+ *
+ * @return
+ * - 0: Success, partial reconfiguration finished.
+ * - <0: Error code returned in partial reconfiguration.
+ **/
+int ifpga_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size,
+ u64 *status)
+{
+ if (!is_valid_port_id(hw, port_id))
+ return -ENODEV;
+
+ return do_pr(hw, port_id, buffer, size, status);
+}
+
+int ifpga_get_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ struct feature_prop *prop)
+{
+ if (!hw || !prop)
+ return -EINVAL;
+
+ switch (fiu_id) {
+ case FEATURE_FIU_ID_FME:
+ return fme_get_prop(&hw->fme, prop);
+ case FEATURE_FIU_ID_PORT:
+ if (!is_valid_port_id(hw, port_id))
+ return -ENODEV;
+ return port_get_prop(&hw->port[port_id], prop);
+ }
+
+ return -ENOENT;
+}
+
+int ifpga_set_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ struct feature_prop *prop)
+{
+ if (!hw || !prop)
+ return -EINVAL;
+
+ switch (fiu_id) {
+ case FEATURE_FIU_ID_FME:
+ return fme_set_prop(&hw->fme, prop);
+ case FEATURE_FIU_ID_PORT:
+ if (!is_valid_port_id(hw, port_id))
+ return -ENODEV;
+ return port_set_prop(&hw->port[port_id], prop);
+ }
+
+ return -ENOENT;
+}
+
+int ifpga_set_irq(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ u32 feature_id, void *irq_set)
+{
+ if (!hw || !irq_set)
+ return -EINVAL;
+
+ switch (fiu_id) {
+ case FEATURE_FIU_ID_FME:
+ return fme_set_irq(&hw->fme, feature_id, irq_set);
+ case FEATURE_FIU_ID_PORT:
+ if (!is_valid_port_id(hw, port_id))
+ return -ENODEV;
+ return port_set_irq(&hw->port[port_id], feature_id, irq_set);
+ }
+
+ return -ENOENT;
+}
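
As a small illustration of the accelerator ops defined in this file, the
sketch below reads a 32-bit CSR at offset 0x8 of MMIO region 0 through
ifpga_acc_ops. The accelerator pointer is assumed to have been populated by
enumeration (acc->data pointing at its ifpga_afu_info); the region index and
offset are placeholders, and u32 is the typedef from the OPAE osdep headers.

static int example_read_csr32(struct opae_accelerator *acc, u32 *val)
{
	/* region 0, offset 0x8, 4-byte access; bounds are checked inside */
	return ifpga_acc_ops.read(acc, 0, 0x8, 4, val);
}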
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_api.h b/drivers/raw/ifpga_rawdev/base/ifpga_api.h
new file mode 100644
index 00000000..dae7ca14
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_api.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_API_H_
+#define _IFPGA_API_H_
+
+#include "opae_hw_api.h"
+#include "ifpga_hw.h"
+
+extern struct opae_adapter_ops ifpga_adapter_ops;
+extern struct opae_manager_ops ifpga_mgr_ops;
+extern struct opae_bridge_ops ifpga_br_ops;
+extern struct opae_accelerator_ops ifpga_acc_ops;
+
+/* common APIs */
+int ifpga_get_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ struct feature_prop *prop);
+int ifpga_set_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ struct feature_prop *prop);
+int ifpga_set_irq(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ u32 feature_id, void *irq_set);
+
+/* FME APIs */
+int ifpga_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size,
+ u64 *status);
+
+#endif /* _IFPGA_API_H_ */
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_compat.h b/drivers/raw/ifpga_rawdev/base/ifpga_compat.h
new file mode 100644
index 00000000..931c8938
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_compat.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_COMPAT_H_
+#define _IFPGA_COMPAT_H_
+
+#include "opae_osdep.h"
+
+#undef container_of
+#define container_of(ptr, type, member) ({ \
+ typeof(((type *)0)->member)(*__mptr) = (ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); })
+
+#define IFPGA_PAGE_SHIFT 12
+#define IFPGA_PAGE_SIZE (1 << IFPGA_PAGE_SHIFT)
+#define IFPGA_PAGE_MASK (~(IFPGA_PAGE_SIZE - 1))
+#define IFPGA_PAGE_ALIGN(addr) (((addr) + IFPGA_PAGE_SIZE - 1)\
+ & IFPGA_PAGE_MASK)
+#define IFPGA_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
+
+#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
+#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), IFPGA_PAGE_SIZE)
+
+#define readl(addr) opae_readl(addr)
+#define readq(addr) opae_readq(addr)
+#define writel(value, addr) opae_writel(value, addr)
+#define writeq(value, addr) opae_writeq(value, addr)
+
+#define malloc(size) opae_malloc(size)
+#define zmalloc(size) opae_zmalloc(size)
+#define free(size) opae_free(size)
+
+/*
+ * Wait for a register's _field to change to the given value (_expect's _field)
+ * by polling with the given interval and timeout.
+ */
+#define fpga_wait_register_field(_field, _expect, _reg_addr, _timeout, _invl)\
+({ \
+ int wait = 0; \
+ int ret = -ETIMEDOUT; \
+ typeof(_expect) value; \
+ for (; wait <= _timeout; wait += _invl) { \
+ value.csr = readq(_reg_addr); \
+ if (_expect._field == value._field) { \
+ ret = 0; \
+ break; \
+ } \
+ udelay(_invl); \
+ } \
+ ret; \
+})
+
+#define __maybe_unused __attribute__((__unused__))
+
+#define UNUSED(x) (void)(x)
+
+#endif /* _IFPGA_COMPAT_H_ */
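
A hypothetical use of the fpga_wait_register_field() polling helper above.
The register layout is made up for the example; it only needs to expose a
'csr' view plus the bit field being polled, matching how the macro reads the
whole register and compares the selected field. The 1000/1 arguments are a
timeout and a poll interval in microseconds, and u8/u64/-ETIMEDOUT come from
the osdep headers this file already includes.

struct example_ctrl {
	union {
		u64 csr;
		struct {
			u8 start:1;
			u8 ack:1;
			u64 rsvd:62;
		};
	};
};

static int example_wait_ack(void *csr_addr)
{
	struct example_ctrl expect = { .csr = 0 };

	expect.ack = 1;
	/* returns 0 once the 'ack' bit reads back as 1, -ETIMEDOUT otherwise */
	return fpga_wait_register_field(ack, expect, csr_addr, 1000, 1);
}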
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_defines.h b/drivers/raw/ifpga_rawdev/base/ifpga_defines.h
new file mode 100644
index 00000000..aa025272
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_defines.h
@@ -0,0 +1,1663 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_DEFINES_H_
+#define _IFPGA_DEFINES_H_
+
+#include "ifpga_compat.h"
+
+#define MAX_FPGA_PORT_NUM 4
+
+#define FME_FEATURE_HEADER "fme_hdr"
+#define FME_FEATURE_THERMAL_MGMT "fme_thermal"
+#define FME_FEATURE_POWER_MGMT "fme_power"
+#define FME_FEATURE_GLOBAL_IPERF "fme_iperf"
+#define FME_FEATURE_GLOBAL_ERR "fme_error"
+#define FME_FEATURE_PR_MGMT "fme_pr"
+#define FME_FEATURE_HSSI_ETH "fme_hssi"
+#define FME_FEATURE_GLOBAL_DPERF "fme_dperf"
+#define FME_FEATURE_QSPI_FLASH "fme_qspi_flash"
+
+#define PORT_FEATURE_HEADER "port_hdr"
+#define PORT_FEATURE_UAFU "port_uafu"
+#define PORT_FEATURE_ERR "port_err"
+#define PORT_FEATURE_UMSG "port_umsg"
+#define PORT_FEATURE_PR "port_pr"
+#define PORT_FEATURE_UINT "port_uint"
+#define PORT_FEATURE_STP "port_stp"
+
+/*
+ * do not check the revision id, as the id may be dynamic in
+ * some cases, e.g. UAFU.
+ */
+#define SKIP_REVISION_CHECK 0xff
+
+#define FME_HEADER_REVISION 1
+#define FME_THERMAL_MGMT_REVISION 0
+#define FME_POWER_MGMT_REVISION 1
+#define FME_GLOBAL_IPERF_REVISION 1
+#define FME_GLOBAL_ERR_REVISION 1
+#define FME_PR_MGMT_REVISION 2
+#define FME_HSSI_ETH_REVISION 0
+#define FME_GLOBAL_DPERF_REVISION 0
+#define FME_QSPI_REVISION 0
+
+#define PORT_HEADER_REVISION 0
+/* UAFU's header info depends on the downloaded GBS */
+#define PORT_UAFU_REVISION SKIP_REVISION_CHECK
+#define PORT_ERR_REVISION 1
+#define PORT_UMSG_REVISION 0
+#define PORT_UINT_REVISION 0
+#define PORT_STP_REVISION 1
+
+#define FEATURE_TYPE_AFU 0x1
+#define FEATURE_TYPE_BBB 0x2
+#define FEATURE_TYPE_PRIVATE 0x3
+#define FEATURE_TYPE_FIU 0x4
+
+#define FEATURE_FIU_ID_FME 0x0
+#define FEATURE_FIU_ID_PORT 0x1
+
+#define FEATURE_ID_HEADER 0x0
+#define FEATURE_ID_AFU 0xff
+
+enum fpga_id_type {
+ FME_ID,
+ PORT_ID,
+ FPGA_ID_MAX,
+};
+
+enum fme_feature_id {
+ FME_FEATURE_ID_HEADER = 0x0,
+
+ FME_FEATURE_ID_THERMAL_MGMT = 0x1,
+ FME_FEATURE_ID_POWER_MGMT = 0x2,
+ FME_FEATURE_ID_GLOBAL_IPERF = 0x3,
+ FME_FEATURE_ID_GLOBAL_ERR = 0x4,
+ FME_FEATURE_ID_PR_MGMT = 0x5,
+ FME_FEATURE_ID_HSSI_ETH = 0x6,
+ FME_FEATURE_ID_GLOBAL_DPERF = 0x7,
+ FME_FEATURE_ID_QSPI_FLASH = 0x8,
+
+ /* one for fme header. */
+ FME_FEATURE_ID_MAX = 0x9,
+};
+
+enum port_feature_id {
+ PORT_FEATURE_ID_HEADER = 0x0,
+ PORT_FEATURE_ID_ERROR = 0x1,
+ PORT_FEATURE_ID_UMSG = 0x2,
+ PORT_FEATURE_ID_UINT = 0x3,
+ PORT_FEATURE_ID_STP = 0x4,
+ PORT_FEATURE_ID_UAFU = 0x5,
+ PORT_FEATURE_ID_MAX = 0x6,
+};
+
+/*
+ * All headers and structures must be byte-packed to match the spec.
+ */
+#pragma pack(push, 1)
+
+struct feature_header {
+ union {
+ u64 csr;
+ struct {
+ u16 id:12;
+ u8 revision:4;
+ u32 next_header_offset:24;
+ u8 end_of_list:1;
+ u32 reserved:19;
+ u8 type:4;
+ };
+ };
+};
+
+struct feature_bbb_header {
+ struct uuid guid;
+};
+
+struct feature_afu_header {
+ struct uuid guid;
+ union {
+ u64 csr;
+ struct {
+ u64 next_afu:24;
+ u64 reserved:40;
+ };
+ };
+};
+
+struct feature_fiu_header {
+ struct uuid guid;
+ union {
+ u64 csr;
+ struct {
+ u64 next_afu:24;
+ u64 reserved:40;
+ };
+ };
+};
+
+struct feature_fme_capability {
+ union {
+ u64 csr;
+ struct {
+ u8 fabric_verid; /* Fabric version ID */
+ u8 socket_id:1; /* Socket id */
+ u8 rsvd1:3; /* Reserved */
+ /* pci0 link available yes/no */
+ u8 pci0_link_avile:1;
+ /* pci1 link available yes/no */
+ u8 pci1_link_avile:1;
+ /* Coherent (QPI/UPI) link available yes/no */
+ u8 qpi_link_avile:1;
+ u8 rsvd2:1; /* Reserved */
+ /* IOMMU or VT-d supported yes/no */
+ u8 iommu_support:1;
+ u8 num_ports:3; /* Number of ports */
+ u8 sf_fab_ctl:1; /* Internal validation bit */
+ u8 rsvd3:3; /* Reserved */
+ /*
+ * Address width supported in bits
+ * BXT -0x26 , SKX -0x30
+ */
+ u8 address_width_bits:6;
+ u8 rsvd4:2; /* Reserved */
+ /* Size of cache supported in kb */
+ u16 cache_size:12;
+ u8 cache_assoc:4; /* Cache Associativity */
+ u16 rsvd5:15; /* Reserved */
+ u8 lock_bit:1; /* Lock bit */
+ };
+ };
+};
+
+#define FME_AFU_ACCESS_PF 0
+#define FME_AFU_ACCESS_VF 1
+
+struct feature_fme_port {
+ union {
+ u64 csr;
+ struct {
+ u32 port_offset:24;
+ u8 reserved1;
+ u8 port_bar:3;
+ u32 reserved2:20;
+ u8 afu_access_control:1;
+ u8 reserved3:4;
+ u8 port_implemented:1;
+ u8 reserved4:3;
+ };
+ };
+};
+
+struct feature_fme_fab_status {
+ union {
+ u64 csr;
+ struct {
+ u8 upilink_status:4; /* UPI Link Status */
+ u8 rsvd1:4; /* Reserved */
+ u8 pci0link_status:1; /* pci0 link status */
+ u8 rsvd2:3; /* Reserved */
+ u8 pci1link_status:1; /* pci1 link status */
+ u64 rsvd3:51; /* Reserved */
+ };
+ };
+};
+
+struct feature_fme_genprotrange2_base {
+ union {
+ u64 csr;
+ struct {
+ u16 rsvd1; /* Reserved */
+ /* Base Address of memory range */
+ u8 protected_base_addrss:4;
+ u64 rsvd2:44; /* Reserved */
+ };
+ };
+};
+
+struct feature_fme_genprotrange2_limit {
+ union {
+ u64 csr;
+ struct {
+ u16 rsvd1; /* Reserved */
+ /* Limit Address of memory range */
+ u8 protected_limit_addrss:4;
+ u16 rsvd2:11; /* Reserved */
+ u8 enable:1; /* Enable GENPROTRANGE check */
+ u32 rsvd3; /* Reserved */
+ };
+ };
+};
+
+struct feature_fme_dxe_lock {
+ union {
+ u64 csr;
+ struct {
+ /*
+ * Determines write access to the DXE region CSRs
+ * 1 - CSR region is locked;
+ * 0 - it is open for write access.
+ */
+ u8 dxe_early_lock:1;
+ /*
+ * Determines write access to the HSSI CSR
+ * 1 - CSR region is locked;
+ * 0 - it is open for write access.
+ */
+ u8 dxe_late_lock:1;
+ u64 rsvd:62;
+ };
+ };
+};
+
+#define HSSI_ID_NO_HASSI 0
+#define HSSI_ID_PCIE_RP 1
+#define HSSI_ID_ETHERNET 2
+
+struct feature_fme_bitstream_id {
+ union {
+ u64 csr;
+ struct {
+ u32 gitrepo_hash:32; /* GIT repository hash */
+ /*
+ * HSSI configuration identifier:
+ * 0 - No HSSI
+ * 1 - PCIe-RP
+ * 2 - Ethernet
+ */
+ u8 hssi_id:4;
+ u16 rsvd1:12; /* Reserved */
+ /* Bitstream version patch number */
+ u8 bs_verpatch:4;
+ /* Bitstream version minor number */
+ u8 bs_verminor:4;
+ /* Bitstream version major number */
+ u8 bs_vermajor:4;
+ /* Bitstream version debug number */
+ u8 bs_verdebug:4;
+ };
+ };
+};
+
+struct feature_fme_bitstream_md {
+ union {
+ u64 csr;
+ struct {
+ /* Seed number used for synthesis flow */
+ u8 synth_seed:4;
+ /* Synthesis date(day number - 2 digits) */
+ u8 synth_day:8;
+ /* Synthesis date(month number - 2 digits) */
+ u8 synth_month:8;
+ /* Synthesis date(year number - 2 digits) */
+ u8 synth_year:8;
+ u64 rsvd:36; /* Reserved */
+ };
+ };
+};
+
+struct feature_fme_iommu_ctrl {
+ union {
+ u64 csr;
+ struct {
+ /* Disables IOMMU prefetcher for C0 channel */
+ u8 prefetch_disableC0:1;
+ /* Disables IOMMU prefetcher for C1 channel */
+ u8 prefetch_disableC1:1;
+ /* Disables IOMMU partial cache line writes */
+ u8 prefetch_wrdisable:1;
+ u8 rsvd1:1; /* Reserved */
+ /*
+ * Select counter and read value from register
+ * iommu_stat.dbg_counters
+ * 0 - Number of 4K page translation response
+ * 1 - Number of 2M page translation response
+ * 2 - Number of 1G page translation response
+ */
+ u8 counter_sel:2;
+ u32 rsvd2:26; /* Reserved */
+ /* Connected to IOMMU SIP Capabilities */
+ u32 capecap_defeature;
+ };
+ };
+};
+
+struct feature_fme_iommu_stat {
+ union {
+ u64 csr;
+ struct {
+ /* Translation Enable bit from IOMMU SIP */
+ u8 translation_enable:1;
+ /* Drain request in progress */
+ u8 drain_req_inprog:1;
+ /* Invalidation current state */
+ u8 inv_state:3;
+ /* C0 Response Buffer current state */
+ u8 respbuffer_stateC0:3;
+ /* C1 Response Buffer current state */
+ u8 respbuffer_stateC1:3;
+ /* Last request ID to IOMMU SIP */
+ u8 last_reqID:4;
+ /* Last IOMMU SIP response ID value */
+ u8 last_respID:4;
+ /* Last IOMMU SIP response status value */
+ u8 last_respstatus:3;
+ /* C0 Transaction Buffer is not empty */
+ u8 transbuf_notEmptyC0:1;
+ /* C1 Transaction Buffer is not empty */
+ u8 transbuf_notEmptyC1:1;
+ /* C0 Request FIFO is not empty */
+ u8 reqFIFO_notemptyC0:1;
+ /* C1 Request FIFO is not empty */
+ u8 reqFIFO_notemptyC1:1;
+ /* C0 Response FIFO is not empty */
+ u8 respFIFO_notemptyC0:1;
+ /* C1 Response FIFO is not empty */
+ u8 respFIFO_notemptyC1:1;
+ /* C0 Response FIFO overflow detected */
+ u8 respFIFO_overflowC0:1;
+ /* C1 Response FIFO overflow detected */
+ u8 respFIFO_overflowC1:1;
+ /* C0 Transaction Buffer overflow detected */
+ u8 tranbuf_overflowC0:1;
+ /* C1 Transaction Buffer overflow detected */
+ u8 tranbuf_overflowC1:1;
+ /* Request FIFO overflow detected */
+ u8 reqFIFO_overflow:1;
+ /* IOMMU memory read in progress */
+ u8 memrd_inprog:1;
+ /* IOMMU memory write in progress */
+ u8 memwr_inprog:1;
+ u8 rsvd1:1; /* Reserved */
+ /* Value of counter selected by iommu_ctl.counter_sel */
+ u16 dbg_counters:16;
+ u16 rsvd2:12; /* Reserved */
+ };
+ };
+};
+
+struct feature_fme_pcie0_ctrl {
+ union {
+ u64 csr;
+ struct {
+ u64 vtd_bar_lock:1; /* Lock VT-D BAR register */
+ u64 rsvd1:3;
+ u64 rciep:1; /* Configure PCIE0 as RCiEP */
+ u64 rsvd2:59;
+ };
+ };
+};
+
+struct feature_fme_llpr_smrr_base {
+ union {
+ u64 csr;
+ struct {
+ u64 rsvd1:12;
+ u64 base:20; /* SMRR memory range base address */
+ u64 rsvd2:32;
+ };
+ };
+};
+
+struct feature_fme_llpr_smrr_mask {
+ union {
+ u64 csr;
+ struct {
+ u64 rsvd1:11;
+ u64 valid:1; /* LLPR_SMRR rule is valid or not */
+ /*
+ * SMRR memory range mask which determines the range
+ * of region being mapped
+ */
+ u64 phys_mask:20;
+ u64 rsvd2:32;
+ };
+ };
+};
+
+struct feature_fme_llpr_smrr2_base {
+ union {
+ u64 csr;
+ struct {
+ u64 rsvd1:12;
+ u64 base:20; /* SMRR2 memory range base address */
+ u64 rsvd2:32;
+ };
+ };
+};
+
+struct feature_fme_llpr_smrr2_mask {
+ union {
+ u64 csr;
+ struct {
+ u64 rsvd1:11;
+ u64 valid:1; /* LLPR_SMRR2 rule is valid or not */
+ /*
+ * SMRR2 memory range mask which determines the range
+ * of region being mapped
+ */
+ u64 phys_mask:20;
+ u64 rsvd2:32;
+ };
+ };
+};
+
+struct feature_fme_llpr_meseg_base {
+ union {
+ u64 csr;
+ struct {
+ /* A[45:19] of base address memory range */
+ u64 me_base:27;
+ u64 rsvd:37;
+ };
+ };
+};
+
+struct feature_fme_llpr_meseg_limit {
+ union {
+ u64 csr;
+ struct {
+ /* A[45:19] of limit address memory range */
+ u64 me_limit:27;
+ u64 rsvd1:4;
+ u64 enable:1; /* Enable LLPR MESEG rule */
+ u64 rsvd2:32;
+ };
+ };
+};
+
+struct feature_fme_header {
+ struct feature_header header;
+ struct feature_afu_header afu_header;
+ u64 reserved;
+ u64 scratchpad;
+ struct feature_fme_capability capability;
+ struct feature_fme_port port[MAX_FPGA_PORT_NUM];
+ struct feature_fme_fab_status fab_status;
+ struct feature_fme_bitstream_id bitstream_id;
+ struct feature_fme_bitstream_md bitstream_md;
+ struct feature_fme_genprotrange2_base genprotrange2_base;
+ struct feature_fme_genprotrange2_limit genprotrange2_limit;
+ struct feature_fme_dxe_lock dxe_lock;
+ struct feature_fme_iommu_ctrl iommu_ctrl;
+ struct feature_fme_iommu_stat iommu_stat;
+ struct feature_fme_pcie0_ctrl pcie0_control;
+ struct feature_fme_llpr_smrr_base smrr_base;
+ struct feature_fme_llpr_smrr_mask smrr_mask;
+ struct feature_fme_llpr_smrr2_base smrr2_base;
+ struct feature_fme_llpr_smrr2_mask smrr2_mask;
+ struct feature_fme_llpr_meseg_base meseg_base;
+ struct feature_fme_llpr_meseg_limit meseg_limit;
+};
+
+struct feature_port_capability {
+ union {
+ u64 csr;
+ struct {
+ u8 port_number:2; /* Port Number 0-3 */
+ u8 rsvd1:6; /* Reserved */
+ u16 mmio_size; /* User MMIO size in KB */
+ u8 rsvd2; /* Reserved */
+ u8 sp_intr_num:4; /* Supported interrupts num */
+ u32 rsvd3:28; /* Reserved */
+ };
+ };
+};
+
+struct feature_port_control {
+ union {
+ u64 csr;
+ struct {
+ u8 port_sftrst:1; /* Port Soft Reset */
+ u8 rsvd1:1; /* Reserved */
+ u8 latency_tolerance:1;/* '1' >= 40us, '0' < 40us */
+ u8 rsvd2:1; /* Reserved */
+ u8 port_sftrst_ack:1; /* HW ACK for Soft Reset */
+ u64 rsvd3:59; /* Reserved */
+ };
+ };
+};
+
+#define PORT_POWER_STATE_NORMAL 0
+#define PORT_POWER_STATE_AP1 1
+#define PORT_POWER_STATE_AP2 2
+#define PORT_POWER_STATE_AP6 6
+
+struct feature_port_status {
+ union {
+ u64 csr;
+ struct {
+ u8 port_freeze:1; /* '1' - frozen, '0' - normal */
+ u8 rsvd1:7; /* Reserved */
+ u8 power_state:4; /* Power State */
+ u8 ap1_event:1; /* AP1 event was detected */
+ u8 ap2_event:1; /* AP2 event was detected */
+ u64 rsvd2:50; /* Reserved */
+ };
+ };
+};
+
+/* Port Header Register Set */
+struct feature_port_header {
+ struct feature_header header;
+ struct feature_afu_header afu_header;
+ u64 port_mailbox;
+ u64 scratchpad;
+ struct feature_port_capability capability;
+ struct feature_port_control control;
+ struct feature_port_status status;
+ u64 rsvd2;
+ u64 user_clk_freq_cmd0;
+ u64 user_clk_freq_cmd1;
+ u64 user_clk_freq_sts0;
+ u64 user_clk_freq_sts1;
+};
+
+struct feature_fme_tmp_threshold {
+ union {
+ u64 csr;
+ struct {
+ u8 tmp_thshold1:7; /* temperature Threshold 1 */
+ /* temperature Threshold 1 enable/disable */
+ u8 tmp_thshold1_enable:1;
+ u8 tmp_thshold2:7; /* temperature Threshold 2 */
+ /* temperature Threshold 2 enable /disable */
+ u8 tmp_thshold2_enable:1;
+ u8 pro_hot_setpoint:7; /* Proc Hot set point */
+ u8 rsvd4:1; /* Reserved */
+ u8 therm_trip_thshold:7; /* Thermal Trip Threshold */
+ u8 rsvd3:1; /* Reserved */
+ u8 thshold1_status:1; /* Threshold 1 Status */
+ u8 thshold2_status:1; /* Threshold 2 Status */
+ u8 rsvd5:1; /* Reserved */
+ /* Thermal Trip Threshold status */
+ u8 therm_trip_thshold_status:1;
+ u8 rsvd6:4; /* Reserved */
+ /* Validation mode- Force Proc Hot */
+ u8 valmodeforce:1;
+ /* Validation mode - Therm trip Hot */
+ u8 valmodetherm:1;
+ u8 rsvd2:2; /* Reserved */
+ u8 thshold_policy:1; /* threshold policy */
+ u32 rsvd:19; /* Reserved */
+ };
+ };
+};
+
+/* Temperature Sensor Read values format 1 */
+struct feature_fme_temp_rdsensor_fmt1 {
+ union {
+ u64 csr;
+ struct {
+ /* Reads out FPGA temperature in celsius */
+ u8 fpga_temp:7;
+ u8 rsvd0:1; /* Reserved */
+ /* Temperature reading sequence number */
+ u16 tmp_reading_seq_num;
+ /* Temperature reading is valid */
+ u8 tmp_reading_valid:1;
+ u8 rsvd1:7; /* Reserved */
+ u16 dbg_mode:10; /* Debug mode */
+ u32 rsvd2:22; /* Reserved */
+ };
+ };
+};
+
+/* Temperature sensor read values format 2 */
+struct feature_fme_temp_rdsensor_fmt2 {
+ u64 rsvd; /* Reserved */
+};
+
+/* Temperature Threshold Capability Register */
+struct feature_fme_tmp_threshold_cap {
+ union {
+ u64 csr;
+ struct {
+ /* Temperature Threshold Unsupported */
+ u8 tmp_thshold_disabled:1;
+ u64 rsvd:63; /* Reserved */
+ };
+ };
+};
+
+/* FME THERMAL FEATURE */
+struct feature_fme_thermal {
+ struct feature_header header;
+ struct feature_fme_tmp_threshold threshold;
+ struct feature_fme_temp_rdsensor_fmt1 rdsensor_fm1;
+ struct feature_fme_temp_rdsensor_fmt2 rdsensor_fm2;
+ struct feature_fme_tmp_threshold_cap threshold_cap;
+};
+
+/* Power Status register */
+struct feature_fme_pm_status {
+ union {
+ u64 csr;
+ struct {
+ /* FPGA Power consumed, The format is to be defined */
+ u32 pwr_consumed:18;
+ /* FPGA Latency Tolerance Reporting */
+ u8 fpga_latency_report:1;
+ u64 rsvd:45; /* Reserved */
+ };
+ };
+};
+
+/* AP Thresholds */
+struct feature_fme_pm_ap_threshold {
+ union {
+ u64 csr;
+ struct {
+ /*
+ * Number of clocks (5ns period) for assertion
+ * of FME_data
+ */
+ u8 threshold1:7;
+ u8 rsvd1:1;
+ u8 threshold2:7;
+ u8 rsvd2:1;
+ u8 threshold1_status:1;
+ u8 threshold2_status:1;
+ u64 rsvd3:46; /* Reserved */
+ };
+ };
+};
+
+/* Xeon Power Limit */
+struct feature_fme_pm_xeon_limit {
+ union {
+ u64 csr;
+ struct {
+ /* Power limit in Watts in 12.3 format */
+ u16 pwr_limit:15;
+ /* Indicates that power limit has been written */
+ u8 enable:1;
+ /* 0 - Turbo range, 1 - Entire range */
+ u8 clamping:1;
+ /* Time constant in XXYYY format */
+ u8 time:7;
+ u64 rsvd:40; /* Reserved */
+ };
+ };
+};
+
+/* FPGA Power Limit */
+struct feature_fme_pm_fpga_limit {
+ union {
+ u64 csr;
+ struct {
+ /* Power limit in Watts in 12.3 format */
+ u16 pwr_limit:15;
+ /* Indicates that power limit has been written */
+ u8 enable:1;
+ /* 0 - Turbo range, 1 - Entire range */
+ u8 clamping:1;
+ /* Time constant in XXYYY format */
+ u8 time:7;
+ u64 rsvd:40; /* Reserved */
+ };
+ };
+};
+
+/* FME POWER FEATURE */
+struct feature_fme_power {
+ struct feature_header header;
+ struct feature_fme_pm_status status;
+ struct feature_fme_pm_ap_threshold threshold;
+ struct feature_fme_pm_xeon_limit xeon_limit;
+ struct feature_fme_pm_fpga_limit fpga_limit;
+};
+
+#define CACHE_CHANNEL_RD 0
+#define CACHE_CHANNEL_WR 1
+
+enum iperf_cache_events {
+ IPERF_CACHE_RD_HIT,
+ IPERF_CACHE_WR_HIT,
+ IPERF_CACHE_RD_MISS,
+ IPERF_CACHE_WR_MISS,
+ IPERF_CACHE_RSVD, /* reserved */
+ IPERF_CACHE_HOLD_REQ,
+ IPERF_CACHE_DATA_WR_PORT_CONTEN,
+ IPERF_CACHE_TAG_WR_PORT_CONTEN,
+ IPERF_CACHE_TX_REQ_STALL,
+ IPERF_CACHE_RX_REQ_STALL,
+ IPERF_CACHE_EVICTIONS,
+};
+
+/* FPMON Cache Control */
+struct feature_fme_ifpmon_ch_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 reset_counters:1; /* Reset Counters */
+ u8 rsvd1:7; /* Reserved */
+ u8 freeze:1; /* Freeze if set to 1 */
+ u8 rsvd2:7; /* Reserved */
+ u8 cache_event:4; /* Select the cache event */
+ u8 cci_chsel:1; /* Select the channel */
+ u64 rsvd3:43; /* Reserved */
+ };
+ };
+};
+
+/* FPMON Cache Counter */
+struct feature_fme_ifpmon_ch_ctr {
+ union {
+ u64 csr;
+ struct {
+ /* Cache Counter for even addresses */
+ u64 cache_counter:48;
+ u16 rsvd:12; /* Reserved */
+ /* Cache Event being reported */
+ u8 event_code:4;
+ };
+ };
+};
+
+enum iperf_fab_events {
+ IPERF_FAB_PCIE0_RD,
+ IPERF_FAB_PCIE0_WR,
+ IPERF_FAB_PCIE1_RD,
+ IPERF_FAB_PCIE1_WR,
+ IPERF_FAB_UPI_RD,
+ IPERF_FAB_UPI_WR,
+ IPERF_FAB_MMIO_RD,
+ IPERF_FAB_MMIO_WR,
+};
+
+#define FAB_DISABLE_FILTER 0
+#define FAB_ENABLE_FILTER 1
+
+/* FPMON FAB Control */
+struct feature_fme_ifpmon_fab_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 reset_counters:1; /* Reset Counters */
+ u8 rsvd:7; /* Reserved */
+ u8 freeze:1; /* Set to 1 frozen counter */
+ u8 rsvd1:7; /* Reserved */
+ u8 fab_evtcode:4; /* Fabric Event Code */
+ u8 port_id:2; /* Port ID */
+ u8 rsvd2:1; /* Reserved */
+ u8 port_filter:1; /* Port Filter */
+ u64 rsvd3:40; /* Reserved */
+ };
+ };
+};
+
+/* FPMON Event Counter */
+struct feature_fme_ifpmon_fab_ctr {
+ union {
+ u64 csr;
+ struct {
+ u64 fab_cnt:60; /* Fabric event counter */
+ /* Fabric event code being reported */
+ u8 event_code:4;
+ };
+ };
+};
+
+/* FPMON Clock Counter */
+struct feature_fme_ifpmon_clk_ctr {
+ u64 afu_interf_clock; /* Clk_16UI (AFU clock) counter. */
+};
+
+enum iperf_vtd_events {
+ IPERF_VTD_AFU_MEM_RD_TRANS,
+ IPERF_VTD_AFU_MEM_WR_TRANS,
+ IPERF_VTD_AFU_DEVTLB_RD_HIT,
+ IPERF_VTD_AFU_DEVTLB_WR_HIT,
+ IPERF_VTD_DEVTLB_4K_FILL,
+ IPERF_VTD_DEVTLB_2M_FILL,
+ IPERF_VTD_DEVTLB_1G_FILL,
+};
+
+/* VT-d control register */
+struct feature_fme_ifpmon_vtd_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 reset_counters:1; /* Reset Counters */
+ u8 rsvd:7; /* Reserved */
+ u8 freeze:1; /* Set to 1 frozen counter */
+ u8 rsvd1:7; /* Reserved */
+ u8 vtd_evtcode:4; /* VTd and TLB event code */
+ u64 rsvd2:44; /* Reserved */
+ };
+ };
+};
+
+/* VT-d event counter */
+struct feature_fme_ifpmon_vtd_ctr {
+ union {
+ u64 csr;
+ struct {
+ u64 vtd_counter:48; /* VTd event counter */
+ u16 rsvd:12; /* Reserved */
+ u8 event_code:4; /* VTd event code */
+ };
+ };
+};
+
+enum iperf_vtd_sip_events {
+ IPERF_VTD_SIP_IOTLB_4K_HIT,
+ IPERF_VTD_SIP_IOTLB_2M_HIT,
+ IPERF_VTD_SIP_IOTLB_1G_HIT,
+ IPERF_VTD_SIP_SLPWC_L3_HIT,
+ IPERF_VTD_SIP_SLPWC_L4_HIT,
+ IPERF_VTD_SIP_RCC_HIT,
+ IPERF_VTD_SIP_IOTLB_4K_MISS,
+ IPERF_VTD_SIP_IOTLB_2M_MISS,
+ IPERF_VTD_SIP_IOTLB_1G_MISS,
+ IPERF_VTD_SIP_SLPWC_L3_MISS,
+ IPERF_VTD_SIP_SLPWC_L4_MISS,
+ IPERF_VTD_SIP_RCC_MISS,
+};
+
+/* VT-d SIP control register */
+struct feature_fme_ifpmon_vtd_sip_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 reset_counters:1; /* Reset Counters */
+ u8 rsvd:7; /* Reserved */
+ u8 freeze:1; /* Set to 1 frozen counter */
+ u8 rsvd1:7; /* Reserved */
+ u8 vtd_evtcode:4; /* VTd and TLB event code */
+ u64 rsvd2:44; /* Reserved */
+ };
+ };
+};
+
+/* VT-d SIP event counter */
+struct feature_fme_ifpmon_vtd_sip_ctr {
+ union {
+ u64 csr;
+ struct {
+ u64 vtd_counter:48; /* VTd event counter */
+ u16 rsvd:12; /* Reserved */
+ u8 event_code:4; /* VTd event code */
+ };
+ };
+};
+
+/* FME IPERF FEATURE */
+struct feature_fme_iperf {
+ struct feature_header header;
+ struct feature_fme_ifpmon_ch_ctl ch_ctl;
+ struct feature_fme_ifpmon_ch_ctr ch_ctr0;
+ struct feature_fme_ifpmon_ch_ctr ch_ctr1;
+ struct feature_fme_ifpmon_fab_ctl fab_ctl;
+ struct feature_fme_ifpmon_fab_ctr fab_ctr;
+ struct feature_fme_ifpmon_clk_ctr clk;
+ struct feature_fme_ifpmon_vtd_ctl vtd_ctl;
+ struct feature_fme_ifpmon_vtd_ctr vtd_ctr;
+ struct feature_fme_ifpmon_vtd_sip_ctl vtd_sip_ctl;
+ struct feature_fme_ifpmon_vtd_sip_ctr vtd_sip_ctr;
+};
+
+enum dperf_fab_events {
+ DPERF_FAB_PCIE0_RD,
+ DPERF_FAB_PCIE0_WR,
+ DPERF_FAB_MMIO_RD = 6,
+ DPERF_FAB_MMIO_WR,
+};
+
+/* FPMON FAB Control */
+struct feature_fme_dfpmon_fab_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 reset_counters:1; /* Reset Counters */
+ u8 rsvd:7; /* Reserved */
+ u8 freeze:1; /* Set to 1 frozen counter */
+ u8 rsvd1:7; /* Reserved */
+ u8 fab_evtcode:4; /* Fabric Event Code */
+ u8 port_id:2; /* Port ID */
+ u8 rsvd2:1; /* Reserved */
+ u8 port_filter:1; /* Port Filter */
+ u64 rsvd3:40; /* Reserved */
+ };
+ };
+};
+
+/* FPMON Event Counter */
+struct feature_fme_dfpmon_fab_ctr {
+ union {
+ u64 csr;
+ struct {
+ u64 fab_cnt:60; /* Fabric event counter */
+ /* Fabric event code being reported */
+ u8 event_code:4;
+ };
+ };
+};
+
+/* FPMON Clock Counter */
+struct feature_fme_dfpmon_clk_ctr {
+ u64 afu_interf_clock; /* Clk_16UI (AFU clock) counter. */
+};
+
+/* FME DPERF FEATURE */
+struct feature_fme_dperf {
+ struct feature_header header;
+ u64 rsvd[3];
+ struct feature_fme_dfpmon_fab_ctl fab_ctl;
+ struct feature_fme_dfpmon_fab_ctr fab_ctr;
+ struct feature_fme_dfpmon_clk_ctr clk;
+};
+
+struct feature_fme_error0 {
+#define FME_ERROR0_MASK 0xFFUL
+#define FME_ERROR0_MASK_DEFAULT 0x40UL /* pcode workaround */
+ union {
+ u64 csr;
+ struct {
+ u8 fabric_err:1; /* Fabric error */
+ u8 fabfifo_overflow:1; /* Fabric fifo overflow */
+ u8 kticdc_parity_err:2;/* KTI CDC Parity Error */
+ u8 iommu_parity_err:1; /* IOMMU Parity error */
+ /* AFU PF/VF access mismatch detected */
+ u8 afu_acc_mode_err:1;
+ u8 mbp_err:1; /* Indicates an MBP event */
+ /* PCIE0 CDC Parity Error */
+ u8 pcie0cdc_parity_err:5;
+ /* PCIE1 CDC Parity Error */
+ u8 pcie1cdc_parity_err:5;
+ /* CVL CDC Parity Error */
+ u8 cvlcdc_parity_err:3;
+ u64 rsvd:44; /* Reserved */
+ };
+ };
+};
+
+/* PCIe0 Error Status register */
+struct feature_fme_pcie0_error {
+#define FME_PCIE0_ERROR_MASK 0xFFUL
+ union {
+ u64 csr;
+ struct {
+ u8 formattype_err:1; /* TLP format/type error */
+ u8 MWAddr_err:1; /* TLP MW address error */
+ u8 MWAddrLength_err:1; /* TLP MW length error */
+ u8 MRAddr_err:1; /* TLP MR address error */
+ u8 MRAddrLength_err:1; /* TLP MR length error */
+ u8 cpl_tag_err:1; /* TLP CPL tag error */
+ u8 cpl_status_err:1; /* TLP CPL status error */
+ u8 cpl_timeout_err:1; /* TLP CPL timeout */
+ u8 cci_parity_err:1; /* CCI bridge parity error */
+ u8 rxpoison_tlp_err:1; /* Received a TLP with EP set */
+ u64 rsvd:52; /* Reserved */
+ u8 vfnumb_err:1; /* Number of error VF */
+ u8 funct_type_err:1; /* Virtual (1) or Physical */
+ };
+ };
+};
+
+/* PCIe1 Error Status register */
+struct feature_fme_pcie1_error {
+#define FME_PCIE1_ERROR_MASK 0xFFUL
+ union {
+ u64 csr;
+ struct {
+ u8 formattype_err:1; /* TLP format/type error */
+ u8 MWAddr_err:1; /* TLP MW address error */
+ u8 MWAddrLength_err:1; /* TLP MW length error */
+ u8 MRAddr_err:1; /* TLP MR address error */
+ u8 MRAddrLength_err:1; /* TLP MR length error */
+ u8 cpl_tag_err:1; /* TLP CPL tag error */
+ u8 cpl_status_err:1; /* TLP CPL status error */
+ u8 cpl_timeout_err:1; /* TLP CPL timeout */
+ u8 cci_parity_err:1; /* CCI bridge parity error */
+ u8 rxpoison_tlp_err:1; /* Received a TLP with EP set */
+ u64 rsvd:54; /* Reserved */
+ };
+ };
+};
+
+/* FME First Error register */
+struct feature_fme_first_error {
+#define FME_FIRST_ERROR_MASK ((1ULL << 60) - 1)
+ union {
+ u64 csr;
+ struct {
+ /*
+ * Indicates the Error Register that was
+ * triggered first
+ */
+ u64 err_reg_status:60;
+ /*
+ * Holds 60 LSBs from the Error register that was
+ * triggered first
+ */
+ u8 errReg_id:4;
+ };
+ };
+};
+
+/* FME Next Error register */
+struct feature_fme_next_error {
+#define FME_NEXT_ERROR_MASK ((1ULL << 60) - 1)
+ union {
+ u64 csr;
+ struct {
+ /*
+ * Indicates the Error Register that was
+ * triggered second
+ */
+ u64 err_reg_status:60;
+ /*
+ * Holds 60 LSBs from the Error register that was
+ * triggered second
+ */
+ u8 errReg_id:4;
+ };
+ };
+};
+
+/* RAS Non Fatal Error Status register */
+struct feature_fme_ras_nonfaterror {
+ union {
+ u64 csr;
+ struct {
+ /* thermal threshold AP1 */
+ u8 temp_thresh_ap1:1;
+ /* thermal threshold AP2 */
+ u8 temp_thresh_ap2:1;
+ u8 pcie_error:1; /* pcie Error */
+ u8 portfatal_error:1; /* port fatal error */
+ u8 proc_hot:1; /* Indicates a ProcHot event */
+ /* Indicates an AFU PF/VF access mismatch */
+ u8 afu_acc_mode_err:1;
+ /* Injected nonfatal Error */
+ u8 injected_nonfata_err:1;
+ u8 rsvd1:2;
+ /* Temperature threshold triggered AP6*/
+ u8 temp_thresh_AP6:1;
+ /* Power threshold triggered AP1 */
+ u8 power_thresh_AP1:1;
+ /* Power threshold triggered AP2 */
+ u8 power_thresh_AP2:1;
+ /* Indicates a MBP event */
+ u8 mbp_err:1;
+ u64 rsvd2:51; /* Reserved */
+ };
+ };
+};
+
+/* RAS Catastrophic Fatal Error Status register */
+struct feature_fme_ras_catfaterror {
+ union {
+ u64 csr;
+ struct {
+ /* KTI Link layer error detected */
+ u8 ktilink_fatal_err:1;
+ /* tag-n-cache error detected */
+ u8 tagcch_fatal_err:1;
+ /* CCI error detected */
+ u8 cci_fatal_err:1;
+ /* KTI Protocol error detected */
+ u8 ktiprpto_fatal_err:1;
+ /* Fatal DRAM error detected */
+ u8 dram_fatal_err:1;
+ /* IOMMU fatal error detected */
+ u8 iommu_fatal_err:1;
+ /* Fabric Fatal Error */
+ u8 fabric_fatal_err:1;
+ /* PCIe poison Error */
+ u8 pcie_poison_err:1;
+ /* Injected fatal Error */
+ u8 inject_fata_err:1;
+ /* Catastrophic CRC Error */
+ u8 crc_catast_err:1;
+ /* Catastrophic Thermal Error */
+ u8 therm_catast_err:1;
+ /* Injected Catastrophic Error */
+ u8 injected_catast_err:1;
+ u64 rsvd:52;
+ };
+ };
+};
+
+/* RAS Error injection register */
+struct feature_fme_ras_error_inj {
+#define FME_RAS_ERROR_INJ_MASK 0x7UL
+ union {
+ u64 csr;
+ struct {
+ u8 catast_error:1; /* Catastrophic error flag */
+ u8 fatal_error:1; /* Fatal error flag */
+ u8 nonfatal_error:1; /* NonFatal error flag */
+ u64 rsvd:61; /* Reserved */
+ };
+ };
+};
+
+/* FME error capabilities */
+struct feature_fme_error_capability {
+ union {
+ u64 csr;
+ struct {
+ u8 support_intr:1;
+ /* MSI-X vector table entry number */
+ u16 intr_vector_num:12;
+ u64 rsvd:51; /* Reserved */
+ };
+ };
+};
+
+/* FME ERR FEATURE */
+struct feature_fme_err {
+ struct feature_header header;
+ struct feature_fme_error0 fme_err_mask;
+ struct feature_fme_error0 fme_err;
+ struct feature_fme_pcie0_error pcie0_err_mask;
+ struct feature_fme_pcie0_error pcie0_err;
+ struct feature_fme_pcie1_error pcie1_err_mask;
+ struct feature_fme_pcie1_error pcie1_err;
+ struct feature_fme_first_error fme_first_err;
+ struct feature_fme_next_error fme_next_err;
+ struct feature_fme_ras_nonfaterror ras_nonfat_mask;
+ struct feature_fme_ras_nonfaterror ras_nonfaterr;
+ struct feature_fme_ras_catfaterror ras_catfat_mask;
+ struct feature_fme_ras_catfaterror ras_catfaterr;
+ struct feature_fme_ras_error_inj ras_error_inj;
+ struct feature_fme_error_capability fme_err_capability;
+};
+
+/* FME Partial Reconfiguration Control */
+struct feature_fme_pr_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 pr_reset:1; /* Reset PR Engine */
+ u8 rsvd3:3; /* Reserved */
+ u8 pr_reset_ack:1; /* Reset PR Engine Ack */
+ u8 rsvd4:3; /* Reserved */
+ u8 pr_regionid:2; /* PR Region ID */
+ u8 rsvd1:2; /* Reserved */
+ u8 pr_start_req:1; /* PR Start Request */
+ u8 pr_push_complete:1; /* PR Data push complete */
+ u8 pr_kind:1; /* PR Kind */
+ u32 rsvd:17; /* Reserved */
+ u32 config_data; /* Config data TBD */
+ };
+ };
+};
+
+/* FME Partial Reconfiguration Status */
+struct feature_fme_pr_status {
+ union {
+ u64 csr;
+ struct {
+ u16 pr_credit:9; /* PR Credits */
+ u8 rsvd2:7; /* Reserved */
+ u8 pr_status:1; /* PR status */
+ u8 rsvd:3; /* Reserved */
+ /* Altera PR Controller Block status */
+ u8 pr_controller_status:3;
+ u8 rsvd1:1; /* Reserved */
+ u8 pr_host_status:4; /* PR Host status */
+ u8 rsvd3:4; /* Reserved */
+ /* Security Block Status fields (TBD) */
+ u32 security_bstatus;
+ };
+ };
+};
+
+/* FME Partial Reconfiguration Data */
+struct feature_fme_pr_data {
+ union {
+ u64 csr; /* PR data from the raw-binary file */
+ struct {
+ /* PR data from the raw-binary file */
+ u32 pr_data_raw;
+ u32 rsvd;
+ };
+ };
+};
+
+/* FME PR Public Key */
+struct feature_fme_pr_key {
+ u64 key; /* FME PR Public Hash */
+};
+
+/* FME PR FEATURE */
+struct feature_fme_pr {
+ struct feature_header header;
+ /*Partial Reconfiguration control */
+ struct feature_fme_pr_ctl ccip_fme_pr_control;
+
+ /* Partial Reconfiguration Status */
+ struct feature_fme_pr_status ccip_fme_pr_status;
+
+ /* Partial Reconfiguration data */
+ struct feature_fme_pr_data ccip_fme_pr_data;
+
+ /* Partial Reconfiguration data */
+ u64 ccip_fme_pr_err;
+
+ u64 rsvd1[3];
+
+ /* Partial Reconfiguration data registers */
+ u64 fme_pr_data1;
+ u64 fme_pr_data2;
+ u64 fme_pr_data3;
+ u64 fme_pr_data4;
+ u64 fme_pr_data5;
+ u64 fme_pr_data6;
+ u64 fme_pr_data7;
+ u64 fme_pr_data8;
+
+ u64 rsvd2[5];
+
+ /* PR Interface ID */
+ u64 fme_pr_intfc_id_l;
+ u64 fme_pr_intfc_id_h;
+
+ /* MSIX filed to be Added */
+};
+
+/* FME HSSI Control */
+struct feature_fme_hssi_eth_ctrl {
+ union {
+ u64 csr;
+ struct {
+ u32 data:32; /* HSSI data */
+ u16 address:16; /* HSSI address */
+ /*
+ * HSSI command
+ * 0x0 - No request
+ * 0x08 - SW register RD request
+ * 0x10 - SW register WR request
+ * 0x40 - Auxiliary bus RD request
+ * 0x80 - Auxiliary bus WR request
+ */
+ u16 cmd:16;
+ };
+ };
+};
+
+/* FME HSSI Status */
+struct feature_fme_hssi_eth_stat {
+ union {
+ u64 csr;
+ struct {
+ u32 data:32; /* HSSI data */
+ u8 acknowledge:1; /* HSSI acknowledge */
+ u8 spare:1; /* HSSI spare */
+ u32 rsvd:30; /* Reserved */
+ };
+ };
+};
+
+/* FME HSSI FEATURE */
+struct feature_fme_hssi {
+ struct feature_header header;
+ struct feature_fme_hssi_eth_ctrl hssi_control;
+ struct feature_fme_hssi_eth_stat hssi_status;
+};
+
+#define PORT_ERR_MASK 0xfff0703ff001f
+struct feature_port_err_key {
+ union {
+ u64 csr;
+ struct {
+ /* Tx Channel0: Overflow */
+ u8 tx_ch0_overflow:1;
+ /* Tx Channel0: Invalid request encoding */
+ u8 tx_ch0_invaldreq :1;
+ /* Tx Channel0: Request with cl_len=3 not supported */
+ u8 tx_ch0_cl_len3:1;
+ /* Tx Channel0: Request with cl_len=2 not aligned 2CL */
+ u8 tx_ch0_cl_len2:1;
+ /* Tx Channel0: Request with cl_len=4 not aligned 4CL */
+ u8 tx_ch0_cl_len4:1;
+
+ u16 rsvd1:4; /* Reserved */
+
+ /* AFU MMIO RD received while PORT is in reset */
+ u8 mmio_rd_whilerst:1;
+ /* AFU MMIO WR received while PORT is in reset */
+ u8 mmio_wr_whilerst:1;
+
+ u16 rsvd2:5; /* Reserved */
+
+ /* Tx Channel1: Overflow */
+ u8 tx_ch1_overflow:1;
+ /* Tx Channel1: Invalid request encoding */
+ u8 tx_ch1_invaldreq:1;
+ /* Tx Channel1: Request with cl_len=3 not supported */
+ u8 tx_ch1_cl_len3:1;
+ /* Tx Channel1: Request with cl_len=2 not aligned 2CL */
+ u8 tx_ch1_cl_len2:1;
+ /* Tx Channel1: Request with cl_len=4 not aligned 4CL */
+ u8 tx_ch1_cl_len4:1;
+
+ /* Tx Channel1: Insufficient data payload */
+ u8 tx_ch1_insuff_data:1;
+ /* Tx Channel1: Data payload overrun */
+ u8 tx_ch1_data_overrun:1;
+ /* Tx Channel1 : Incorrect address */
+ u8 tx_ch1_incorr_addr:1;
+ /* Tx Channel1 : NON-Zero SOP Detected */
+ u8 tx_ch1_nzsop:1;
+ /* Tx Channel1 : Illegal VC_SEL, atomic request VLO */
+ u8 tx_ch1_illegal_vcsel:1;
+
+ u8 rsvd3:6; /* Reserved */
+
+ /* MMIO Read Timeout in AFU */
+ u8 mmioread_timeout:1;
+
+ /* Tx Channel2: FIFO Overflow */
+ u8 tx_ch2_fifo_overflow:1;
+
+ /* MMIO read is not matching pending request */
+ u8 unexp_mmio_resp:1;
+
+ u8 rsvd4:5; /* Reserved */
+
+ /* Number of pending Requests: counter overflow */
+ u8 tx_req_counter_overflow:1;
+ /* Req with Address violating SMM Range */
+ u8 llpr_smrr_err:1;
+ /* Req with Address violating second SMM Range */
+ u8 llpr_smrr2_err:1;
+ /* Req with Address violating ME Stolen message */
+ u8 llpr_mesg_err:1;
+ /* Req with Address violating Generic Protected Range */
+ u8 genprot_range_err:1;
+ /* Req with Address violating Legacy Range low */
+ u8 legrange_low_err:1;
+ /* Req with Address violating Legacy Range High */
+ u8 legrange_high_err:1;
+ /* Req with Address violating VGA memory range */
+ u8 vgmem_range_err:1;
+ u8 page_fault_err:1; /* Page fault */
+ u8 pmr_err:1; /* PMR Error */
+ u8 ap6_event:1; /* AP6 event */
+ /* VF FLR detected on Port with PF access control */
+ u8 vfflr_access_err:1;
+ u16 rsvd5:12; /* Reserved */
+ };
+ };
+};
+
+/* Port first error register; does not contain all error bits of the error register. */
+struct feature_port_first_err_key {
+ union {
+ u64 csr;
+ struct {
+ u8 tx_ch0_overflow:1;
+ u8 tx_ch0_invaldreq :1;
+ u8 tx_ch0_cl_len3:1;
+ u8 tx_ch0_cl_len2:1;
+ u8 tx_ch0_cl_len4:1;
+ u8 rsvd1:4; /* Reserved */
+ u8 mmio_rd_whilerst:1;
+ u8 mmio_wr_whilerst:1;
+ u8 rsvd2:5; /* Reserved */
+ u8 tx_ch1_overflow:1;
+ u8 tx_ch1_invaldreq:1;
+ u8 tx_ch1_cl_len3:1;
+ u8 tx_ch1_cl_len2:1;
+ u8 tx_ch1_cl_len4:1;
+ u8 tx_ch1_insuff_data:1;
+ u8 tx_ch1_data_overrun:1;
+ u8 tx_ch1_incorr_addr:1;
+ u8 tx_ch1_nzsop:1;
+ u8 tx_ch1_illegal_vcsel:1;
+ u8 rsvd3:6; /* Reserved */
+ u8 mmioread_timeout:1;
+ u8 tx_ch2_fifo_overflow:1;
+ u8 rsvd4:6; /* Reserved */
+ u8 tx_req_counter_overflow:1;
+ u32 rsvd5:23; /* Reserved */
+ };
+ };
+};
+
+/* Port malformed Req0 */
+struct feature_port_malformed_req0 {
+ u64 header_lsb;
+};
+
+/* Port malformed Req1 */
+struct feature_port_malformed_req1 {
+ u64 header_msb;
+};
+
+/* Port debug register */
+struct feature_port_debug {
+ u64 port_debug;
+};
+
+/* Port error capabilities */
+struct feature_port_err_capability {
+ union {
+ u64 csr;
+ struct {
+ u8 support_intr:1;
+ /* MSI-X vector table entry number */
+ u16 intr_vector_num:12;
+ u64 rsvd:51; /* Reserved */
+ };
+ };
+};
+
+/* PORT FEATURE ERROR */
+struct feature_port_error {
+ struct feature_header header;
+ struct feature_port_err_key error_mask;
+ struct feature_port_err_key port_error;
+ struct feature_port_first_err_key port_first_error;
+ struct feature_port_malformed_req0 malreq0;
+ struct feature_port_malformed_req1 malreq1;
+ struct feature_port_debug port_debug;
+ struct feature_port_err_capability error_capability;
+};
+
+/* Port UMSG Capability */
+struct feature_port_umsg_cap {
+ union {
+ u64 csr;
+ struct {
+ /* Number of umsg allocated to this port */
+ u8 umsg_allocated;
+ /* Enable / Disable UMsg engine for this port */
+ u8 umsg_enable:1;
+ /* UMsg initialization status */
+ u8 umsg_init_complete:1;
+ /* IOMMU cannot translate the UMsg base address */
+ u8 umsg_trans_error:1;
+ u64 rsvd:53; /* Reserved */
+ };
+ };
+};
+
+/* Port UMSG base address */
+struct feature_port_umsg_baseaddr {
+ union {
+ u64 csr;
+ struct {
+ u64 base_addr:48; /* 48 bit physical address */
+ u16 rsvd; /* Reserved */
+ };
+ };
+};
+
+struct feature_port_umsg_mode {
+ union {
+ u64 csr;
+ struct {
+ u32 umsg_hint_enable; /* UMSG hint enable/disable */
+ u32 rsvd; /* Reserved */
+ };
+ };
+};
+
+/* PORT FEATURE UMSG */
+struct feature_port_umsg {
+ struct feature_header header;
+ struct feature_port_umsg_cap capability;
+ struct feature_port_umsg_baseaddr baseaddr;
+ struct feature_port_umsg_mode mode;
+};
+
+#define UMSG_EN_POLL_INVL 10 /* us */
+#define UMSG_EN_POLL_TIMEOUT 1000 /* us */
+
+/* Port UINT Capability */
+struct feature_port_uint_cap {
+ union {
+ u64 csr;
+ struct {
+ u16 intr_num:12; /* Supported interrupts num */
+ /* First MSI-X vector table entry number */
+ u16 first_vec_num:12;
+ u64 rsvd:40;
+ };
+ };
+};
+
+/* PORT FEATURE UINT */
+struct feature_port_uint {
+ struct feature_header header;
+ struct feature_port_uint_cap capability;
+};
+
+/* STP region supports mmap operation, so use page aligned size. */
+#define PORT_FEATURE_STP_REGION_SIZE \
+ IFPGA_PAGE_ALIGN(sizeof(struct feature_port_stp))
+
+/* Port STP status register (for debug only) */
+struct feature_port_stp_status {
+ union {
+ u64 csr;
+ struct {
+ /* SLD Hub end-point read/write timeout */
+ u8 sld_ep_timeout:1;
+ /* Remote STP in reset/disable */
+ u8 rstp_disabled:1;
+ u8 unsupported_read:1;
+ /* MMIO timeout detected and faked with a response */
+ u8 mmio_timeout:1;
+ u8 txfifo_count:4;
+ u8 rxfifo_count:4;
+ u8 txfifo_overflow:1;
+ u8 txfifo_underflow:1;
+ u8 rxfifo_overflow:1;
+ u8 rxfifo_underflow:1;
+ /* Number of MMIO write requests */
+ u16 write_requests;
+ /* Number of MMIO read requests */
+ u16 read_requests;
+ /* Number of MMIO read responses */
+ u16 read_responses;
+ };
+ };
+};
+
+/*
+ * PORT FEATURE STP
+ * Most registers in the STP region are not touched by the driver, but are
+ * mmapped to user space, so they are not defined in the data structure
+ * below; the actual region size is 0x18c per the spec.
+ */
+struct feature_port_stp {
+ struct feature_header header;
+ struct feature_port_stp_status stp_status;
+};
+
+/**
+ * enum fpga_pr_states - fpga PR states
+ * @FPGA_PR_STATE_UNKNOWN: can't determine state
+ * @FPGA_PR_STATE_WRITE_INIT: preparing FPGA for programming
+ * @FPGA_PR_STATE_WRITE_INIT_ERR: Error during WRITE_INIT stage
+ * @FPGA_PR_STATE_WRITE: writing image to FPGA
+ * @FPGA_PR_STATE_WRITE_ERR: Error while writing FPGA
+ * @FPGA_PR_STATE_WRITE_COMPLETE: Doing post programming steps
+ * @FPGA_PR_STATE_WRITE_COMPLETE_ERR: Error during WRITE_COMPLETE
+ * @FPGA_PR_STATE_DONE: FPGA PR done
+ */
+enum fpga_pr_states {
+ /* cannot determine state */
+ FPGA_PR_STATE_UNKNOWN,
+
+ /* write sequence: init, write, complete */
+ FPGA_PR_STATE_WRITE_INIT,
+ FPGA_PR_STATE_WRITE_INIT_ERR,
+ FPGA_PR_STATE_WRITE,
+ FPGA_PR_STATE_WRITE_ERR,
+ FPGA_PR_STATE_WRITE_COMPLETE,
+ FPGA_PR_STATE_WRITE_COMPLETE_ERR,
+
+ /* FPGA PR done */
+ FPGA_PR_STATE_DONE,
+};
+
+/*
+ * FPGA Manager flags
+ * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
+ */
+#define FPGA_MGR_PARTIAL_RECONFIG BIT(0)
+
+/**
+ * struct fpga_pr_info - information specific to an FPGA PR
+ * @flags: boolean flags as defined above
+ * @pr_err: PR error code
+ * @state: fpga manager state
+ * @port_id: port id
+ */
+struct fpga_pr_info {
+ u32 flags;
+ u64 pr_err;
+ enum fpga_pr_states state;
+ int port_id;
+};
+
+#define DEFINE_FPGA_PR_ERR_MSG(_name_) \
+static const char * const _name_[] = { \
+ "PR operation error detected", \
+ "PR CRC error detected", \
+ "PR incompatiable bitstream error detected", \
+ "PR IP protocol error detected", \
+ "PR FIFO overflow error detected", \
+ "PR timeout error detected", \
+ "PR secure load error detected", \
+}
+
+#define RST_POLL_INVL 10 /* us */
+#define RST_POLL_TIMEOUT 1000 /* us */
+
+#define PR_WAIT_TIMEOUT 15000000
+
+#define PR_HOST_STATUS_IDLE 0
+#define PR_MAX_ERR_NUM 7
+
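+/* Instantiate the PR error message table (PR_MAX_ERR_NUM entries). */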
+DEFINE_FPGA_PR_ERR_MSG(pr_err_msg);
+
+/*
+ * green bitstream header must be byte-packed to match the
+ * real file format.
+ */
+struct bts_header {
+ u64 guid_h;
+ u64 guid_l;
+ u32 metadata_len;
+};
+
+#define GBS_GUID_H 0x414750466e6f6558
+#define GBS_GUID_L 0x31303076534247b7
+#define is_valid_bts(bts_hdr) \
+ (((bts_hdr)->guid_h == GBS_GUID_H) && \
+ ((bts_hdr)->guid_l == GBS_GUID_L))
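+/*
+ * Usage sketch (illustrative only): validate a candidate green bitstream
+ * buffer before starting PR, assuming the buffer begins with a bts_header:
+ *
+ *	const struct bts_header *bts = buffer;
+ *
+ *	if (!is_valid_bts(bts))
+ *		return -EINVAL;
+ */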
+
+#pragma pack(pop)
+#endif /* _BASE_IFPGA_DEFINES_H_ */
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c b/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c
new file mode 100644
index 00000000..f0939dc3
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c
@@ -0,0 +1,821 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "opae_hw_api.h"
+#include "ifpga_api.h"
+
+#include "ifpga_hw.h"
+#include "ifpga_enumerate.h"
+#include "ifpga_feature_dev.h"
+
+struct build_feature_devs_info {
+ struct opae_adapter_data_pci *pci_data;
+
+ struct ifpga_afu_info *acc_info;
+
+ void *fiu;
+ enum fpga_id_type current_type;
+ int current_port_id;
+
+ void *ioaddr;
+ void *ioend;
+ uint64_t phys_addr;
+ int current_bar;
+
+ void *pfme_hdr;
+
+ struct ifpga_hw *hw;
+};
+
+struct feature_info {
+ const char *name;
+ u32 resource_size;
+ int feature_index;
+ int revision_id;
+ unsigned int vec_start;
+ unsigned int vec_cnt;
+
+ struct feature_ops *ops;
+};
+
+/* indexed by fme feature IDs which are defined in 'enum fme_feature_id'. */
+static struct feature_info fme_features[] = {
+ {
+ .name = FME_FEATURE_HEADER,
+ .resource_size = sizeof(struct feature_fme_header),
+ .feature_index = FME_FEATURE_ID_HEADER,
+ .revision_id = FME_HEADER_REVISION,
+ .ops = &fme_hdr_ops,
+ },
+ {
+ .name = FME_FEATURE_THERMAL_MGMT,
+ .resource_size = sizeof(struct feature_fme_thermal),
+ .feature_index = FME_FEATURE_ID_THERMAL_MGMT,
+ .revision_id = FME_THERMAL_MGMT_REVISION,
+ .ops = &fme_thermal_mgmt_ops,
+ },
+ {
+ .name = FME_FEATURE_POWER_MGMT,
+ .resource_size = sizeof(struct feature_fme_power),
+ .feature_index = FME_FEATURE_ID_POWER_MGMT,
+ .revision_id = FME_POWER_MGMT_REVISION,
+ .ops = &fme_power_mgmt_ops,
+ },
+ {
+ .name = FME_FEATURE_GLOBAL_IPERF,
+ .resource_size = sizeof(struct feature_fme_iperf),
+ .feature_index = FME_FEATURE_ID_GLOBAL_IPERF,
+ .revision_id = FME_GLOBAL_IPERF_REVISION,
+ .ops = &fme_global_iperf_ops,
+ },
+ {
+ .name = FME_FEATURE_GLOBAL_ERR,
+ .resource_size = sizeof(struct feature_fme_err),
+ .feature_index = FME_FEATURE_ID_GLOBAL_ERR,
+ .revision_id = FME_GLOBAL_ERR_REVISION,
+ .ops = &fme_global_err_ops,
+ },
+ {
+ .name = FME_FEATURE_PR_MGMT,
+ .resource_size = sizeof(struct feature_fme_pr),
+ .feature_index = FME_FEATURE_ID_PR_MGMT,
+ .revision_id = FME_PR_MGMT_REVISION,
+ .ops = &fme_pr_mgmt_ops,
+ },
+ {
+ .name = FME_FEATURE_HSSI_ETH,
+ .resource_size = sizeof(struct feature_fme_hssi),
+ .feature_index = FME_FEATURE_ID_HSSI_ETH,
+ .revision_id = FME_HSSI_ETH_REVISION
+ },
+ {
+ .name = FME_FEATURE_GLOBAL_DPERF,
+ .resource_size = sizeof(struct feature_fme_dperf),
+ .feature_index = FME_FEATURE_ID_GLOBAL_DPERF,
+ .revision_id = FME_GLOBAL_DPERF_REVISION,
+ .ops = &fme_global_dperf_ops,
+ }
+};
+
+static struct feature_info port_features[] = {
+ {
+ .name = PORT_FEATURE_HEADER,
+ .resource_size = sizeof(struct feature_port_header),
+ .feature_index = PORT_FEATURE_ID_HEADER,
+ .revision_id = PORT_HEADER_REVISION,
+ .ops = &port_hdr_ops,
+ },
+ {
+ .name = PORT_FEATURE_ERR,
+ .resource_size = sizeof(struct feature_port_error),
+ .feature_index = PORT_FEATURE_ID_ERROR,
+ .revision_id = PORT_ERR_REVISION,
+ .ops = &port_error_ops,
+ },
+ {
+ .name = PORT_FEATURE_UMSG,
+ .resource_size = sizeof(struct feature_port_umsg),
+ .feature_index = PORT_FEATURE_ID_UMSG,
+ .revision_id = PORT_UMSG_REVISION,
+ },
+ {
+ .name = PORT_FEATURE_UINT,
+ .resource_size = sizeof(struct feature_port_uint),
+ .feature_index = PORT_FEATURE_ID_UINT,
+ .revision_id = PORT_UINT_REVISION,
+ .ops = &port_uint_ops,
+ },
+ {
+ .name = PORT_FEATURE_STP,
+ .resource_size = PORT_FEATURE_STP_REGION_SIZE,
+ .feature_index = PORT_FEATURE_ID_STP,
+ .revision_id = PORT_STP_REVISION,
+ .ops = &port_stp_ops,
+ },
+ {
+ .name = PORT_FEATURE_UAFU,
+ /* UAFU feature size should be read from PORT_CAP.MMIOSIZE.
+ * It is set while parsing the port device.
+ */
+ .resource_size = 0,
+ .feature_index = PORT_FEATURE_ID_UAFU,
+ .revision_id = PORT_UAFU_REVISION
+ },
+};
+
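+/*
+ * Derive the canonical feature id from a Device Feature Header: FIU and
+ * AFU headers map to fixed ids, while private features carry their own
+ * id in the header.
+ */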
+static u64 feature_id(void __iomem *start)
+{
+ struct feature_header header;
+
+ header.csr = readq(start);
+
+ switch (header.type) {
+ case FEATURE_TYPE_FIU:
+ return FEATURE_ID_HEADER;
+ case FEATURE_TYPE_PRIVATE:
+ return header.id;
+ case FEATURE_TYPE_AFU:
+ return FEATURE_ID_AFU;
+ }
+
+ WARN_ON(1);
+ return 0;
+}
+
+static int
+build_info_add_sub_feature(struct build_feature_devs_info *binfo,
+ struct feature_info *finfo, void __iomem *start)
+{
+ struct ifpga_hw *hw = binfo->hw;
+ struct feature *feature = NULL;
+ int feature_idx = finfo->feature_index;
+ unsigned int vec_start = finfo->vec_start;
+ unsigned int vec_cnt = finfo->vec_cnt;
+ struct feature_irq_ctx *ctx = NULL;
+ int port_id, ret = 0;
+ unsigned int i;
+
+ if (binfo->current_type == FME_ID) {
+ feature = &hw->fme.sub_feature[feature_idx];
+ feature->parent = &hw->fme;
+ } else if (binfo->current_type == PORT_ID) {
+ port_id = binfo->current_port_id;
+ feature = &hw->port[port_id].sub_feature[feature_idx];
+ feature->parent = &hw->port[port_id];
+ } else {
+ return -EFAULT;
+ }
+
+ feature->state = IFPGA_FEATURE_ATTACHED;
+ feature->addr = start;
+ feature->id = feature_id(start);
+ feature->size = finfo->resource_size;
+ feature->name = finfo->name;
+ feature->revision = finfo->revision_id;
+ feature->ops = finfo->ops;
+ feature->phys_addr = binfo->phys_addr +
+ ((u8 *)start - (u8 *)binfo->ioaddr);
+
+ if (vec_cnt) {
+ if (vec_start + vec_cnt <= vec_start)
+ return -EINVAL;
+
+ ctx = zmalloc(sizeof(*ctx) * vec_cnt);
+ if (!ctx)
+ return -ENOMEM;
+
+ for (i = 0; i < vec_cnt; i++) {
+ ctx[i].eventfd = -1;
+ ctx[i].idx = vec_start + i;
+ }
+ }
+
+ feature->ctx = ctx;
+ feature->ctx_num = vec_cnt;
+ feature->vfio_dev_fd = binfo->pci_data->vfio_dev_fd;
+
+ return ret;
+}
+
+static int
+create_feature_instance(struct build_feature_devs_info *binfo,
+ void __iomem *start, struct feature_info *finfo)
+{
+ struct feature_header *hdr = start;
+
+ if (finfo->revision_id != SKIP_REVISION_CHECK &&
+ hdr->revision > finfo->revision_id) {
+ dev_err(binfo, "feature %s revision :default:%x, now at:%x, mis-match.\n",
+ finfo->name, finfo->revision_id, hdr->revision);
+ }
+
+ return build_info_add_sub_feature(binfo, finfo, start);
+}
+
+/*
+ * The UAFU GUID is dynamic, as it can change after the FME downloads a
+ * different Green Bitstream to the port, so we treat unknown GUIDs
+ * attached to the port's feature list as UAFU.
+ */
+static bool feature_is_UAFU(struct build_feature_devs_info *binfo)
+{
+ if (binfo->current_type != PORT_ID)
+ return false;
+
+ return true;
+}
+
+static int parse_feature_port_uafu(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ enum port_feature_id id = PORT_FEATURE_ID_UAFU;
+ struct ifpga_afu_info *info;
+ void *start = (void *)hdr;
+ int ret;
+
+ if (port_features[id].resource_size) {
+ ret = create_feature_instance(binfo, hdr, &port_features[id]);
+ } else {
+ dev_err(binfo, "the uafu feature header is mis-configured.\n");
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* FIXME: need to figure out a better name */
+ info = malloc(sizeof(*info));
+ if (!info)
+ return -ENOMEM;
+
+ info->region[0].addr = start;
+ info->region[0].phys_addr = binfo->phys_addr +
+ (uint8_t *)start - (uint8_t *)binfo->ioaddr;
+ info->region[0].len = port_features[id].resource_size;
+ port_features[id].resource_size = 0;
+ info->num_regions = 1;
+
+ binfo->acc_info = info;
+
+ return ret;
+}
+
+static int parse_feature_afus(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ int ret;
+ struct feature_afu_header *afu_hdr, header;
+ u8 __iomem *start;
+ u8 __iomem *end = binfo->ioend;
+
+ start = (u8 __iomem *)hdr;
+ for (; start < end; start += header.next_afu) {
+ if ((unsigned int)(end - start) <
+ (unsigned int)(sizeof(*afu_hdr) + sizeof(*hdr)))
+ return -EINVAL;
+
+ hdr = (struct feature_header *)start;
+ afu_hdr = (struct feature_afu_header *)(hdr + 1);
+ header.csr = readq(&afu_hdr->csr);
+
+ if (feature_is_UAFU(binfo)) {
+ ret = parse_feature_port_uafu(binfo, hdr);
+ if (ret)
+ return ret;
+ }
+
+ if (!header.next_afu)
+ break;
+ }
+
+ return 0;
+}
+
+/* create and register proper private data */
+static int build_info_commit_dev(struct build_feature_devs_info *binfo)
+{
+ struct ifpga_afu_info *info = binfo->acc_info;
+ struct ifpga_hw *hw = binfo->hw;
+ struct opae_manager *mgr;
+ struct opae_bridge *br;
+ struct opae_accelerator *acc;
+
+ if (!binfo->fiu)
+ return 0;
+
+ if (binfo->current_type == PORT_ID) {
+ /* return error if no valid acc info data structure */
+ if (!info)
+ return -EFAULT;
+
+ br = opae_bridge_alloc(hw->adapter->name, &ifpga_br_ops,
+ binfo->fiu);
+ if (!br)
+ return -ENOMEM;
+
+ br->id = binfo->current_port_id;
+
+ /* update irq info */
+ info->num_irqs = port_features[PORT_FEATURE_ID_UINT].vec_cnt;
+
+ acc = opae_accelerator_alloc(hw->adapter->name,
+ &ifpga_acc_ops, info);
+ if (!acc) {
+ opae_bridge_free(br);
+ return -ENOMEM;
+ }
+
+ acc->br = br;
+ acc->index = br->id;
+
+ opae_adapter_add_acc(hw->adapter, acc);
+
+ } else if (binfo->current_type == FME_ID) {
+ mgr = opae_manager_alloc(hw->adapter->name, &ifpga_mgr_ops,
+ binfo->fiu);
+ if (!mgr)
+ return -ENOMEM;
+
+ mgr->adapter = hw->adapter;
+ hw->adapter->mgr = mgr;
+ }
+
+ binfo->fiu = NULL;
+
+ return 0;
+}
+
+static int
+build_info_create_dev(struct build_feature_devs_info *binfo,
+ enum fpga_id_type type, unsigned int index)
+{
+ int ret;
+
+ ret = build_info_commit_dev(binfo);
+ if (ret)
+ return ret;
+
+ binfo->current_type = type;
+
+ if (type == FME_ID) {
+ binfo->fiu = &binfo->hw->fme;
+ } else if (type == PORT_ID) {
+ binfo->fiu = &binfo->hw->port[index];
+ binfo->current_port_id = index;
+ }
+
+ return 0;
+}
+
+static int parse_feature_fme(struct build_feature_devs_info *binfo,
+ struct feature_header *start)
+{
+ struct ifpga_hw *hw = binfo->hw;
+ struct ifpga_fme_hw *fme = &hw->fme;
+ int ret;
+
+ ret = build_info_create_dev(binfo, FME_ID, 0);
+ if (ret)
+ return ret;
+
+ /* Update FME states */
+ fme->state = IFPGA_FME_IMPLEMENTED;
+ fme->parent = hw;
+ spinlock_init(&fme->lock);
+
+ return create_feature_instance(binfo, start,
+ &fme_features[FME_FEATURE_ID_HEADER]);
+}
+
+static int parse_feature_port(struct build_feature_devs_info *binfo,
+ void __iomem *start)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_capability capability;
+ struct ifpga_hw *hw = binfo->hw;
+ struct ifpga_port_hw *port;
+ unsigned int port_id;
+ int ret;
+
+ /* Get current port's id */
+ port_hdr = (struct feature_port_header *)start;
+ capability.csr = readq(&port_hdr->capability);
+ port_id = capability.port_number;
+
+ ret = build_info_create_dev(binfo, PORT_ID, port_id);
+ if (ret)
+ return ret;
+
+ /* Found a Port device */
+ port = &hw->port[port_id];
+ port->port_id = binfo->current_port_id;
+ port->parent = hw;
+ port->state = IFPGA_PORT_ATTACHED;
+ spinlock_init(&port->lock);
+
+ return create_feature_instance(binfo, start,
+ &port_features[PORT_FEATURE_ID_HEADER]);
+}
+
+static void enable_port_uafu(struct build_feature_devs_info *binfo,
+ void __iomem *start)
+{
+ enum port_feature_id id = PORT_FEATURE_ID_UAFU;
+ struct feature_port_header *port_hdr;
+ struct feature_port_capability capability;
+ struct ifpga_port_hw *port = &binfo->hw->port[binfo->current_port_id];
+
+ port_hdr = (struct feature_port_header *)start;
+ capability.csr = readq(&port_hdr->capability);
+ port_features[id].resource_size = (capability.mmio_size << 10);
+
+ /*
+ * Per the spec, to enable the UAFU, the related port must be reset,
+ * otherwise the whole MMIO space in this UAFU is invalid.
+ */
+ if (port_features[id].resource_size)
+ fpga_port_reset(port);
+}
+
+static int parse_feature_fiu(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ struct feature_header header;
+ struct feature_fiu_header *fiu_hdr, fiu_header;
+ u8 __iomem *start = (u8 __iomem *)hdr;
+ int ret;
+
+ header.csr = readq(hdr);
+
+ switch (header.id) {
+ case FEATURE_FIU_ID_FME:
+ ret = parse_feature_fme(binfo, hdr);
+ binfo->pfme_hdr = hdr;
+ if (ret)
+ return ret;
+ break;
+ case FEATURE_FIU_ID_PORT:
+ ret = parse_feature_port(binfo, hdr);
+ enable_port_uafu(binfo, hdr);
+ if (ret)
+ return ret;
+
+ /* Check Port FIU's next_afu pointer to User AFU DFH */
+ fiu_hdr = (struct feature_fiu_header *)(hdr + 1);
+ fiu_header.csr = readq(&fiu_hdr->csr);
+
+ if (fiu_header.next_afu) {
+ start += fiu_header.next_afu;
+ ret = parse_feature_afus(binfo,
+ (struct feature_header *)start);
+ if (ret)
+ return ret;
+ } else {
+ dev_info(binfo, "No AFUs detected on Port\n");
+ }
+
+ break;
+ default:
+ dev_info(binfo, "FIU TYPE %d is not supported yet.\n",
+ header.id);
+ }
+
+ return 0;
+}
+
+static void parse_feature_irqs(struct build_feature_devs_info *binfo,
+ void __iomem *start, struct feature_info *finfo)
+{
+ finfo->vec_start = 0;
+ finfo->vec_cnt = 0;
+
+ UNUSED(binfo);
+
+ if (!strcmp(finfo->name, PORT_FEATURE_UINT)) {
+ struct feature_port_uint *port_uint = start;
+ struct feature_port_uint_cap uint_cap;
+
+ uint_cap.csr = readq(&port_uint->capability);
+ if (uint_cap.intr_num) {
+ finfo->vec_start = uint_cap.first_vec_num;
+ finfo->vec_cnt = uint_cap.intr_num;
+ } else {
+ dev_debug(binfo, "UAFU doesn't support interrupt\n");
+ }
+ } else if (!strcmp(finfo->name, PORT_FEATURE_ERR)) {
+ struct feature_port_error *port_err = start;
+ struct feature_port_err_capability port_err_cap;
+
+ port_err_cap.csr = readq(&port_err->error_capability);
+ if (port_err_cap.support_intr) {
+ finfo->vec_start = port_err_cap.intr_vector_num;
+ finfo->vec_cnt = 1;
+ } else {
+ dev_debug(&binfo, "Port error doesn't support interrupt\n");
+ }
+
+ } else if (!strcmp(finfo->name, FME_FEATURE_GLOBAL_ERR)) {
+ struct feature_fme_err *fme_err = start;
+ struct feature_fme_error_capability fme_err_cap;
+
+ fme_err_cap.csr = readq(&fme_err->fme_err_capability);
+ if (fme_err_cap.support_intr) {
+ finfo->vec_start = fme_err_cap.intr_vector_num;
+ finfo->vec_cnt = 1;
+ } else {
+ dev_debug(&binfo, "FME error doesn't support interrupt\n");
+ }
+ }
+}
+
+static int parse_feature_fme_private(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ struct feature_header header;
+
+ header.csr = readq(hdr);
+
+ if (header.id >= ARRAY_SIZE(fme_features)) {
+ dev_err(binfo, "FME feature id %x is not supported yet.\n",
+ header.id);
+ return 0;
+ }
+
+ parse_feature_irqs(binfo, hdr, &fme_features[header.id]);
+
+ return create_feature_instance(binfo, hdr, &fme_features[header.id]);
+}
+
+static int parse_feature_port_private(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ struct feature_header header;
+ enum port_feature_id id;
+
+ header.csr = readq(hdr);
+ /*
+ * the range of port feature ids is [0x10, 0x13]; +1 reserves index 0,
+ * which is dedicated to the port header.
+ */
+ id = (header.id & 0x000f) + 1;
+
+ if (id >= ARRAY_SIZE(port_features)) {
+ dev_err(binfo, "Port feature id %x is not supported yet.\n",
+ header.id);
+ return 0;
+ }
+
+ parse_feature_irqs(binfo, hdr, &port_features[id]);
+
+ return create_feature_instance(binfo, hdr, &port_features[id]);
+}
+
+static int parse_feature_private(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ struct feature_header header;
+
+ header.csr = readq(hdr);
+
+ switch (binfo->current_type) {
+ case FME_ID:
+ return parse_feature_fme_private(binfo, hdr);
+ case PORT_ID:
+ return parse_feature_port_private(binfo, hdr);
+ default:
+ dev_err(binfo, "private feature %x belonging to AFU %d (unknown_type) is not supported yet.\n",
+ header.id, binfo->current_type);
+ }
+ return 0;
+}
+
+static int parse_feature(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ struct feature_header header;
+ int ret = 0;
+
+ header.csr = readq(hdr);
+
+ switch (header.type) {
+ case FEATURE_TYPE_AFU:
+ ret = parse_feature_afus(binfo, hdr);
+ break;
+ case FEATURE_TYPE_PRIVATE:
+ ret = parse_feature_private(binfo, hdr);
+ break;
+ case FEATURE_TYPE_FIU:
+ ret = parse_feature_fiu(binfo, hdr);
+ break;
+ default:
+ dev_err(binfo, "Feature Type %x is not supported.\n",
+ header.type);
+ }
+
+ return ret;
+}
+
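+/*
+ * Walk a device feature list: each feature header's next_header_offset
+ * points to the next header, until the offset is zero or the mapped
+ * region is exhausted.
+ */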
+static int
+parse_feature_list(struct build_feature_devs_info *binfo, u8 __iomem *start)
+{
+ struct feature_header *hdr, header;
+ u8 __iomem *end = (u8 __iomem *)binfo->ioend;
+ int ret = 0;
+
+ for (; start < end; start += header.next_header_offset) {
+ if ((unsigned int)(end - start) < (unsigned int)sizeof(*hdr)) {
+ dev_err(binfo, "The region is too small to contain a feature.\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ hdr = (struct feature_header *)start;
+ ret = parse_feature(binfo, hdr);
+ if (ret)
+ return ret;
+
+ header.csr = readq(hdr);
+ if (!header.next_header_offset)
+ break;
+ }
+
+ return build_info_commit_dev(binfo);
+}
+
+/* switch the memory mapping to BAR# @bar */
+static int parse_switch_to(struct build_feature_devs_info *binfo, int bar)
+{
+ struct opae_adapter_data_pci *pci_data = binfo->pci_data;
+
+ if (!pci_data->region[bar].addr)
+ return -ENOMEM;
+
+ binfo->ioaddr = pci_data->region[bar].addr;
+ binfo->ioend = (u8 __iomem *)binfo->ioaddr + pci_data->region[bar].len;
+ binfo->phys_addr = pci_data->region[bar].phys_addr;
+ binfo->current_bar = bar;
+
+ return 0;
+}
+
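+/*
+ * Walk the port entries in the FME header: for each implemented port
+ * that is PF-accessible, switch to its BAR and parse the feature list
+ * at the recorded offset.
+ */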
+static int parse_ports_from_fme(struct build_feature_devs_info *binfo)
+{
+ struct feature_fme_header *fme_hdr;
+ struct feature_fme_port port;
+ int i = 0, ret = 0;
+
+ if (!binfo->pfme_hdr) {
+ dev_info(binfo, "VF is detected.\n");
+ return ret;
+ }
+
+ fme_hdr = binfo->pfme_hdr;
+
+ do {
+ port.csr = readq(&fme_hdr->port[i]);
+ if (!port.port_implemented)
+ break;
+
+ /* skip port which only could be accessed via VF */
+ if (port.afu_access_control == FME_AFU_ACCESS_VF)
+ continue;
+
+ ret = parse_switch_to(binfo, port.port_bar);
+ if (ret)
+ break;
+
+ ret = parse_feature_list(binfo,
+ (u8 __iomem *)binfo->ioaddr +
+ port.port_offset);
+ if (ret)
+ break;
+ } while (++i < MAX_FPGA_PORT_NUM);
+
+ return ret;
+}
+
+static struct build_feature_devs_info *
+build_info_alloc_and_init(struct ifpga_hw *hw)
+{
+ struct build_feature_devs_info *binfo;
+
+ binfo = zmalloc(sizeof(*binfo));
+ if (!binfo)
+ return binfo;
+
+ binfo->hw = hw;
+ binfo->pci_data = hw->pci_data;
+
+ /* fpga feature list starts from BAR 0 */
+ if (parse_switch_to(binfo, 0)) {
+ free(binfo);
+ return NULL;
+ }
+
+ return binfo;
+}
+
+static void build_info_free(struct build_feature_devs_info *binfo)
+{
+ free(binfo);
+}
+
+static void ifpga_print_device_feature_list(struct ifpga_hw *hw)
+{
+ struct ifpga_fme_hw *fme = &hw->fme;
+ struct ifpga_port_hw *port;
+ struct feature *feature;
+ int i, j;
+
+ dev_info(hw, "found fme_device, is in PF: %s\n",
+ is_ifpga_hw_pf(hw) ? "yes" : "no");
+
+ for (i = 0; i < FME_FEATURE_ID_MAX; i++) {
+ feature = &fme->sub_feature[i];
+ if (feature->state != IFPGA_FEATURE_ATTACHED)
+ continue;
+
+ dev_info(hw, "%12s: 0x%p - 0x%p - paddr: 0x%lx\n",
+ feature->name, feature->addr,
+ feature->addr + feature->size - 1,
+ (unsigned long)feature->phys_addr);
+ }
+
+ for (i = 0; i < MAX_FPGA_PORT_NUM; i++) {
+ port = &hw->port[i];
+
+ if (port->state != IFPGA_PORT_ATTACHED)
+ continue;
+
+ dev_info(hw, "port device: %d\n", port->port_id);
+
+ for (j = 0; j < PORT_FEATURE_ID_MAX; j++) {
+ feature = &port->sub_feature[j];
+ if (feature->state != IFPGA_FEATURE_ATTACHED)
+ continue;
+
+ dev_info(hw, "%12s: 0x%p - 0x%p - paddr:0x%lx\n",
+ feature->name,
+ feature->addr,
+ feature->addr +
+ feature->size - 1,
+ (unsigned long)feature->phys_addr);
+ }
+ }
+}
+
+int ifpga_bus_enumerate(struct ifpga_hw *hw)
+{
+ struct build_feature_devs_info *binfo;
+ int ret;
+
+ binfo = build_info_alloc_and_init(hw);
+ if (!binfo)
+ return -ENOMEM;
+
+ ret = parse_feature_list(binfo, binfo->ioaddr);
+ if (ret)
+ goto exit;
+
+ ret = parse_ports_from_fme(binfo);
+ if (ret)
+ goto exit;
+
+ ifpga_print_device_feature_list(hw);
+
+exit:
+ build_info_free(binfo);
+ return ret;
+}
+
+int ifpga_bus_init(struct ifpga_hw *hw)
+{
+ int i;
+
+ fme_hw_init(&hw->fme);
+ for (i = 0; i < MAX_FPGA_PORT_NUM; i++)
+ port_hw_init(&hw->port[i]);
+
+ return 0;
+}
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.h b/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.h
new file mode 100644
index 00000000..14131e32
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_ENUMERATE_H_
+#define _IFPGA_ENUMERATE_H_
+
+int ifpga_bus_init(struct ifpga_hw *hw);
+int ifpga_bus_enumerate(struct ifpga_hw *hw);
+
+#endif /* _IFPGA_ENUMERATE_H_ */
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.c b/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.c
new file mode 100644
index 00000000..be7ac9ee
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.c
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <sys/ioctl.h>
+
+#include "ifpga_feature_dev.h"
+
+/*
+ * Enable the port by clearing the port soft reset bit, which is set by
+ * default. The AFU is unable to respond to any MMIO access while in reset.
+ * __fpga_port_enable should only be used after __fpga_port_disable.
+ */
+void __fpga_port_enable(struct ifpga_port_hw *port)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_control control;
+
+ WARN_ON(!port->disable_count);
+
+ if (--port->disable_count != 0)
+ return;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+ WARN_ON(!port_hdr);
+
+ control.csr = readq(&port_hdr->control);
+ control.port_sftrst = 0x0;
+ writeq(control.csr, &port_hdr->control);
+}
+
+int __fpga_port_disable(struct ifpga_port_hw *port)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_control control;
+
+ if (port->disable_count++ != 0)
+ return 0;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+ WARN_ON(!port_hdr);
+
+ /* Set port soft reset */
+ control.csr = readq(&port_hdr->control);
+ control.port_sftrst = 0x1;
+ writeq(control.csr, &port_hdr->control);
+
+ /*
+ * HW sets the ack bit to 1 when all outstanding requests have been drained
+ * on this port and the minimum soft reset pulse width has elapsed.
+ * The driver polls port_sftrst_ack to determine whether HW has finished the reset.
+ */
+ control.port_sftrst_ack = 1;
+
+ if (fpga_wait_register_field(port_sftrst_ack, control,
+ &port_hdr->control, RST_POLL_TIMEOUT,
+ RST_POLL_INVL)) {
+ dev_err(port, "timeout, fail to reset device\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int fpga_get_afu_uuid(struct ifpga_port_hw *port, struct uuid *uuid)
+{
+ struct feature_port_header *port_hdr;
+ u64 guidl, guidh;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port, PORT_FEATURE_ID_UAFU);
+
+ spinlock_lock(&port->lock);
+ guidl = readq(&port_hdr->afu_header.guid.b[0]);
+ guidh = readq(&port_hdr->afu_header.guid.b[8]);
+ spinlock_unlock(&port->lock);
+
+ memcpy(uuid->b, &guidl, sizeof(u64));
+ memcpy(uuid->b + 8, &guidh, sizeof(u64));
+
+ return 0;
+}
+
+/* Mask / Unmask Port Errors by the Error Mask register. */
+void port_err_mask(struct ifpga_port_hw *port, bool mask)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_err_key err_mask;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+
+ if (mask)
+ err_mask.csr = PORT_ERR_MASK;
+ else
+ err_mask.csr = 0;
+
+ writeq(err_mask.csr, &port_err->error_mask);
+}
+
+/* Clear All Port Errors. */
+int port_err_clear(struct ifpga_port_hw *port, u64 err)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_error *port_err;
+ struct feature_port_err_key mask;
+ struct feature_port_first_err_key first;
+ struct feature_port_status status;
+ int ret = 0;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ /*
+ * Clear All Port Errors
+ *
+ * - Check for AP6 State
+ * - Halt Port by keeping Port in reset
+ * - Set PORT Error mask to all 1 to mask errors
+ * - Clear all errors
+ * - Set Port mask to all 0 to enable errors
+ * - All errors start capturing new errors
+ * - Enable Port by pulling the port out of reset
+ */
+
+ /* If the device is still in AP6 state, no error can be cleared. */
+ status.csr = readq(&port_hdr->status);
+ if (status.power_state == PORT_POWER_STATE_AP6) {
+ dev_err(port, "Could not clear errors, device in AP6 state.\n");
+ return -EBUSY;
+ }
+
+ /* Halt Port by keeping Port in reset */
+ ret = __fpga_port_disable(port);
+ if (ret)
+ return ret;
+
+ /* Mask all errors */
+ port_err_mask(port, true);
+
+ /* Clear errors only if the err input matches the current port errors. */
+ mask.csr = readq(&port_err->port_error);
+
+ if (mask.csr == err) {
+ writeq(mask.csr, &port_err->port_error);
+
+ first.csr = readq(&port_err->port_first_error);
+ writeq(first.csr, &port_err->port_first_error);
+ } else {
+ ret = -EBUSY;
+ }
+
+ /* Clear mask */
+ port_err_mask(port, false);
+
+ /* Enable the port by clearing the reset */
+ __fpga_port_enable(port);
+
+ return ret;
+}
+
+int port_clear_error(struct ifpga_port_hw *port)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_err_key error;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+ error.csr = readq(&port_err->port_error);
+
+ dev_info(port, "read port error: 0x%lx\n", (unsigned long)error.csr);
+
+ return port_err_clear(port, error.csr);
+}
+
+void fme_hw_uinit(struct ifpga_fme_hw *fme)
+{
+ struct feature *feature;
+ int i;
+
+ if (fme->state != IFPGA_FME_IMPLEMENTED)
+ return;
+
+ for (i = 0; i < FME_FEATURE_ID_MAX; i++) {
+ feature = &fme->sub_feature[i];
+ if (feature->state == IFPGA_FEATURE_ATTACHED &&
+ feature->ops && feature->ops->uinit)
+ feature->ops->uinit(feature);
+ }
+}
+
+int fme_hw_init(struct ifpga_fme_hw *fme)
+{
+ struct feature *feature;
+ int i, ret;
+
+ if (fme->state != IFPGA_FME_IMPLEMENTED)
+ return -EINVAL;
+
+ for (i = 0; i < FME_FEATURE_ID_MAX; i++) {
+ feature = &fme->sub_feature[i];
+ if (feature->state == IFPGA_FEATURE_ATTACHED &&
+ feature->ops && feature->ops->init) {
+ ret = feature->ops->init(feature);
+ if (ret) {
+ fme_hw_uinit(fme);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void port_hw_uinit(struct ifpga_port_hw *port)
+{
+ struct feature *feature;
+ int i;
+
+ for (i = 0; i < PORT_FEATURE_ID_MAX; i++) {
+ feature = &port->sub_feature[i];
+ if (feature->state == IFPGA_FEATURE_ATTACHED &&
+ feature->ops && feature->ops->uinit)
+ feature->ops->uinit(feature);
+ }
+}
+
+int port_hw_init(struct ifpga_port_hw *port)
+{
+ struct feature *feature;
+ int i, ret;
+
+ if (port->state == IFPGA_PORT_UNUSED)
+ return 0;
+
+ for (i = 0; i < PORT_FEATURE_ID_MAX; i++) {
+ feature = &port->sub_feature[i];
+ if (feature->ops && feature->ops->init) {
+ ret = feature->ops->init(feature);
+ if (ret) {
+ port_hw_uinit(port);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h b/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h
new file mode 100644
index 00000000..7a39a580
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_FEATURE_DEV_H_
+#define _IFPGA_FEATURE_DEV_H_
+
+#include "ifpga_hw.h"
+
+static inline struct ifpga_port_hw *
+get_port(struct ifpga_hw *hw, u32 port_id)
+{
+ if (!is_valid_port_id(hw, port_id))
+ return NULL;
+
+ return &hw->port[port_id];
+}
+
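+/* Iterate over all sub-features of an FME or port instance. */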
+#define ifpga_for_each_fme_feature(hw, feature) \
+ for ((feature) = (hw)->sub_feature; \
+ (feature) < (hw)->sub_feature + (FME_FEATURE_ID_MAX); (feature)++)
+
+#define ifpga_for_each_port_feature(hw, feature) \
+ for ((feature) = (hw)->sub_feature; \
+ (feature) < (hw)->sub_feature + (PORT_FEATURE_ID_MAX); (feature)++)
+
+static inline struct feature *
+get_fme_feature_by_id(struct ifpga_fme_hw *fme, u64 id)
+{
+ struct feature *feature;
+
+ ifpga_for_each_fme_feature(fme, feature) {
+ if (feature->id == id)
+ return feature;
+ }
+
+ return NULL;
+}
+
+static inline struct feature *
+get_port_feature_by_id(struct ifpga_port_hw *port, u64 id)
+{
+ struct feature *feature;
+
+ ifpga_for_each_port_feature(port, feature) {
+ if (feature->id == id)
+ return feature;
+ }
+
+ return NULL;
+}
+
+static inline void *
+get_fme_feature_ioaddr_by_index(struct ifpga_fme_hw *fme, int index)
+{
+ return fme->sub_feature[index].addr;
+}
+
+static inline void *
+get_port_feature_ioaddr_by_index(struct ifpga_port_hw *port, int index)
+{
+ return port->sub_feature[index].addr;
+}
+
+static inline bool
+is_fme_feature_present(struct ifpga_fme_hw *fme, int index)
+{
+ return !!get_fme_feature_ioaddr_by_index(fme, index);
+}
+
+static inline bool
+is_port_feature_present(struct ifpga_port_hw *port, int index)
+{
+ return !!get_port_feature_ioaddr_by_index(port, index);
+}
+
+int fpga_get_afu_uuid(struct ifpga_port_hw *port, struct uuid *uuid);
+
+int __fpga_port_disable(struct ifpga_port_hw *port);
+void __fpga_port_enable(struct ifpga_port_hw *port);
+
+static inline int fpga_port_disable(struct ifpga_port_hw *port)
+{
+ int ret;
+
+ spinlock_lock(&port->lock);
+ ret = __fpga_port_disable(port);
+ spinlock_unlock(&port->lock);
+ return ret;
+}
+
+static inline int fpga_port_enable(struct ifpga_port_hw *port)
+{
+ spinlock_lock(&port->lock);
+ __fpga_port_enable(port);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static inline int __fpga_port_reset(struct ifpga_port_hw *port)
+{
+ int ret;
+
+ ret = __fpga_port_disable(port);
+ if (ret)
+ return ret;
+
+ __fpga_port_enable(port);
+
+ return 0;
+}
+
+static inline int fpga_port_reset(struct ifpga_port_hw *port)
+{
+ int ret;
+
+ spinlock_lock(&port->lock);
+ ret = __fpga_port_reset(port);
+ spinlock_unlock(&port->lock);
+ return ret;
+}
+
+int do_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size,
+ u64 *status);
+
+int fme_get_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop);
+int fme_set_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop);
+int fme_set_irq(struct ifpga_fme_hw *fme, u32 feature_id, void *irq_set);
+
+int fme_hw_init(struct ifpga_fme_hw *fme);
+void fme_hw_uinit(struct ifpga_fme_hw *fme);
+void port_hw_uinit(struct ifpga_port_hw *port);
+int port_hw_init(struct ifpga_port_hw *port);
+int port_clear_error(struct ifpga_port_hw *port);
+void port_err_mask(struct ifpga_port_hw *port, bool mask);
+int port_err_clear(struct ifpga_port_hw *port, u64 err);
+
+extern struct feature_ops fme_hdr_ops;
+extern struct feature_ops fme_thermal_mgmt_ops;
+extern struct feature_ops fme_power_mgmt_ops;
+extern struct feature_ops fme_global_err_ops;
+extern struct feature_ops fme_pr_mgmt_ops;
+extern struct feature_ops fme_global_iperf_ops;
+extern struct feature_ops fme_global_dperf_ops;
+
+int port_get_prop(struct ifpga_port_hw *port, struct feature_prop *prop);
+int port_set_prop(struct ifpga_port_hw *port, struct feature_prop *prop);
+
+/* This struct is used when parsing uafu irq_set */
+struct fpga_uafu_irq_set {
+ u32 start;
+ u32 count;
+ s32 *evtfds;
+};
+
+int port_set_irq(struct ifpga_port_hw *port, u32 feature_id, void *irq_set);
+
+extern struct feature_ops port_hdr_ops;
+extern struct feature_ops port_error_ops;
+extern struct feature_ops port_stp_ops;
+extern struct feature_ops port_uint_ops;
+
+/* helper functions for feature ops */
+int fpga_msix_set_block(struct feature *feature, unsigned int start,
+ unsigned int count, s32 *fds);
+
+#endif /* _IFPGA_FEATURE_DEV_H_ */
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_fme.c b/drivers/raw/ifpga_rawdev/base/ifpga_fme.c
new file mode 100644
index 00000000..4be60c0c
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_fme.c
@@ -0,0 +1,734 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+#define PWR_THRESHOLD_MAX 0x7F
+
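+/*
+ * FME property and irq accessors: look up the owning sub-feature by id
+ * and dispatch to its get_prop/set_prop/set_irq ops when implemented.
+ */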
+int fme_get_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
+{
+ struct feature *feature;
+
+ if (!fme)
+ return -ENOENT;
+
+ feature = get_fme_feature_by_id(fme, prop->feature_id);
+
+ if (feature && feature->ops && feature->ops->get_prop)
+ return feature->ops->get_prop(feature, prop);
+
+ return -ENOENT;
+}
+
+int fme_set_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
+{
+ struct feature *feature;
+
+ if (!fme)
+ return -ENOENT;
+
+ feature = get_fme_feature_by_id(fme, prop->feature_id);
+
+ if (feature && feature->ops && feature->ops->set_prop)
+ return feature->ops->set_prop(feature, prop);
+
+ return -ENOENT;
+}
+
+int fme_set_irq(struct ifpga_fme_hw *fme, u32 feature_id, void *irq_set)
+{
+ struct feature *feature;
+
+ if (!fme)
+ return -ENOENT;
+
+ feature = get_fme_feature_by_id(fme, feature_id);
+
+ if (feature && feature->ops && feature->ops->set_irq)
+ return feature->ops->set_irq(feature, irq_set);
+
+ return -ENOENT;
+}
+
+/* fme private feature head */
+static int fme_hdr_init(struct feature *feature)
+{
+ struct feature_fme_header *fme_hdr;
+
+ fme_hdr = (struct feature_fme_header *)feature->addr;
+
+ dev_info(NULL, "FME HDR Init.\n");
+ dev_info(NULL, "FME cap %llx.\n",
+ (unsigned long long)fme_hdr->capability.csr);
+
+ return 0;
+}
+
+static void fme_hdr_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME HDR UInit.\n");
+}
+
+static int fme_hdr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+ struct feature_header header;
+
+ header.csr = readq(&fme_hdr->header);
+ *revision = header.revision;
+
+ return 0;
+}
+
+static int fme_hdr_get_ports_num(struct ifpga_fme_hw *fme, u64 *ports_num)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+ struct feature_fme_capability fme_capability;
+
+ fme_capability.csr = readq(&fme_hdr->capability);
+ *ports_num = fme_capability.num_ports;
+
+ return 0;
+}
+
+static int fme_hdr_get_cache_size(struct ifpga_fme_hw *fme, u64 *cache_size)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+ struct feature_fme_capability fme_capability;
+
+ fme_capability.csr = readq(&fme_hdr->capability);
+ *cache_size = fme_capability.cache_size;
+
+ return 0;
+}
+
+static int fme_hdr_get_version(struct ifpga_fme_hw *fme, u64 *version)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+ struct feature_fme_capability fme_capability;
+
+ fme_capability.csr = readq(&fme_hdr->capability);
+ *version = fme_capability.fabric_verid;
+
+ return 0;
+}
+
+static int fme_hdr_get_socket_id(struct ifpga_fme_hw *fme, u64 *socket_id)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+ struct feature_fme_capability fme_capability;
+
+ fme_capability.csr = readq(&fme_hdr->capability);
+ *socket_id = fme_capability.socket_id;
+
+ return 0;
+}
+
+static int fme_hdr_get_bitstream_id(struct ifpga_fme_hw *fme,
+ u64 *bitstream_id)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+ *bitstream_id = readq(&fme_hdr->bitstream_id);
+
+ return 0;
+}
+
+static int fme_hdr_get_bitstream_metadata(struct ifpga_fme_hw *fme,
+ u64 *bitstream_metadata)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+ *bitstream_metadata = readq(&fme_hdr->bitstream_md);
+
+ return 0;
+}
+
+static int
+fme_hdr_get_prop(struct feature *feature, struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ switch (prop->prop_id) {
+ case FME_HDR_PROP_REVISION:
+ return fme_hdr_get_revision(fme, &prop->data);
+ case FME_HDR_PROP_PORTS_NUM:
+ return fme_hdr_get_ports_num(fme, &prop->data);
+ case FME_HDR_PROP_CACHE_SIZE:
+ return fme_hdr_get_cache_size(fme, &prop->data);
+ case FME_HDR_PROP_VERSION:
+ return fme_hdr_get_version(fme, &prop->data);
+ case FME_HDR_PROP_SOCKET_ID:
+ return fme_hdr_get_socket_id(fme, &prop->data);
+ case FME_HDR_PROP_BITSTREAM_ID:
+ return fme_hdr_get_bitstream_id(fme, &prop->data);
+ case FME_HDR_PROP_BITSTREAM_METADATA:
+ return fme_hdr_get_bitstream_metadata(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+struct feature_ops fme_hdr_ops = {
+ .init = fme_hdr_init,
+ .uinit = fme_hdr_uinit,
+ .get_prop = fme_hdr_get_prop,
+};
+
+/* thermal management */
+static int fme_thermal_get_threshold1(struct ifpga_fme_hw *fme, u64 *thres1)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres1 = temp_threshold.tmp_thshold1;
+
+ return 0;
+}
+
+static int fme_thermal_set_threshold1(struct ifpga_fme_hw *fme, u64 thres1)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_header *fme_hdr;
+ struct feature_fme_tmp_threshold tmp_threshold;
+ struct feature_fme_capability fme_capability;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+ fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+ spinlock_lock(&fme->lock);
+ tmp_threshold.csr = readq(&thermal->threshold);
+ fme_capability.csr = readq(&fme_hdr->capability);
+
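+ /* Reject threshold updates while the capability lock bit is set. */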
+ if (fme_capability.lock_bit == 1) {
+ spinlock_unlock(&fme->lock);
+ return -EBUSY;
+ } else if (thres1 > 100) {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ } else if (thres1 == 0) {
+ tmp_threshold.tmp_thshold1_enable = 0;
+ tmp_threshold.tmp_thshold1 = thres1;
+ } else {
+ tmp_threshold.tmp_thshold1_enable = 1;
+ tmp_threshold.tmp_thshold1 = thres1;
+ }
+
+ writeq(tmp_threshold.csr, &thermal->threshold);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_thermal_get_threshold2(struct ifpga_fme_hw *fme, u64 *thres2)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres2 = temp_threshold.tmp_thshold2;
+
+ return 0;
+}
+
+static int fme_thermal_set_threshold2(struct ifpga_fme_hw *fme, u64 thres2)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_header *fme_hdr;
+ struct feature_fme_tmp_threshold tmp_threshold;
+ struct feature_fme_capability fme_capability;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+ fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+ spinlock_lock(&fme->lock);
+ tmp_threshold.csr = readq(&thermal->threshold);
+ fme_capability.csr = readq(&fme_hdr->capability);
+
+ if (fme_capability.lock_bit == 1) {
+ spinlock_unlock(&fme->lock);
+ return -EBUSY;
+ } else if (thres2 > 100) {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ } else if (thres2 == 0) {
+ tmp_threshold.tmp_thshold2_enable = 0;
+ tmp_threshold.tmp_thshold2 = thres2;
+ } else {
+ tmp_threshold.tmp_thshold2_enable = 1;
+ tmp_threshold.tmp_thshold2 = thres2;
+ }
+
+ writeq(tmp_threshold.csr, &thermal->threshold);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_thermal_get_threshold_trip(struct ifpga_fme_hw *fme,
+ u64 *thres_trip)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres_trip = temp_threshold.therm_trip_thshold;
+
+ return 0;
+}
+
+static int fme_thermal_get_threshold1_reached(struct ifpga_fme_hw *fme,
+ u64 *thres1_reached)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres1_reached = temp_threshold.thshold1_status;
+
+ return 0;
+}
+
+static int fme_thermal_get_threshold2_reached(struct ifpga_fme_hw *fme,
+ u64 *thres2_reached)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres2_reached = temp_threshold.thshold2_status;
+
+ return 0;
+}
+
+static int fme_thermal_get_threshold1_policy(struct ifpga_fme_hw *fme,
+ u64 *thres1_policy)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres1_policy = temp_threshold.thshold_policy;
+
+ return 0;
+}
+
+static int fme_thermal_set_threshold1_policy(struct ifpga_fme_hw *fme,
+ u64 thres1_policy)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold tmp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ spinlock_lock(&fme->lock);
+ tmp_threshold.csr = readq(&thermal->threshold);
+
+ if (thres1_policy == 0) {
+ tmp_threshold.thshold_policy = 0;
+ } else if (thres1_policy == 1) {
+ tmp_threshold.thshold_policy = 1;
+ } else {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ }
+
+ writeq(tmp_threshold.csr, &thermal->threshold);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_thermal_get_temperature(struct ifpga_fme_hw *fme, u64 *temp)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_temp_rdsensor_fmt1 temp_rdsensor_fmt1;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_rdsensor_fmt1.csr = readq(&thermal->rdsensor_fm1);
+ *temp = temp_rdsensor_fmt1.fpga_temp;
+
+ return 0;
+}
+
+static int fme_thermal_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+ struct feature_fme_thermal *fme_thermal
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+ struct feature_header header;
+
+ header.csr = readq(&fme_thermal->header);
+ *revision = header.revision;
+
+ return 0;
+}
+
+#define FME_THERMAL_CAP_NO_TMP_THRESHOLD 0x1
+
+static int fme_thermal_mgmt_init(struct feature *feature)
+{
+ struct feature_fme_thermal *fme_thermal;
+ struct feature_fme_tmp_threshold_cap thermal_cap;
+
+ dev_info(NULL, "FME thermal mgmt Init.\n");
+
+ fme_thermal = (struct feature_fme_thermal *)feature->addr;
+ thermal_cap.csr = readq(&fme_thermal->threshold_cap);
+
+ dev_info(NULL, "FME thermal cap %llx.\n",
+ (unsigned long long)thermal_cap.csr);
+
+ if (thermal_cap.tmp_thshold_disabled)
+ feature->cap |= FME_THERMAL_CAP_NO_TMP_THRESHOLD;
+
+ return 0;
+}
+
+static void fme_thermal_mgmt_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME thermal mgmt UInit.\n");
+}
+
+static int
+fme_thermal_set_prop(struct feature *feature, struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD)
+ return -ENOENT;
+
+ switch (prop->prop_id) {
+ case FME_THERMAL_PROP_THRESHOLD1:
+ return fme_thermal_set_threshold1(fme, prop->data);
+ case FME_THERMAL_PROP_THRESHOLD2:
+ return fme_thermal_set_threshold2(fme, prop->data);
+ case FME_THERMAL_PROP_THRESHOLD1_POLICY:
+ return fme_thermal_set_threshold1_policy(fme, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int
+fme_thermal_get_prop(struct feature *feature, struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD &&
+ prop->prop_id != FME_THERMAL_PROP_TEMPERATURE &&
+ prop->prop_id != FME_THERMAL_PROP_REVISION)
+ return -ENOENT;
+
+ switch (prop->prop_id) {
+ case FME_THERMAL_PROP_THRESHOLD1:
+ return fme_thermal_get_threshold1(fme, &prop->data);
+ case FME_THERMAL_PROP_THRESHOLD2:
+ return fme_thermal_get_threshold2(fme, &prop->data);
+ case FME_THERMAL_PROP_THRESHOLD_TRIP:
+ return fme_thermal_get_threshold_trip(fme, &prop->data);
+ case FME_THERMAL_PROP_THRESHOLD1_REACHED:
+ return fme_thermal_get_threshold1_reached(fme, &prop->data);
+ case FME_THERMAL_PROP_THRESHOLD2_REACHED:
+ return fme_thermal_get_threshold2_reached(fme, &prop->data);
+ case FME_THERMAL_PROP_THRESHOLD1_POLICY:
+ return fme_thermal_get_threshold1_policy(fme, &prop->data);
+ case FME_THERMAL_PROP_TEMPERATURE:
+ return fme_thermal_get_temperature(fme, &prop->data);
+ case FME_THERMAL_PROP_REVISION:
+ return fme_thermal_get_revision(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+struct feature_ops fme_thermal_mgmt_ops = {
+ .init = fme_thermal_mgmt_init,
+ .uinit = fme_thermal_mgmt_uinit,
+ .get_prop = fme_thermal_get_prop,
+ .set_prop = fme_thermal_set_prop,
+};
+
+static int fme_pwr_get_consumed(struct ifpga_fme_hw *fme, u64 *consumed)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_status pm_status;
+
+ pm_status.csr = readq(&fme_power->status);
+
+ *consumed = pm_status.pwr_consumed;
+
+ return 0;
+}
+
+static int fme_pwr_get_threshold1(struct ifpga_fme_hw *fme, u64 *threshold)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ *threshold = pm_ap_threshold.threshold1;
+
+ return 0;
+}
+
+static int fme_pwr_set_threshold1(struct ifpga_fme_hw *fme, u64 threshold)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ spinlock_lock(&fme->lock);
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ if (threshold <= PWR_THRESHOLD_MAX) {
+ pm_ap_threshold.threshold1 = threshold;
+ } else {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ }
+
+ writeq(pm_ap_threshold.csr, &fme_power->threshold);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_pwr_get_threshold2(struct ifpga_fme_hw *fme, u64 *threshold)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ *threshold = pm_ap_threshold.threshold2;
+
+ return 0;
+}
+
+static int fme_pwr_set_threshold2(struct ifpga_fme_hw *fme, u64 threshold)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ spinlock_lock(&fme->lock);
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ if (threshold <= PWR_THRESHOLD_MAX) {
+ pm_ap_threshold.threshold2 = threshold;
+ } else {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ }
+
+ writeq(pm_ap_threshold.csr, &fme_power->threshold);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_pwr_get_threshold1_status(struct ifpga_fme_hw *fme,
+ u64 *threshold_status)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ *threshold_status = pm_ap_threshold.threshold1_status;
+
+ return 0;
+}
+
+static int fme_pwr_get_threshold2_status(struct ifpga_fme_hw *fme,
+ u64 *threshold_status)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ *threshold_status = pm_ap_threshold.threshold2_status;
+
+ return 0;
+}
+
+static int fme_pwr_get_rtl(struct ifpga_fme_hw *fme, u64 *rtl)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_status pm_status;
+
+ pm_status.csr = readq(&fme_power->status);
+
+ *rtl = pm_status.fpga_latency_report;
+
+ return 0;
+}
+
+static int fme_pwr_get_xeon_limit(struct ifpga_fme_hw *fme, u64 *limit)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_xeon_limit xeon_limit;
+
+ xeon_limit.csr = readq(&fme_power->xeon_limit);
+
+ if (!xeon_limit.enable)
+ xeon_limit.pwr_limit = 0;
+
+ *limit = xeon_limit.pwr_limit;
+
+ return 0;
+}
+
+static int fme_pwr_get_fpga_limit(struct ifpga_fme_hw *fme, u64 *limit)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_fpga_limit fpga_limit;
+
+ fpga_limit.csr = readq(&fme_power->fpga_limit);
+
+ if (!fpga_limit.enable)
+ fpga_limit.pwr_limit = 0;
+
+ *limit = fpga_limit.pwr_limit;
+
+ return 0;
+}
+
+static int fme_pwr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_header header;
+
+ header.csr = readq(&fme_power->header);
+ *revision = header.revision;
+
+ return 0;
+}
+
+static int fme_power_mgmt_init(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME power mgmt Init.\n");
+
+ return 0;
+}
+
+static void fme_power_mgmt_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME power mgmt UInit.\n");
+}
+
+static int fme_power_mgmt_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ switch (prop->prop_id) {
+ case FME_PWR_PROP_CONSUMED:
+ return fme_pwr_get_consumed(fme, &prop->data);
+ case FME_PWR_PROP_THRESHOLD1:
+ return fme_pwr_get_threshold1(fme, &prop->data);
+ case FME_PWR_PROP_THRESHOLD2:
+ return fme_pwr_get_threshold2(fme, &prop->data);
+ case FME_PWR_PROP_THRESHOLD1_STATUS:
+ return fme_pwr_get_threshold1_status(fme, &prop->data);
+ case FME_PWR_PROP_THRESHOLD2_STATUS:
+ return fme_pwr_get_threshold2_status(fme, &prop->data);
+ case FME_PWR_PROP_RTL:
+ return fme_pwr_get_rtl(fme, &prop->data);
+ case FME_PWR_PROP_XEON_LIMIT:
+ return fme_pwr_get_xeon_limit(fme, &prop->data);
+ case FME_PWR_PROP_FPGA_LIMIT:
+ return fme_pwr_get_fpga_limit(fme, &prop->data);
+ case FME_PWR_PROP_REVISION:
+ return fme_pwr_get_revision(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_power_mgmt_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ switch (prop->prop_id) {
+ case FME_PWR_PROP_THRESHOLD1:
+ return fme_pwr_set_threshold1(fme, prop->data);
+ case FME_PWR_PROP_THRESHOLD2:
+ return fme_pwr_set_threshold2(fme, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+struct feature_ops fme_power_mgmt_ops = {
+ .init = fme_power_mgmt_init,
+ .uinit = fme_power_mgmt_uinit,
+ .get_prop = fme_power_mgmt_get_prop,
+ .set_prop = fme_power_mgmt_set_prop,
+};
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_fme_dperf.c b/drivers/raw/ifpga_rawdev/base/ifpga_fme_dperf.c
new file mode 100644
index 00000000..1773b87e
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_fme_dperf.c
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+#define PERF_OBJ_ROOT_ID 0xff
+
+static int fme_dperf_get_clock(struct ifpga_fme_hw *fme, u64 *clock)
+{
+ struct feature_fme_dperf *dperf;
+ struct feature_fme_dfpmon_clk_ctr clk;
+
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+ clk.afu_interf_clock = readq(&dperf->clk);
+
+ *clock = clk.afu_interf_clock;
+ return 0;
+}
+
+static int fme_dperf_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+ struct feature_fme_dperf *dperf;
+ struct feature_header header;
+
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+ header.csr = readq(&dperf->header);
+ *revision = header.revision;
+
+ return 0;
+}
+
+#define DPERF_TIMEOUT 30
+
+static bool fabric_pobj_is_enabled(int port_id,
+ struct feature_fme_dperf *dperf)
+{
+ struct feature_fme_dfpmon_fab_ctl ctl;
+
+ ctl.csr = readq(&dperf->fab_ctl);
+
+ if (ctl.port_filter == FAB_DISABLE_FILTER)
+ return port_id == PERF_OBJ_ROOT_ID;
+
+ return port_id == ctl.port_id;
+}
+
+static u64 read_fabric_counter(struct ifpga_fme_hw *fme, u8 port_id,
+ enum dperf_fab_events fab_event)
+{
+ struct feature_fme_dfpmon_fab_ctl ctl;
+ struct feature_fme_dfpmon_fab_ctr ctr;
+ struct feature_fme_dperf *dperf;
+ u64 counter = 0;
+
+ spinlock_lock(&fme->lock);
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+
+ /* if it is disabled, force the counter to return zero. */
+ if (!fabric_pobj_is_enabled(port_id, dperf))
+ goto exit;
+
+ ctl.csr = readq(&dperf->fab_ctl);
+ ctl.fab_evtcode = fab_event;
+ writeq(ctl.csr, &dperf->fab_ctl);
+
+ ctr.event_code = fab_event;
+
+ if (fpga_wait_register_field(event_code, ctr,
+ &dperf->fab_ctr, DPERF_TIMEOUT, 1)) {
+ dev_err(fme, "timeout, unmatched fabric event type in counter registers.\n");
+ spinlock_unlock(&fme->lock);
+ return -ETIMEDOUT;
+ }
+
+ ctr.csr = readq(&dperf->fab_ctr);
+ counter = ctr.fab_cnt;
+exit:
+ spinlock_unlock(&fme->lock);
+ return counter;
+}
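+
+/*
+ * Editor's note (illustrative, not part of the original patch):
+ * read_fabric_counter() programs the requested event code into fab_ctl and
+ * then polls fab_ctr until the hardware echoes the same event code back, so
+ * the count read afterwards is guaranteed to belong to the event just
+ * selected rather than to the previously programmed one.
+ */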
+
+#define FAB_PORT_SHOW(name, event) \
+static int fme_dperf_get_fab_port_##name(struct ifpga_fme_hw *fme, \
+ u8 port_id, u64 *counter) \
+{ \
+ *counter = read_fabric_counter(fme, port_id, event); \
+ return 0; \
+}
+
+FAB_PORT_SHOW(pcie0_read, DPERF_FAB_PCIE0_RD);
+FAB_PORT_SHOW(pcie0_write, DPERF_FAB_PCIE0_WR);
+FAB_PORT_SHOW(mmio_read, DPERF_FAB_MMIO_RD);
+FAB_PORT_SHOW(mmio_write, DPERF_FAB_MMIO_WR);
+
+static int fme_dperf_get_fab_port_enable(struct ifpga_fme_hw *fme,
+ u8 port_id, u64 *enable)
+{
+ struct feature_fme_dperf *dperf;
+ int status;
+
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+
+ status = fabric_pobj_is_enabled(port_id, dperf);
+ *enable = (u64)status;
+
+ return 0;
+}
+
+/*
+ * Enabling the fabric event counter for one port (or for all ports via the
+ * root object) automatically disables whichever fabric counter filter was
+ * enabled before; see the usage sketch after this function.
+ */
+static int fme_dperf_set_fab_port_enable(struct ifpga_fme_hw *fme,
+ u8 port_id, u64 enable)
+{
+ struct feature_fme_dfpmon_fab_ctl ctl;
+ struct feature_fme_dperf *dperf;
+ bool state;
+
+ state = !!enable;
+
+ if (!state)
+ return -EINVAL;
+
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+
+ /* if it is already enabled. */
+ if (fabric_pobj_is_enabled(port_id, dperf))
+ return 0;
+
+ spinlock_lock(&fme->lock);
+ ctl.csr = readq(&dperf->fab_ctl);
+ if (port_id == PERF_OBJ_ROOT_ID) {
+ ctl.port_filter = FAB_DISABLE_FILTER;
+ } else {
+ ctl.port_filter = FAB_ENABLE_FILTER;
+ ctl.port_id = port_id;
+ }
+
+ writeq(ctl.csr, &dperf->fab_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
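+
+/*
+ * Usage sketch (editor's illustration, not part of the original patch):
+ * selecting a specific port replaces whatever filter was programmed before,
+ * so after
+ *
+ *	fme_dperf_set_fab_port_enable(fme, PERF_OBJ_ROOT_ID, 1);
+ *	fme_dperf_set_fab_port_enable(fme, 0, 1);
+ *
+ * only port 0 counters remain selected; the all-port (root) filter set by
+ * the first call is no longer in effect.
+ */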
+
+static int fme_dperf_get_fab_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
+{
+ struct feature_fme_dperf *dperf;
+ struct feature_fme_dfpmon_fab_ctl ctl;
+
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+ ctl.csr = readq(&dperf->fab_ctl);
+ *freeze = (u64)ctl.freeze;
+
+ return 0;
+}
+
+static int fme_dperf_set_fab_freeze(struct ifpga_fme_hw *fme, u64 freeze)
+{
+ struct feature_fme_dperf *dperf;
+ struct feature_fme_dfpmon_fab_ctl ctl;
+ bool state;
+
+ state = !!freeze;
+
+ spinlock_lock(&fme->lock);
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+ ctl.csr = readq(&dperf->fab_ctl);
+ ctl.freeze = state;
+ writeq(ctl.csr, &dperf->fab_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+#define PERF_MAX_PORT_NUM 1
+
+static int fme_global_dperf_init(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME global_dperf Init.\n");
+
+ return 0;
+}
+
+static void fme_global_dperf_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME global_dperf UInit.\n");
+}
+
+static int fme_dperf_fab_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x1: /* FREEZE */
+ return fme_dperf_get_fab_freeze(fme, &prop->data);
+ case 0x2: /* PCIE0_READ */
+ return fme_dperf_get_fab_port_pcie0_read(fme, sub, &prop->data);
+ case 0x3: /* PCIE0_WRITE */
+ return fme_dperf_get_fab_port_pcie0_write(fme, sub,
+ &prop->data);
+ case 0x4: /* MMIO_READ */
+ return fme_dperf_get_fab_port_mmio_read(fme, sub, &prop->data);
+ case 0x5: /* MMIO_WRITE */
+ return fme_dperf_get_fab_port_mmio_write(fme, sub, &prop->data);
+ case 0x6: /* ENABLE */
+ return fme_dperf_get_fab_port_enable(fme, sub, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_dperf_root_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+
+ switch (id) {
+ case 0x1: /* CLOCK */
+ return fme_dperf_get_clock(fme, &prop->data);
+ case 0x2: /* REVISION */
+ return fme_dperf_get_revision(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_dperf_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+
+ switch (top) {
+ case PERF_PROP_TOP_FAB:
+ return fme_dperf_fab_get_prop(feature, prop);
+ case PERF_PROP_TOP_UNUSED:
+ return fme_dperf_root_get_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
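+
+/*
+ * Editor's note (illustrative, not part of the original patch):
+ * prop->prop_id encodes three sub-fields that GET_FIELD() extracts above --
+ * PROP_TOP selects the group (e.g. PERF_PROP_TOP_FAB), PROP_SUB selects a
+ * port (or PERF_PROP_SUB_UNUSED for the root object) and PROP_ID selects the
+ * individual property, so a single u64 identifier addresses every counter
+ * exposed here.
+ */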
+
+static int fme_dperf_fab_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x1: /* FREEZE - fab root only prop */
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+ return fme_dperf_set_fab_freeze(fme, prop->data);
+ case 0x6: /* ENABLE - fab both root and sub */
+ return fme_dperf_set_fab_port_enable(fme, sub, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_dperf_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+
+ switch (top) {
+ case PERF_PROP_TOP_FAB:
+ return fme_dperf_fab_set_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
+
+struct feature_ops fme_global_dperf_ops = {
+ .init = fme_global_dperf_init,
+ .uinit = fme_global_dperf_uinit,
+ .get_prop = fme_global_dperf_get_prop,
+ .set_prop = fme_global_dperf_set_prop,
+};
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_fme_error.c b/drivers/raw/ifpga_rawdev/base/ifpga_fme_error.c
new file mode 100644
index 00000000..8c26fb25
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_fme_error.c
@@ -0,0 +1,381 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+static int fme_err_get_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_error0 fme_error0;
+
+ fme_error0.csr = readq(&fme_err->fme_err);
+ *val = fme_error0.csr;
+
+ return 0;
+}
+
+static int fme_err_get_first_error(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_first_error fme_first_err;
+
+ fme_first_err.csr = readq(&fme_err->fme_first_err);
+ *val = fme_first_err.err_reg_status;
+
+ return 0;
+}
+
+static int fme_err_get_next_error(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_next_error fme_next_err;
+
+ fme_next_err.csr = readq(&fme_err->fme_next_err);
+ *val = fme_next_err.err_reg_status;
+
+ return 0;
+}
+
+static int fme_err_set_clear(struct ifpga_fme_hw *fme, u64 val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_error0 fme_error0;
+ struct feature_fme_first_error fme_first_err;
+ struct feature_fme_next_error fme_next_err;
+ int ret = 0;
+
+ spinlock_lock(&fme->lock);
+ writeq(FME_ERROR0_MASK, &fme_err->fme_err_mask);
+
+ fme_error0.csr = readq(&fme_err->fme_err);
+ if (val != fme_error0.csr) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ fme_first_err.csr = readq(&fme_err->fme_first_err);
+ fme_next_err.csr = readq(&fme_err->fme_next_err);
+
+ writeq(fme_error0.csr & FME_ERROR0_MASK, &fme_err->fme_err);
+ writeq(fme_first_err.csr & FME_FIRST_ERROR_MASK,
+ &fme_err->fme_first_err);
+ writeq(fme_next_err.csr & FME_NEXT_ERROR_MASK,
+ &fme_err->fme_next_err);
+
+exit:
+ writeq(FME_ERROR0_MASK_DEFAULT, &fme_err->fme_err_mask);
+ spinlock_unlock(&fme->lock);
+
+ return ret;
+}
+
+static int fme_err_get_revision(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_header header;
+
+ header.csr = readq(&fme_err->header);
+ *val = header.revision;
+
+ return 0;
+}
+
+static int fme_err_get_pcie0_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_pcie0_error pcie0_err;
+
+ pcie0_err.csr = readq(&fme_err->pcie0_err);
+ *val = pcie0_err.csr;
+
+ return 0;
+}
+
+static int fme_err_set_pcie0_errors(struct ifpga_fme_hw *fme, u64 val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_pcie0_error pcie0_err;
+ int ret = 0;
+
+ spinlock_lock(&fme->lock);
+ writeq(FME_PCIE0_ERROR_MASK, &fme_err->pcie0_err_mask);
+
+ pcie0_err.csr = readq(&fme_err->pcie0_err);
+ if (val != pcie0_err.csr)
+ ret = -EBUSY;
+ else
+ writeq(pcie0_err.csr & FME_PCIE0_ERROR_MASK,
+ &fme_err->pcie0_err);
+
+ writeq(0UL, &fme_err->pcie0_err_mask);
+ spinlock_unlock(&fme->lock);
+
+ return ret;
+}
+
+static int fme_err_get_pcie1_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_pcie1_error pcie1_err;
+
+ pcie1_err.csr = readq(&fme_err->pcie1_err);
+ *val = pcie1_err.csr;
+
+ return 0;
+}
+
+static int fme_err_set_pcie1_errors(struct ifpga_fme_hw *fme, u64 val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_pcie1_error pcie1_err;
+ int ret = 0;
+
+ spinlock_lock(&fme->lock);
+ writeq(FME_PCIE1_ERROR_MASK, &fme_err->pcie1_err_mask);
+
+ pcie1_err.csr = readq(&fme_err->pcie1_err);
+ if (val != pcie1_err.csr)
+ ret = -EBUSY;
+ else
+ writeq(pcie1_err.csr & FME_PCIE1_ERROR_MASK,
+ &fme_err->pcie1_err);
+
+ writeq(0UL, &fme_err->pcie1_err_mask);
+ spinlock_unlock(&fme->lock);
+
+ return ret;
+}
+
+static int fme_err_get_nonfatal_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_ras_nonfaterror ras_nonfaterr;
+
+ ras_nonfaterr.csr = readq(&fme_err->ras_nonfaterr);
+ *val = ras_nonfaterr.csr;
+
+ return 0;
+}
+
+static int fme_err_get_catfatal_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_ras_catfaterror ras_catfaterr;
+
+ ras_catfaterr.csr = readq(&fme_err->ras_catfaterr);
+ *val = ras_catfaterr.csr;
+
+ return 0;
+}
+
+static int fme_err_get_inject_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_ras_error_inj ras_error_inj;
+
+ ras_error_inj.csr = readq(&fme_err->ras_error_inj);
+ *val = ras_error_inj.csr & FME_RAS_ERROR_INJ_MASK;
+
+ return 0;
+}
+
+static int fme_err_set_inject_errors(struct ifpga_fme_hw *fme, u64 val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_ras_error_inj ras_error_inj;
+
+ spinlock_lock(&fme->lock);
+ ras_error_inj.csr = readq(&fme_err->ras_error_inj);
+
+ if (val <= FME_RAS_ERROR_INJ_MASK) {
+ ras_error_inj.csr = val;
+ } else {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ }
+
+ writeq(ras_error_inj.csr, &fme_err->ras_error_inj);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static void fme_error_enable(struct ifpga_fme_hw *fme)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+
+ writeq(FME_ERROR0_MASK_DEFAULT, &fme_err->fme_err_mask);
+ writeq(0UL, &fme_err->pcie0_err_mask);
+ writeq(0UL, &fme_err->pcie1_err_mask);
+ writeq(0UL, &fme_err->ras_nonfat_mask);
+ writeq(0UL, &fme_err->ras_catfat_mask);
+}
+
+static int fme_global_error_init(struct feature *feature)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ fme_error_enable(fme);
+
+ if (feature->ctx_num)
+ fme->capability |= FPGA_FME_CAP_ERR_IRQ;
+
+ return 0;
+}
+
+static void fme_global_error_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+}
+
+static int fme_err_fme_err_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x1: /* ERRORS */
+ return fme_err_get_errors(fme, &prop->data);
+ case 0x2: /* FIRST_ERROR */
+ return fme_err_get_first_error(fme, &prop->data);
+ case 0x3: /* NEXT_ERROR */
+ return fme_err_get_next_error(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_err_root_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x5: /* REVISION */
+ return fme_err_get_revision(fme, &prop->data);
+ case 0x6: /* PCIE0_ERRORS */
+ return fme_err_get_pcie0_errors(fme, &prop->data);
+ case 0x7: /* PCIE1_ERRORS */
+ return fme_err_get_pcie1_errors(fme, &prop->data);
+ case 0x8: /* NONFATAL_ERRORS */
+ return fme_err_get_nonfatal_errors(fme, &prop->data);
+ case 0x9: /* CATFATAL_ERRORS */
+ return fme_err_get_catfatal_errors(fme, &prop->data);
+ case 0xa: /* INJECT_ERRORS */
+ return fme_err_get_inject_errors(fme, &prop->data);
+ case 0xb: /* REVISION */
+ return fme_err_get_revision(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_error_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+
+ /* PROP_SUB is never used */
+ if (sub != PROP_SUB_UNUSED)
+ return -ENOENT;
+
+ switch (top) {
+ case ERR_PROP_TOP_FME_ERR:
+ return fme_err_fme_err_get_prop(feature, prop);
+ case ERR_PROP_TOP_UNUSED:
+ return fme_err_root_get_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_err_fme_err_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x4: /* CLEAR */
+ return fme_err_set_clear(fme, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_err_root_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x6: /* PCIE0_ERRORS */
+ return fme_err_set_pcie0_errors(fme, prop->data);
+ case 0x7: /* PCIE1_ERRORS */
+ return fme_err_set_pcie1_errors(fme, prop->data);
+ case 0xa: /* INJECT_ERRORS */
+ return fme_err_set_inject_errors(fme, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_error_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+
+ /* PROP_SUB is never used */
+ if (sub != PROP_SUB_UNUSED)
+ return -ENOENT;
+
+ switch (top) {
+ case ERR_PROP_TOP_FME_ERR:
+ return fme_err_fme_err_set_prop(feature, prop);
+ case ERR_PROP_TOP_UNUSED:
+ return fme_err_root_set_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
+
+struct feature_ops fme_global_err_ops = {
+ .init = fme_global_error_init,
+ .uinit = fme_global_error_uinit,
+ .get_prop = fme_global_error_get_prop,
+ .set_prop = fme_global_error_set_prop,
+};
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_fme_iperf.c b/drivers/raw/ifpga_rawdev/base/ifpga_fme_iperf.c
new file mode 100644
index 00000000..e6c40a19
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_fme_iperf.c
@@ -0,0 +1,715 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+#define PERF_OBJ_ROOT_ID 0xff
+
+static int fme_iperf_get_clock(struct ifpga_fme_hw *fme, u64 *clock)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_clk_ctr clk;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ clk.afu_interf_clock = readq(&iperf->clk);
+
+ *clock = clk.afu_interf_clock;
+ return 0;
+}
+
+static int fme_iperf_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_header header;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ header.csr = readq(&iperf->header);
+ *revision = header.revision;
+
+ return 0;
+}
+
+static int fme_iperf_get_cache_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_ch_ctl ctl;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->ch_ctl);
+ *freeze = (u64)ctl.freeze;
+ return 0;
+}
+
+static int fme_iperf_set_cache_freeze(struct ifpga_fme_hw *fme, u64 freeze)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_ch_ctl ctl;
+ bool state;
+
+ state = !!freeze;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->ch_ctl);
+ ctl.freeze = state;
+ writeq(ctl.csr, &iperf->ch_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+#define IPERF_TIMEOUT 30
+
+static u64 read_cache_counter(struct ifpga_fme_hw *fme,
+ u8 channel, enum iperf_cache_events event)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_ch_ctl ctl;
+ struct feature_fme_ifpmon_ch_ctr ctr0, ctr1;
+ u64 counter;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+
+ /* set channel access type and cache event code. */
+ ctl.csr = readq(&iperf->ch_ctl);
+ ctl.cci_chsel = channel;
+ ctl.cache_event = event;
+ writeq(ctl.csr, &iperf->ch_ctl);
+
+ /* check the event type in the counter registers */
+ ctr0.event_code = event;
+
+ if (fpga_wait_register_field(event_code, ctr0,
+ &iperf->ch_ctr0, IPERF_TIMEOUT, 1)) {
+ dev_err(fme, "timeout, unmatched cache event type in counter registers.\n");
+ spinlock_unlock(&fme->lock);
+ return -ETIMEDOUT;
+ }
+
+ ctr0.csr = readq(&iperf->ch_ctr0);
+ ctr1.csr = readq(&iperf->ch_ctr1);
+ counter = ctr0.cache_counter + ctr1.cache_counter;
+ spinlock_unlock(&fme->lock);
+
+ return counter;
+}
+
+#define CACHE_SHOW(name, type, event) \
+static int fme_iperf_get_cache_##name(struct ifpga_fme_hw *fme, \
+ u64 *counter) \
+{ \
+ *counter = read_cache_counter(fme, type, event); \
+ return 0; \
+}
+
+CACHE_SHOW(read_hit, CACHE_CHANNEL_RD, IPERF_CACHE_RD_HIT);
+CACHE_SHOW(read_miss, CACHE_CHANNEL_RD, IPERF_CACHE_RD_MISS);
+CACHE_SHOW(write_hit, CACHE_CHANNEL_WR, IPERF_CACHE_WR_HIT);
+CACHE_SHOW(write_miss, CACHE_CHANNEL_WR, IPERF_CACHE_WR_MISS);
+CACHE_SHOW(hold_request, CACHE_CHANNEL_RD, IPERF_CACHE_HOLD_REQ);
+CACHE_SHOW(tx_req_stall, CACHE_CHANNEL_RD, IPERF_CACHE_TX_REQ_STALL);
+CACHE_SHOW(rx_req_stall, CACHE_CHANNEL_RD, IPERF_CACHE_RX_REQ_STALL);
+CACHE_SHOW(rx_eviction, CACHE_CHANNEL_RD, IPERF_CACHE_EVICTIONS);
+CACHE_SHOW(data_write_port_contention, CACHE_CHANNEL_WR,
+ IPERF_CACHE_DATA_WR_PORT_CONTEN);
+CACHE_SHOW(tag_write_port_contention, CACHE_CHANNEL_WR,
+ IPERF_CACHE_TAG_WR_PORT_CONTEN);
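+
+/*
+ * Editor's note (illustrative, not part of the original patch): each
+ * CACHE_SHOW() invocation above expands to an accessor of the form
+ *
+ *	static int fme_iperf_get_cache_read_hit(struct ifpga_fme_hw *fme,
+ *						u64 *counter)
+ *	{
+ *		*counter = read_cache_counter(fme, CACHE_CHANNEL_RD,
+ *					      IPERF_CACHE_RD_HIT);
+ *		return 0;
+ *	}
+ *
+ * which the cache property dispatch below calls by name.
+ */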
+
+static int fme_iperf_get_vtd_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
+{
+ struct feature_fme_ifpmon_vtd_ctl ctl;
+ struct feature_fme_iperf *iperf;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->vtd_ctl);
+ *freeze = (u64)ctl.freeze;
+
+ return 0;
+}
+
+static int fme_iperf_set_vtd_freeze(struct ifpga_fme_hw *fme, u64 freeze)
+{
+ struct feature_fme_ifpmon_vtd_ctl ctl;
+ struct feature_fme_iperf *iperf;
+ bool state;
+
+ state = !!freeze;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->vtd_ctl);
+ ctl.freeze = state;
+ writeq(ctl.csr, &iperf->vtd_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static u64 read_iommu_sip_counter(struct ifpga_fme_hw *fme,
+ enum iperf_vtd_sip_events event)
+{
+ struct feature_fme_ifpmon_vtd_sip_ctl sip_ctl;
+ struct feature_fme_ifpmon_vtd_sip_ctr sip_ctr;
+ struct feature_fme_iperf *iperf;
+ u64 counter;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ sip_ctl.csr = readq(&iperf->vtd_sip_ctl);
+ sip_ctl.vtd_evtcode = event;
+ writeq(sip_ctl.csr, &iperf->vtd_sip_ctl);
+
+ sip_ctr.event_code = event;
+
+ if (fpga_wait_register_field(event_code, sip_ctr,
+ &iperf->vtd_sip_ctr, IPERF_TIMEOUT, 1)) {
+ dev_err(fme, "timeout, unmatched VTd SIP event type in counter registers\n");
+ spinlock_unlock(&fme->lock);
+ return -ETIMEDOUT;
+ }
+
+ sip_ctr.csr = readq(&iperf->vtd_sip_ctr);
+ counter = sip_ctr.vtd_counter;
+ spinlock_unlock(&fme->lock);
+
+ return counter;
+}
+
+#define VTD_SIP_SHOW(name, event) \
+static int fme_iperf_get_vtd_sip_##name(struct ifpga_fme_hw *fme, \
+ u64 *counter) \
+{ \
+ *counter = read_iommu_sip_counter(fme, event); \
+ return 0; \
+}
+
+VTD_SIP_SHOW(iotlb_4k_hit, IPERF_VTD_SIP_IOTLB_4K_HIT);
+VTD_SIP_SHOW(iotlb_2m_hit, IPERF_VTD_SIP_IOTLB_2M_HIT);
+VTD_SIP_SHOW(iotlb_1g_hit, IPERF_VTD_SIP_IOTLB_1G_HIT);
+VTD_SIP_SHOW(slpwc_l3_hit, IPERF_VTD_SIP_SLPWC_L3_HIT);
+VTD_SIP_SHOW(slpwc_l4_hit, IPERF_VTD_SIP_SLPWC_L4_HIT);
+VTD_SIP_SHOW(rcc_hit, IPERF_VTD_SIP_RCC_HIT);
+VTD_SIP_SHOW(iotlb_4k_miss, IPERF_VTD_SIP_IOTLB_4K_MISS);
+VTD_SIP_SHOW(iotlb_2m_miss, IPERF_VTD_SIP_IOTLB_2M_MISS);
+VTD_SIP_SHOW(iotlb_1g_miss, IPERF_VTD_SIP_IOTLB_1G_MISS);
+VTD_SIP_SHOW(slpwc_l3_miss, IPERF_VTD_SIP_SLPWC_L3_MISS);
+VTD_SIP_SHOW(slpwc_l4_miss, IPERF_VTD_SIP_SLPWC_L4_MISS);
+VTD_SIP_SHOW(rcc_miss, IPERF_VTD_SIP_RCC_MISS);
+
+static u64 read_iommu_counter(struct ifpga_fme_hw *fme, u8 port_id,
+ enum iperf_vtd_events base_event)
+{
+ struct feature_fme_ifpmon_vtd_ctl ctl;
+ struct feature_fme_ifpmon_vtd_ctr ctr;
+ struct feature_fme_iperf *iperf;
+ enum iperf_vtd_events event = base_event + port_id;
+ u64 counter;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->vtd_ctl);
+ ctl.vtd_evtcode = event;
+ writeq(ctl.csr, &iperf->vtd_ctl);
+
+ ctr.event_code = event;
+
+ if (fpga_wait_register_field(event_code, ctr,
+ &iperf->vtd_ctr, IPERF_TIMEOUT, 1)) {
+ dev_err(fme, "timeout, unmatched VTd event type in counter registers.\n");
+ spinlock_unlock(&fme->lock);
+ return -ETIMEDOUT;
+ }
+
+ ctr.csr = readq(&iperf->vtd_ctr);
+ counter = ctr.vtd_counter;
+ spinlock_unlock(&fme->lock);
+
+ return counter;
+}
+
+#define VTD_PORT_SHOW(name, base_event) \
+static int fme_iperf_get_vtd_port_##name(struct ifpga_fme_hw *fme, \
+ u8 port_id, u64 *counter) \
+{ \
+ *counter = read_iommu_counter(fme, port_id, base_event); \
+ return 0; \
+}
+
+VTD_PORT_SHOW(read_transaction, IPERF_VTD_AFU_MEM_RD_TRANS);
+VTD_PORT_SHOW(write_transaction, IPERF_VTD_AFU_MEM_WR_TRANS);
+VTD_PORT_SHOW(devtlb_read_hit, IPERF_VTD_AFU_DEVTLB_RD_HIT);
+VTD_PORT_SHOW(devtlb_write_hit, IPERF_VTD_AFU_DEVTLB_WR_HIT);
+VTD_PORT_SHOW(devtlb_4k_fill, IPERF_VTD_DEVTLB_4K_FILL);
+VTD_PORT_SHOW(devtlb_2m_fill, IPERF_VTD_DEVTLB_2M_FILL);
+VTD_PORT_SHOW(devtlb_1g_fill, IPERF_VTD_DEVTLB_1G_FILL);
+
+static bool fabric_pobj_is_enabled(u8 port_id, struct feature_fme_iperf *iperf)
+{
+ struct feature_fme_ifpmon_fab_ctl ctl;
+
+ ctl.csr = readq(&iperf->fab_ctl);
+
+ if (ctl.port_filter == FAB_DISABLE_FILTER)
+ return port_id == PERF_OBJ_ROOT_ID;
+
+ return port_id == ctl.port_id;
+}
+
+static u64 read_fabric_counter(struct ifpga_fme_hw *fme, u8 port_id,
+ enum iperf_fab_events fab_event)
+{
+ struct feature_fme_ifpmon_fab_ctl ctl;
+ struct feature_fme_ifpmon_fab_ctr ctr;
+ struct feature_fme_iperf *iperf;
+ u64 counter = 0;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+
+ /* if it is disabled, force the counter to return zero. */
+ if (!fabric_pobj_is_enabled(port_id, iperf))
+ goto exit;
+
+ ctl.csr = readq(&iperf->fab_ctl);
+ ctl.fab_evtcode = fab_event;
+ writeq(ctl.csr, &iperf->fab_ctl);
+
+ ctr.event_code = fab_event;
+
+ if (fpga_wait_register_field(event_code, ctr,
+ &iperf->fab_ctr, IPERF_TIMEOUT, 1)) {
+ dev_err(fme, "timeout, unmatched fabric event type in counter registers.\n");
+ spinlock_unlock(&fme->lock);
+ return -ETIMEDOUT;
+ }
+
+ ctr.csr = readq(&iperf->fab_ctr);
+ counter = ctr.fab_cnt;
+exit:
+ spinlock_unlock(&fme->lock);
+ return counter;
+}
+
+#define FAB_PORT_SHOW(name, event) \
+static int fme_iperf_get_fab_port_##name(struct ifpga_fme_hw *fme, \
+ u8 port_id, u64 *counter) \
+{ \
+ *counter = read_fabric_counter(fme, port_id, event); \
+ return 0; \
+}
+
+FAB_PORT_SHOW(pcie0_read, IPERF_FAB_PCIE0_RD);
+FAB_PORT_SHOW(pcie0_write, IPERF_FAB_PCIE0_WR);
+FAB_PORT_SHOW(pcie1_read, IPERF_FAB_PCIE1_RD);
+FAB_PORT_SHOW(pcie1_write, IPERF_FAB_PCIE1_WR);
+FAB_PORT_SHOW(upi_read, IPERF_FAB_UPI_RD);
+FAB_PORT_SHOW(upi_write, IPERF_FAB_UPI_WR);
+FAB_PORT_SHOW(mmio_read, IPERF_FAB_MMIO_RD);
+FAB_PORT_SHOW(mmio_write, IPERF_FAB_MMIO_WR);
+
+static int fme_iperf_get_fab_port_enable(struct ifpga_fme_hw *fme,
+ u8 port_id, u64 *enable)
+{
+ struct feature_fme_iperf *iperf;
+ int status;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+
+ status = fabric_pobj_is_enabled(port_id, iperf);
+ *enable = (u64)status;
+
+ return 0;
+}
+
+/*
+ * Enabling the fabric event counter for one port (or for all ports via the
+ * root object) automatically disables whichever fabric counter filter was
+ * enabled before.
+ */
+static int fme_iperf_set_fab_port_enable(struct ifpga_fme_hw *fme,
+ u8 port_id, u64 enable)
+{
+ struct feature_fme_ifpmon_fab_ctl ctl;
+ struct feature_fme_iperf *iperf;
+ bool state;
+
+ state = !!enable;
+
+ if (!state)
+ return -EINVAL;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+
+ /* if it is already enabled. */
+ if (fabric_pobj_is_enabled(port_id, iperf))
+ return 0;
+
+ spinlock_lock(&fme->lock);
+ ctl.csr = readq(&iperf->fab_ctl);
+ if (port_id == PERF_OBJ_ROOT_ID) {
+ ctl.port_filter = FAB_DISABLE_FILTER;
+ } else {
+ ctl.port_filter = FAB_ENABLE_FILTER;
+ ctl.port_id = port_id;
+ }
+
+ writeq(ctl.csr, &iperf->fab_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_iperf_get_fab_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_fab_ctl ctl;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->fab_ctl);
+ *freeze = (u64)ctl.freeze;
+
+ return 0;
+}
+
+static int fme_iperf_set_fab_freeze(struct ifpga_fme_hw *fme, u64 freeze)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_fab_ctl ctl;
+ bool state;
+
+ state = !!freeze;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->fab_ctl);
+ ctl.freeze = state;
+ writeq(ctl.csr, &iperf->fab_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+#define PERF_MAX_PORT_NUM 1
+#define FME_IPERF_CAP_IOMMU 0x1
+
+static int fme_global_iperf_init(struct feature *feature)
+{
+ struct ifpga_fme_hw *fme;
+ struct feature_fme_header *fme_hdr;
+ struct feature_fme_capability fme_capability;
+
+ dev_info(NULL, "FME global_iperf Init.\n");
+
+ fme = (struct ifpga_fme_hw *)feature->parent;
+ fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+ /* check if iommu is not supported on this device. */
+ fme_capability.csr = readq(&fme_hdr->capability);
+ dev_info(NULL, "FME HEAD fme_capability %llx.\n",
+ (unsigned long long)fme_capability.csr);
+
+ if (fme_capability.iommu_support)
+ feature->cap |= FME_IPERF_CAP_IOMMU;
+
+ return 0;
+}
+
+static void fme_global_iperf_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME global_iperf UInit.\n");
+}
+
+static int fme_iperf_root_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+
+ switch (id) {
+ case 0x1: /* CLOCK */
+ return fme_iperf_get_clock(fme, &prop->data);
+ case 0x2: /* REVISION */
+ return fme_iperf_get_revision(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_iperf_cache_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+
+ switch (id) {
+ case 0x1: /* FREEZE */
+ return fme_iperf_get_cache_freeze(fme, &prop->data);
+ case 0x2: /* READ_HIT */
+ return fme_iperf_get_cache_read_hit(fme, &prop->data);
+ case 0x3: /* READ_MISS */
+ return fme_iperf_get_cache_read_miss(fme, &prop->data);
+ case 0x4: /* WRITE_HIT */
+ return fme_iperf_get_cache_write_hit(fme, &prop->data);
+ case 0x5: /* WRITE_MISS */
+ return fme_iperf_get_cache_write_miss(fme, &prop->data);
+ case 0x6: /* HOLD_REQUEST */
+ return fme_iperf_get_cache_hold_request(fme, &prop->data);
+ case 0x7: /* TX_REQ_STALL */
+ return fme_iperf_get_cache_tx_req_stall(fme, &prop->data);
+ case 0x8: /* RX_REQ_STALL */
+ return fme_iperf_get_cache_rx_req_stall(fme, &prop->data);
+ case 0x9: /* RX_EVICTION */
+ return fme_iperf_get_cache_rx_eviction(fme, &prop->data);
+ case 0xa: /* DATA_WRITE_PORT_CONTENTION */
+ return fme_iperf_get_cache_data_write_port_contention(fme,
+ &prop->data);
+ case 0xb: /* TAG_WRITE_PORT_CONTENTION */
+ return fme_iperf_get_cache_tag_write_port_contention(fme,
+ &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_iperf_vtd_root_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x1: /* FREEZE */
+ return fme_iperf_get_vtd_freeze(fme, &prop->data);
+ case 0x2: /* IOTLB_4K_HIT */
+ return fme_iperf_get_vtd_sip_iotlb_4k_hit(fme, &prop->data);
+ case 0x3: /* IOTLB_2M_HIT */
+ return fme_iperf_get_vtd_sip_iotlb_2m_hit(fme, &prop->data);
+ case 0x4: /* IOTLB_1G_HIT */
+ return fme_iperf_get_vtd_sip_iotlb_1g_hit(fme, &prop->data);
+ case 0x5: /* SLPWC_L3_HIT */
+ return fme_iperf_get_vtd_sip_slpwc_l3_hit(fme, &prop->data);
+ case 0x6: /* SLPWC_L4_HIT */
+ return fme_iperf_get_vtd_sip_slpwc_l4_hit(fme, &prop->data);
+ case 0x7: /* RCC_HIT */
+ return fme_iperf_get_vtd_sip_rcc_hit(fme, &prop->data);
+ case 0x8: /* IOTLB_4K_MISS */
+ return fme_iperf_get_vtd_sip_iotlb_4k_miss(fme, &prop->data);
+ case 0x9: /* IOTLB_2M_MISS */
+ return fme_iperf_get_vtd_sip_iotlb_2m_miss(fme, &prop->data);
+ case 0xa: /* IOTLB_1G_MISS */
+ return fme_iperf_get_vtd_sip_iotlb_1g_miss(fme, &prop->data);
+ case 0xb: /* SLPWC_L3_MISS */
+ return fme_iperf_get_vtd_sip_slpwc_l3_miss(fme, &prop->data);
+ case 0xc: /* SLPWC_L4_MISS */
+ return fme_iperf_get_vtd_sip_slpwc_l4_miss(fme, &prop->data);
+ case 0xd: /* RCC_MISS */
+ return fme_iperf_get_vtd_sip_rcc_miss(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_iperf_vtd_sub_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+
+ if (sub > PERF_MAX_PORT_NUM)
+ return -ENOENT;
+
+ switch (id) {
+ case 0xe: /* READ_TRANSACTION */
+ return fme_iperf_get_vtd_port_read_transaction(fme, sub,
+ &prop->data);
+ case 0xf: /* WRITE_TRANSACTION */
+ return fme_iperf_get_vtd_port_write_transaction(fme, sub,
+ &prop->data);
+ case 0x10: /* DEVTLB_READ_HIT */
+ return fme_iperf_get_vtd_port_devtlb_read_hit(fme, sub,
+ &prop->data);
+ case 0x11: /* DEVTLB_WRITE_HIT */
+ return fme_iperf_get_vtd_port_devtlb_write_hit(fme, sub,
+ &prop->data);
+ case 0x12: /* DEVTLB_4K_FILL */
+ return fme_iperf_get_vtd_port_devtlb_4k_fill(fme, sub,
+ &prop->data);
+ case 0x13: /* DEVTLB_2M_FILL */
+ return fme_iperf_get_vtd_port_devtlb_2m_fill(fme, sub,
+ &prop->data);
+ case 0x14: /* DEVTLB_1G_FILL */
+ return fme_iperf_get_vtd_port_devtlb_1g_fill(fme, sub,
+ &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_iperf_vtd_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+
+ if (sub == PERF_PROP_SUB_UNUSED)
+ return fme_iperf_vtd_root_get_prop(feature, prop);
+
+ return fme_iperf_vtd_sub_get_prop(feature, prop);
+}
+
+static int fme_iperf_fab_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ /* FREEZE is root-only; all other fabric properties exist at both the root and per-port (sub) levels */
+ switch (id) {
+ case 0x1: /* FREEZE */
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+ return fme_iperf_get_fab_freeze(fme, &prop->data);
+ case 0x2: /* PCIE0_READ */
+ return fme_iperf_get_fab_port_pcie0_read(fme, sub,
+ &prop->data);
+ case 0x3: /* PCIE0_WRITE */
+ return fme_iperf_get_fab_port_pcie0_write(fme, sub,
+ &prop->data);
+ case 0x4: /* PCIE1_READ */
+ return fme_iperf_get_fab_port_pcie1_read(fme, sub,
+ &prop->data);
+ case 0x5: /* PCIE1_WRITE */
+ return fme_iperf_get_fab_port_pcie1_write(fme, sub,
+ &prop->data);
+ case 0x6: /* UPI_READ */
+ return fme_iperf_get_fab_port_upi_read(fme, sub,
+ &prop->data);
+ case 0x7: /* UPI_WRITE */
+ return fme_iperf_get_fab_port_upi_write(fme, sub,
+ &prop->data);
+ case 0x8: /* MMIO_READ */
+ return fme_iperf_get_fab_port_mmio_read(fme, sub,
+ &prop->data);
+ case 0x9: /* MMIO_WRITE */
+ return fme_iperf_get_fab_port_mmio_write(fme, sub,
+ &prop->data);
+ case 0xa: /* ENABLE */
+ return fme_iperf_get_fab_port_enable(fme, sub, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_iperf_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+
+ switch (top) {
+ case PERF_PROP_TOP_CACHE:
+ return fme_iperf_cache_get_prop(feature, prop);
+ case PERF_PROP_TOP_VTD:
+ return fme_iperf_vtd_get_prop(feature, prop);
+ case PERF_PROP_TOP_FAB:
+ return fme_iperf_fab_get_prop(feature, prop);
+ case PERF_PROP_TOP_UNUSED:
+ return fme_iperf_root_get_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_iperf_cache_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ if (sub == PERF_PROP_SUB_UNUSED && id == 0x1) /* FREEZE */
+ return fme_iperf_set_cache_freeze(fme, prop->data);
+
+ return -ENOENT;
+}
+
+static int fme_iperf_vtd_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ if (sub == PERF_PROP_SUB_UNUSED && id == 0x1) /* FREEZE */
+ return fme_iperf_set_vtd_freeze(fme, prop->data);
+
+ return -ENOENT;
+}
+
+static int fme_iperf_fab_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x1: /* FREEZE */
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+ return fme_iperf_set_fab_freeze(fme, prop->data);
+ case 0xa: /* ENABLE */
+ return fme_iperf_set_fab_port_enable(fme, sub, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_iperf_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+
+ switch (top) {
+ case PERF_PROP_TOP_CACHE:
+ return fme_iperf_cache_set_prop(feature, prop);
+ case PERF_PROP_TOP_VTD:
+ return fme_iperf_vtd_set_prop(feature, prop);
+ case PERF_PROP_TOP_FAB:
+ return fme_iperf_fab_set_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
+
+struct feature_ops fme_global_iperf_ops = {
+ .init = fme_global_iperf_init,
+ .uinit = fme_global_iperf_uinit,
+ .get_prop = fme_global_iperf_get_prop,
+ .set_prop = fme_global_iperf_set_prop,
+};
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c b/drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c
new file mode 100644
index 00000000..ec0beeb1
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c
@@ -0,0 +1,352 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+static u64
+pr_err_handle(struct feature_fme_pr *fme_pr)
+{
+ struct feature_fme_pr_status fme_pr_status;
+ unsigned long err_code;
+ u64 fme_pr_error;
+ int i;
+
+ fme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);
+ if (!fme_pr_status.pr_status)
+ return 0;
+
+ err_code = readq(&fme_pr->ccip_fme_pr_err);
+ fme_pr_error = err_code;
+
+ for (i = 0; i < PR_MAX_ERR_NUM; i++) {
+ if (err_code & (1 << i))
+ dev_info(NULL, "%s\n", pr_err_msg[i]);
+ }
+
+ writeq(fme_pr_error, &fme_pr->ccip_fme_pr_err);
+ return fme_pr_error;
+}
+
+static int fme_pr_write_init(struct ifpga_fme_hw *fme_dev,
+ struct fpga_pr_info *info)
+{
+ struct feature_fme_pr *fme_pr;
+ struct feature_fme_pr_ctl fme_pr_ctl;
+ struct feature_fme_pr_status fme_pr_status;
+
+ fme_pr = get_fme_feature_ioaddr_by_index(fme_dev,
+ FME_FEATURE_ID_PR_MGMT);
+ if (!fme_pr)
+ return -EINVAL;
+
+ if (info->flags != FPGA_MGR_PARTIAL_RECONFIG)
+ return -EINVAL;
+
+ dev_info(fme_dev, "resetting PR before initiated PR\n");
+
+ fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
+ fme_pr_ctl.pr_reset = 1;
+ writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
+
+ fme_pr_ctl.pr_reset_ack = 1;
+
+ if (fpga_wait_register_field(pr_reset_ack, fme_pr_ctl,
+ &fme_pr->ccip_fme_pr_control,
+ PR_WAIT_TIMEOUT, 1)) {
+ dev_err(fme_dev, "maximum PR timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
+ fme_pr_ctl.pr_reset = 0;
+ writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
+
+ dev_info(fme_dev, "waiting for PR resource in HW to be initialized and ready\n");
+
+ fme_pr_status.pr_host_status = PR_HOST_STATUS_IDLE;
+
+ if (fpga_wait_register_field(pr_host_status, fme_pr_status,
+ &fme_pr->ccip_fme_pr_status,
+ PR_WAIT_TIMEOUT, 1)) {
+ dev_err(fme_dev, "maximum PR timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ dev_info(fme_dev, "check if have any previous PR error\n");
+ pr_err_handle(fme_pr);
+ return 0;
+}
+
+static int fme_pr_write(struct ifpga_fme_hw *fme_dev,
+ int port_id, const char *buf, size_t count,
+ struct fpga_pr_info *info)
+{
+ struct feature_fme_pr *fme_pr;
+ struct feature_fme_pr_ctl fme_pr_ctl;
+ struct feature_fme_pr_status fme_pr_status;
+ struct feature_fme_pr_data fme_pr_data;
+ int delay, pr_credit;
+ int ret = 0;
+
+ fme_pr = get_fme_feature_ioaddr_by_index(fme_dev,
+ FME_FEATURE_ID_PR_MGMT);
+ if (!fme_pr)
+ return -EINVAL;
+
+ dev_info(fme_dev, "set PR port ID and start request\n");
+
+ fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
+ fme_pr_ctl.pr_regionid = port_id;
+ fme_pr_ctl.pr_start_req = 1;
+ writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
+
+ dev_info(fme_dev, "pushing data from bitstream to HW\n");
+
+ fme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);
+ pr_credit = fme_pr_status.pr_credit;
+
+ while (count > 0) {
+ delay = 0;
+ while (pr_credit <= 1) {
+ if (delay++ > PR_WAIT_TIMEOUT) {
+ dev_err(fme_dev, "maximum try\n");
+
+ info->pr_err = pr_err_handle(fme_pr);
+ return info->pr_err ? -EIO : -ETIMEDOUT;
+ }
+ udelay(1);
+
+ fme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);
+ pr_credit = fme_pr_status.pr_credit;
+ }
+
+ if (count >= fme_dev->pr_bandwidth) {
+ switch (fme_dev->pr_bandwidth) {
+ case 4:
+ fme_pr_data.rsvd = 0;
+ fme_pr_data.pr_data_raw = *((const u32 *)buf);
+ writeq(fme_pr_data.csr,
+ &fme_pr->ccip_fme_pr_data);
+ break;
+ default:
+ ret = -EFAULT;
+ goto done;
+ }
+
+ buf += fme_dev->pr_bandwidth;
+ count -= fme_dev->pr_bandwidth;
+ pr_credit--;
+ } else {
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto done;
+ }
+ }
+
+done:
+ return ret;
+}
+
+static int fme_pr_write_complete(struct ifpga_fme_hw *fme_dev,
+ struct fpga_pr_info *info)
+{
+ struct feature_fme_pr *fme_pr;
+ struct feature_fme_pr_ctl fme_pr_ctl;
+
+ fme_pr = get_fme_feature_ioaddr_by_index(fme_dev,
+ FME_FEATURE_ID_PR_MGMT);
+
+ fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
+ fme_pr_ctl.pr_push_complete = 1;
+ writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
+
+ dev_info(fme_dev, "green bitstream push complete\n");
+ dev_info(fme_dev, "waiting for HW to release PR resource\n");
+
+ fme_pr_ctl.pr_start_req = 0;
+
+ if (fpga_wait_register_field(pr_start_req, fme_pr_ctl,
+ &fme_pr->ccip_fme_pr_control,
+ PR_WAIT_TIMEOUT, 1)) {
+ printf("maximum try.\n");
+ return -ETIMEDOUT;
+ }
+
+ dev_info(fme_dev, "PR operation complete, checking status\n");
+ info->pr_err = pr_err_handle(fme_pr);
+ if (info->pr_err)
+ return -EIO;
+
+ dev_info(fme_dev, "PR done successfully\n");
+ return 0;
+}
+
+static int fpga_pr_buf_load(struct ifpga_fme_hw *fme_dev,
+ struct fpga_pr_info *info, const char *buf,
+ size_t count)
+{
+ int ret;
+
+ info->state = FPGA_PR_STATE_WRITE_INIT;
+ ret = fme_pr_write_init(fme_dev, info);
+ if (ret) {
+ dev_err(fme_dev, "Error preparing FPGA for writing\n");
+ info->state = FPGA_PR_STATE_WRITE_INIT_ERR;
+ return ret;
+ }
+
+ /*
+ * Write the FPGA image to the FPGA.
+ */
+ info->state = FPGA_PR_STATE_WRITE;
+ ret = fme_pr_write(fme_dev, info->port_id, buf, count, info);
+ if (ret) {
+ dev_err(fme_dev, "Error while writing image data to FPGA\n");
+ info->state = FPGA_PR_STATE_WRITE_ERR;
+ return ret;
+ }
+
+ /*
+ * After all the FPGA image has been written, do the device specific
+ * steps to finish and set the FPGA into operating mode.
+ */
+ info->state = FPGA_PR_STATE_WRITE_COMPLETE;
+ ret = fme_pr_write_complete(fme_dev, info);
+ if (ret) {
+ dev_err(fme_dev, "Error after writing image data to FPGA\n");
+ info->state = FPGA_PR_STATE_WRITE_COMPLETE_ERR;
+ return ret;
+ }
+ info->state = FPGA_PR_STATE_DONE;
+
+ return 0;
+}
+
+static int fme_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size,
+ u64 *status)
+{
+ struct feature_fme_header *fme_hdr;
+ struct feature_fme_capability fme_capability;
+ struct ifpga_fme_hw *fme = &hw->fme;
+ struct fpga_pr_info info;
+ struct ifpga_port_hw *port;
+ int ret = 0;
+
+ if (!buffer || size == 0)
+ return -EINVAL;
+ if (fme->state != IFPGA_FME_IMPLEMENTED)
+ return -EINVAL;
+
+ /*
+ * Pad the PR buffer with extra zeros so its size is aligned to the PR
+ * bandwidth; the hardware ignores the padding automatically.
+ */
+ size = IFPGA_ALIGN(size, fme->pr_bandwidth);
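+ /*
+ * Editor's note (illustrative, not part of the original patch): assuming
+ * IFPGA_ALIGN() rounds up to the next multiple, a 1027-byte bitstream with
+ * pr_bandwidth = 4 is padded to 1028 bytes, while a size that is already a
+ * multiple of pr_bandwidth is left unchanged.
+ */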
+
+ /* get fme header region */
+ fme_hdr = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_HEADER);
+ if (!fme_hdr)
+ return -EINVAL;
+
+ /* check port id */
+ fme_capability.csr = readq(&fme_hdr->capability);
+ if (port_id >= fme_capability.num_ports) {
+ dev_err(fme, "port number more than maximum\n");
+ return -EINVAL;
+ }
+
+ memset(&info, 0, sizeof(struct fpga_pr_info));
+ info.flags = FPGA_MGR_PARTIAL_RECONFIG;
+ info.port_id = port_id;
+
+ spinlock_lock(&fme->lock);
+
+ /* get port device by port_id */
+ port = &hw->port[port_id];
+
+ /* Disable Port before PR */
+ fpga_port_disable(port);
+
+ ret = fpga_pr_buf_load(fme, &info, (void *)buffer, size);
+
+ *status = info.pr_err;
+
+ /* Re-enable Port after PR finished */
+ fpga_port_enable(port);
+ spinlock_unlock(&fme->lock);
+
+ return ret;
+}
+
+int do_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size, u64 *status)
+{
+ struct bts_header *bts_hdr;
+ void *buf;
+ struct ifpga_port_hw *port;
+ int ret;
+
+ if (!buffer || size == 0) {
+ dev_err(hw, "invalid parameter\n");
+ return -EINVAL;
+ }
+
+ bts_hdr = (struct bts_header *)buffer;
+
+ if (is_valid_bts(bts_hdr)) {
+ dev_info(hw, "this is a valid bitsteam..\n");
+ size -= (sizeof(struct bts_header) +
+ bts_hdr->metadata_len);
+ buf = (u8 *)buffer + sizeof(struct bts_header) +
+ bts_hdr->metadata_len;
+ } else {
+ return -EINVAL;
+ }
+
+ /* clear port errors before doing PR */
+ port = &hw->port[port_id];
+ ret = port_clear_error(port);
+ if (ret) {
+ dev_err(hw, "port cannot clear error\n");
+ return -EINVAL;
+ }
+
+ return fme_pr(hw, port_id, buf, size, status);
+}
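+
+/*
+ * Editor's note (illustrative, not part of the original patch): do_pr()
+ * expects the buffer laid out as
+ *
+ *	[ struct bts_header | metadata (bts_hdr->metadata_len bytes) | GBS data ]
+ *
+ * and only the trailing GBS data, with the size reduced accordingly, is
+ * handed to fme_pr() for the actual partial reconfiguration.
+ */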
+
+static int fme_pr_mgmt_init(struct feature *feature)
+{
+ struct feature_fme_pr *fme_pr;
+ struct feature_header fme_pr_header;
+ struct ifpga_fme_hw *fme;
+
+ dev_info(NULL, "FME PR MGMT Init.\n");
+
+ fme = (struct ifpga_fme_hw *)feature->parent;
+
+ fme_pr = (struct feature_fme_pr *)feature->addr;
+
+ fme_pr_header.csr = readq(&fme_pr->header);
+ if (fme_pr_header.revision == 2) {
+ dev_info(NULL, "using 512-bit PR\n");
+ fme->pr_bandwidth = 64;
+ } else {
+ dev_info(NULL, "using 32-bit PR\n");
+ fme->pr_bandwidth = 4;
+ }
+
+ return 0;
+}
+
+static void fme_pr_mgmt_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME PR MGMT UInit.\n");
+}
+
+struct feature_ops fme_pr_mgmt_ops = {
+ .init = fme_pr_mgmt_init,
+ .uinit = fme_pr_mgmt_uinit,
+};
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_hw.h b/drivers/raw/ifpga_rawdev/base/ifpga_hw.h
new file mode 100644
index 00000000..a20520c9
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_hw.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_HW_H_
+#define _IFPGA_HW_H_
+
+#include "ifpga_defines.h"
+#include "opae_ifpga_hw_api.h"
+
+enum ifpga_feature_state {
+ IFPGA_FEATURE_UNUSED = 0,
+ IFPGA_FEATURE_ATTACHED,
+};
+
+struct feature_irq_ctx {
+ int eventfd;
+ int idx;
+};
+
+struct feature {
+ enum ifpga_feature_state state;
+ const char *name;
+ u64 id;
+ u8 *addr;
+ uint64_t phys_addr;
+ u32 size;
+ int revision;
+ u64 cap;
+ int vfio_dev_fd;
+ struct feature_irq_ctx *ctx;
+ unsigned int ctx_num;
+
+ void *parent; /* to parent hw data structure */
+
+ struct feature_ops *ops; /* callbacks for this private feature */
+};
+
+struct feature_ops {
+ int (*init)(struct feature *feature);
+ void (*uinit)(struct feature *feature);
+ int (*get_prop)(struct feature *feature, struct feature_prop *prop);
+ int (*set_prop)(struct feature *feature, struct feature_prop *prop);
+ int (*set_irq)(struct feature *feature, void *irq_set);
+};
+
+enum ifpga_fme_state {
+ IFPGA_FME_UNUSED = 0,
+ IFPGA_FME_IMPLEMENTED,
+};
+
+struct ifpga_fme_hw {
+ enum ifpga_fme_state state;
+
+ struct feature sub_feature[FME_FEATURE_ID_MAX];
+ spinlock_t lock; /* protect hardware access */
+
+ void *parent; /* pointer to ifpga_hw */
+
+ /* provided by HEADER feature */
+ u32 port_num;
+ struct uuid bitstream_id;
+ u64 bitstream_md;
+ size_t pr_bandwidth;
+ u32 socket_id;
+ u32 fabric_version_id;
+ u32 cache_size;
+
+ u32 capability;
+};
+
+enum ifpga_port_state {
+ IFPGA_PORT_UNUSED = 0,
+ IFPGA_PORT_ATTACHED,
+ IFPGA_PORT_DETACHED,
+};
+
+struct ifpga_port_hw {
+ enum ifpga_port_state state;
+
+ struct feature sub_feature[PORT_FEATURE_ID_MAX];
+ spinlock_t lock; /* protect access to hw */
+
+ void *parent; /* pointer to ifpga_hw */
+
+ int port_id; /* provided by HEADER feature */
+ struct uuid afu_id; /* provided by User AFU feature */
+
+ unsigned int disable_count;
+
+ u32 capability;
+ u32 num_umsgs; /* The number of allocated umsgs */
+ u32 num_uafu_irqs; /* The number of uafu interrupts */
+ u8 *stp_addr;
+ u32 stp_size;
+};
+
+#define AFU_MAX_REGION 1
+
+struct ifpga_afu_info {
+ struct opae_reg_region region[AFU_MAX_REGION];
+ unsigned int num_regions;
+ unsigned int num_irqs;
+};
+
+struct ifpga_hw {
+ struct opae_adapter *adapter;
+ struct opae_adapter_data_pci *pci_data;
+
+ struct ifpga_fme_hw fme;
+ struct ifpga_port_hw port[MAX_FPGA_PORT_NUM];
+};
+
+static inline bool is_ifpga_hw_pf(struct ifpga_hw *hw)
+{
+ return hw->fme.state != IFPGA_FME_UNUSED;
+}
+
+static inline bool is_valid_port_id(struct ifpga_hw *hw, u32 port_id)
+{
+ if (port_id >= MAX_FPGA_PORT_NUM ||
+ hw->port[port_id].state != IFPGA_PORT_ATTACHED)
+ return false;
+
+ return true;
+}
+#endif /* _IFPGA_HW_H_ */
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_port.c b/drivers/raw/ifpga_rawdev/base/ifpga_port.c
new file mode 100644
index 00000000..a962f5b4
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_port.c
@@ -0,0 +1,388 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+int port_get_prop(struct ifpga_port_hw *port, struct feature_prop *prop)
+{
+ struct feature *feature;
+
+ if (!port)
+ return -ENOENT;
+
+ feature = get_port_feature_by_id(port, prop->feature_id);
+
+ if (feature && feature->ops && feature->ops->get_prop)
+ return feature->ops->get_prop(feature, prop);
+
+ return -ENOENT;
+}
+
+int port_set_prop(struct ifpga_port_hw *port, struct feature_prop *prop)
+{
+ struct feature *feature;
+
+ if (!port)
+ return -ENOENT;
+
+ feature = get_port_feature_by_id(port, prop->feature_id);
+
+ if (feature && feature->ops && feature->ops->set_prop)
+ return feature->ops->set_prop(feature, prop);
+
+ return -ENOENT;
+}
+
+int port_set_irq(struct ifpga_port_hw *port, u32 feature_id, void *irq_set)
+{
+ struct feature *feature;
+
+ if (!port)
+ return -ENOENT;
+
+ feature = get_port_feature_by_id(port, feature_id);
+
+ if (feature && feature->ops && feature->ops->set_irq)
+ return feature->ops->set_irq(feature, irq_set);
+
+ return -ENOENT;
+}
+
+static int port_get_revision(struct ifpga_port_hw *port, u64 *revision)
+{
+ struct feature_port_header *port_hdr
+ = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+ struct feature_header header;
+
+ header.csr = readq(&port_hdr->header);
+
+ *revision = header.revision;
+
+ return 0;
+}
+
+static int port_get_portidx(struct ifpga_port_hw *port, u64 *idx)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_capability capability;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ capability.csr = readq(&port_hdr->capability);
+ *idx = capability.port_number;
+
+ return 0;
+}
+
+static int port_get_latency_tolerance(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_control control;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ control.csr = readq(&port_hdr->control);
+ *val = control.latency_tolerance;
+
+ return 0;
+}
+
+static int port_get_ap1_event(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_status status;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ status.csr = readq(&port_hdr->status);
+ spinlock_unlock(&port->lock);
+
+ *val = status.ap1_event;
+
+ return 0;
+}
+
+static int port_set_ap1_event(struct ifpga_port_hw *port, u64 val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_status status;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ status.csr = readq(&port_hdr->status);
+ status.ap1_event = val;
+ writeq(status.csr, &port_hdr->status);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_get_ap2_event(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_status status;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ status.csr = readq(&port_hdr->status);
+ spinlock_unlock(&port->lock);
+
+ *val = status.ap2_event;
+
+ return 0;
+}
+
+static int port_set_ap2_event(struct ifpga_port_hw *port, u64 val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_status status;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ status.csr = readq(&port_hdr->status);
+ status.ap2_event = val;
+ writeq(status.csr, &port_hdr->status);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_get_power_state(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_status status;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ status.csr = readq(&port_hdr->status);
+ spinlock_unlock(&port->lock);
+
+ *val = status.power_state;
+
+ return 0;
+}
+
+static int port_get_userclk_freqcmd(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ *val = readq(&port_hdr->user_clk_freq_cmd0);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_set_userclk_freqcmd(struct ifpga_port_hw *port, u64 val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ writeq(val, &port_hdr->user_clk_freq_cmd0);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_get_userclk_freqcntrcmd(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ *val = readq(&port_hdr->user_clk_freq_cmd1);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_set_userclk_freqcntrcmd(struct ifpga_port_hw *port, u64 val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ writeq(val, &port_hdr->user_clk_freq_cmd1);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_get_userclk_freqsts(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ *val = readq(&port_hdr->user_clk_freq_sts0);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_get_userclk_freqcntrsts(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ *val = readq(&port_hdr->user_clk_freq_sts1);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_hdr_init(struct feature *feature)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ dev_info(NULL, "port hdr Init.\n");
+
+ fpga_port_reset(port);
+
+ return 0;
+}
+
+static void port_hdr_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "port hdr uinit.\n");
+}
+
+static int port_hdr_get_prop(struct feature *feature, struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ switch (prop->prop_id) {
+ case PORT_HDR_PROP_REVISION:
+ return port_get_revision(port, &prop->data);
+ case PORT_HDR_PROP_PORTIDX:
+ return port_get_portidx(port, &prop->data);
+ case PORT_HDR_PROP_LATENCY_TOLERANCE:
+ return port_get_latency_tolerance(port, &prop->data);
+ case PORT_HDR_PROP_AP1_EVENT:
+ return port_get_ap1_event(port, &prop->data);
+ case PORT_HDR_PROP_AP2_EVENT:
+ return port_get_ap2_event(port, &prop->data);
+ case PORT_HDR_PROP_POWER_STATE:
+ return port_get_power_state(port, &prop->data);
+ case PORT_HDR_PROP_USERCLK_FREQCMD:
+ return port_get_userclk_freqcmd(port, &prop->data);
+ case PORT_HDR_PROP_USERCLK_FREQCNTRCMD:
+ return port_get_userclk_freqcntrcmd(port, &prop->data);
+ case PORT_HDR_PROP_USERCLK_FREQSTS:
+ return port_get_userclk_freqsts(port, &prop->data);
+ case PORT_HDR_PROP_USERCLK_CNTRSTS:
+ return port_get_userclk_freqcntrsts(port, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int port_hdr_set_prop(struct feature *feature, struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ switch (prop->prop_id) {
+ case PORT_HDR_PROP_AP1_EVENT:
+ return port_set_ap1_event(port, prop->data);
+ case PORT_HDR_PROP_AP2_EVENT:
+ return port_set_ap2_event(port, prop->data);
+ case PORT_HDR_PROP_USERCLK_FREQCMD:
+ return port_set_userclk_freqcmd(port, prop->data);
+ case PORT_HDR_PROP_USERCLK_FREQCNTRCMD:
+ return port_set_userclk_freqcntrcmd(port, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+struct feature_ops port_hdr_ops = {
+ .init = port_hdr_init,
+ .uinit = port_hdr_uinit,
+ .get_prop = port_hdr_get_prop,
+ .set_prop = port_hdr_set_prop,
+};
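+
+/*
+ * Illustrative sketch (not part of the driver): reading the port power state
+ * through the property interface dispatched by port_hdr_get_prop() above.
+ * It assumes a valid struct ifpga_port_hw *port and that the port header
+ * feature is identified by PORT_FEATURE_ID_HEADER, as used in this file.
+ *
+ *	struct feature_prop prop = {
+ *		.feature_id = PORT_FEATURE_ID_HEADER,
+ *		.prop_id = PORT_HDR_PROP_POWER_STATE,
+ *	};
+ *	if (!port_get_prop(port, &prop))
+ *		dev_info(NULL, "power state: 0x%llx\n",
+ *			 (unsigned long long)prop.data);
+ */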
+
+static int port_stp_init(struct feature *feature)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ dev_info(NULL, "port stp Init.\n");
+
+ spinlock_lock(&port->lock);
+ port->stp_addr = feature->addr;
+ port->stp_size = feature->size;
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static void port_stp_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "port stp uinit.\n");
+}
+
+struct feature_ops port_stp_ops = {
+ .init = port_stp_init,
+ .uinit = port_stp_uinit,
+};
+
+static int port_uint_init(struct feature *feature)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ dev_info(NULL, "PORT UINT Init.\n");
+
+ spinlock_lock(&port->lock);
+ if (feature->ctx_num) {
+ port->capability |= FPGA_PORT_CAP_UAFU_IRQ;
+ port->num_uafu_irqs = feature->ctx_num;
+ }
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static void port_uint_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "PORT UINT UInit.\n");
+}
+
+struct feature_ops port_uint_ops = {
+ .init = port_uint_init,
+ .uinit = port_uint_uinit,
+};
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_port_error.c b/drivers/raw/ifpga_rawdev/base/ifpga_port_error.c
new file mode 100644
index 00000000..23db562b
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_port_error.c
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+static int port_err_get_revision(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_error *port_err;
+ struct feature_header header;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+ header.csr = readq(&port_err->header);
+ *val = header.revision;
+
+ return 0;
+}
+
+static int port_err_get_errors(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_err_key error;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+ error.csr = readq(&port_err->port_error);
+ *val = error.csr;
+
+ return 0;
+}
+
+static int port_err_get_first_error(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_first_err_key first_error;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+ first_error.csr = readq(&port_err->port_first_error);
+ *val = first_error.csr;
+
+ return 0;
+}
+
+static int port_err_get_first_malformed_req_lsb(struct ifpga_port_hw *port,
+ u64 *val)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_malformed_req0 malreq0;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+
+ malreq0.header_lsb = readq(&port_err->malreq0);
+ *val = malreq0.header_lsb;
+
+ return 0;
+}
+
+static int port_err_get_first_malformed_req_msb(struct ifpga_port_hw *port,
+ u64 *val)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_malformed_req1 malreq1;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+
+ malreq1.header_msb = readq(&port_err->malreq1);
+ *val = malreq1.header_msb;
+
+ return 0;
+}
+
+static int port_err_set_clear(struct ifpga_port_hw *port, u64 val)
+{
+ int ret;
+
+ spinlock_lock(&port->lock);
+ ret = port_err_clear(port, val);
+ spinlock_unlock(&port->lock);
+
+ return ret;
+}
+
+static int port_error_init(struct feature *feature)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ dev_info(NULL, "port error Init.\n");
+
+ spinlock_lock(&port->lock);
+ port_err_mask(port, false);
+ if (feature->ctx_num)
+ port->capability |= FPGA_PORT_CAP_ERR_IRQ;
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static void port_error_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+}
+
+static int port_error_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ switch (prop->prop_id) {
+ case PORT_ERR_PROP_REVISION:
+ return port_err_get_revision(port, &prop->data);
+ case PORT_ERR_PROP_ERRORS:
+ return port_err_get_errors(port, &prop->data);
+ case PORT_ERR_PROP_FIRST_ERROR:
+ return port_err_get_first_error(port, &prop->data);
+ case PORT_ERR_PROP_FIRST_MALFORMED_REQ_LSB:
+ return port_err_get_first_malformed_req_lsb(port, &prop->data);
+ case PORT_ERR_PROP_FIRST_MALFORMED_REQ_MSB:
+ return port_err_get_first_malformed_req_msb(port, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int port_error_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ if (prop->prop_id == PORT_ERR_PROP_CLEAR)
+ return port_err_set_clear(port, prop->data);
+
+ return -ENOENT;
+}
+
+struct feature_ops port_error_ops = {
+ .init = port_error_init,
+ .uinit = port_error_uinit,
+ .get_prop = port_error_get_prop,
+ .set_prop = port_error_set_prop,
+};
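+
+/*
+ * Illustrative sketch (not part of the driver): clearing latched port errors
+ * through the write-only PORT_ERR_PROP_CLEAR property handled by
+ * port_error_set_prop() above. It assumes a valid port handle and that the
+ * error feature is identified by PORT_FEATURE_ID_ERROR; "err_bits" stands in
+ * for the error mask previously read back via PORT_ERR_PROP_ERRORS.
+ *
+ *	struct feature_prop clear = {
+ *		.feature_id = PORT_FEATURE_ID_ERROR,
+ *		.prop_id = PORT_ERR_PROP_CLEAR,
+ *		.data = err_bits,
+ *	};
+ *	int ret = port_set_prop(port, &clear);
+ */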
diff --git a/drivers/raw/ifpga_rawdev/base/meson.build b/drivers/raw/ifpga_rawdev/base/meson.build
new file mode 100644
index 00000000..cb655352
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/meson.build
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+sources = [
+ 'ifpga_api.c',
+ 'ifpga_enumerate.c',
+ 'ifpga_feature_dev.c',
+ 'ifpga_fme.c',
+ 'ifpga_fme_iperf.c',
+ 'ifpga_fme_dperf.c',
+ 'ifpga_fme_error.c',
+ 'ifpga_port.c',
+ 'ifpga_port_error.c',
+ 'ifpga_fme_pr.c',
+ 'opae_hw_api.c',
+ 'opae_ifpga_hw_api.c',
+ 'opae_debug.c'
+]
+
+error_cflags = ['-Wno-sign-compare', '-Wno-unused-value',
+ '-Wno-format', '-Wno-unused-but-set-variable',
+ '-Wno-strict-aliasing'
+]
+c_args = cflags
+foreach flag: error_cflags
+ if cc.has_argument(flag)
+ c_args += flag
+ endif
+endforeach
+
+base_lib = static_library('ifpga_rawdev_base', sources,
+ dependencies: static_rte_eal,
+ c_args: c_args)
+base_objs = base_lib.extract_all_objects()
diff --git a/drivers/raw/ifpga_rawdev/base/opae_debug.c b/drivers/raw/ifpga_rawdev/base/opae_debug.c
new file mode 100644
index 00000000..024d7d29
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/opae_debug.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#define OPAE_HW_DEBUG
+
+#include "opae_hw_api.h"
+#include "opae_debug.h"
+
+void opae_manager_dump(struct opae_manager *mgr)
+{
+ opae_log("=====%s=====\n", __func__);
+ opae_log("OPAE Manger %s\n", mgr->name);
+ opae_log("OPAE Manger OPs = %p\n", mgr->ops);
+ opae_log("OPAE Manager Private Data = %p\n", mgr->data);
+ opae_log("OPAE Adapter(parent) = %p\n", mgr->adapter);
+ opae_log("==========================\n");
+}
+
+void opae_bridge_dump(struct opae_bridge *br)
+{
+ opae_log("=====%s=====\n", __func__);
+ opae_log("OPAE Bridge %s\n", br->name);
+ opae_log("OPAE Bridge ID = %d\n", br->id);
+ opae_log("OPAE Bridge OPs = %p\n", br->ops);
+ opae_log("OPAE Bridge Private Data = %p\n", br->data);
+ opae_log("OPAE Accelerator(under this bridge) = %p\n", br->acc);
+ opae_log("==========================\n");
+}
+
+void opae_accelerator_dump(struct opae_accelerator *acc)
+{
+ opae_log("=====%s=====\n", __func__);
+ opae_log("OPAE Accelerator %s\n", acc->name);
+ opae_log("OPAE Accelerator Index = %d\n", acc->index);
+ opae_log("OPAE Accelerator OPs = %p\n", acc->ops);
+ opae_log("OPAE Accelerator Private Data = %p\n", acc->data);
+ opae_log("OPAE Bridge (upstream) = %p\n", acc->br);
+ opae_log("OPAE Manager (upstream) = %p\n", acc->mgr);
+ opae_log("==========================\n");
+
+ if (acc->br)
+ opae_bridge_dump(acc->br);
+}
+
+static void opae_adapter_data_dump(void *data)
+{
+ struct opae_adapter_data *d = data;
+ struct opae_adapter_data_pci *d_pci;
+ struct opae_reg_region *r;
+ int i;
+
+ opae_log("=====%s=====\n", __func__);
+
+ switch (d->type) {
+ case OPAE_FPGA_PCI:
+ d_pci = (struct opae_adapter_data_pci *)d;
+
+ opae_log("OPAE Adapter Type = PCI\n");
+ opae_log("PCI Device ID: 0x%04x\n", d_pci->device_id);
+ opae_log("PCI Vendor ID: 0x%04x\n", d_pci->vendor_id);
+
+ for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+ r = &d_pci->region[i];
+ opae_log("PCI Bar %d: phy(%llx) len(%llx) addr(%p)\n",
+ i, (unsigned long long)r->phys_addr,
+ (unsigned long long)r->len, r->addr);
+ }
+ break;
+ case OPAE_FPGA_NET:
+ break;
+ }
+
+ opae_log("==========================\n");
+}
+
+void opae_adapter_dump(struct opae_adapter *adapter, int verbose)
+{
+ struct opae_accelerator *acc;
+
+ opae_log("=====%s=====\n", __func__);
+ opae_log("OPAE Adapter %s\n", adapter->name);
+ opae_log("OPAE Adapter OPs = %p\n", adapter->ops);
+ opae_log("OPAE Adapter Private Data = %p\n", adapter->data);
+ opae_log("OPAE Manager (downstream) = %p\n", adapter->mgr);
+
+ if (verbose) {
+ if (adapter->mgr)
+ opae_manager_dump(adapter->mgr);
+
+ opae_adapter_for_each_acc(adapter, acc)
+ opae_accelerator_dump(acc);
+
+ if (adapter->data)
+ opae_adapter_data_dump(adapter->data);
+ }
+
+ opae_log("==========================\n");
+}
diff --git a/drivers/raw/ifpga_rawdev/base/opae_debug.h b/drivers/raw/ifpga_rawdev/base/opae_debug.h
new file mode 100644
index 00000000..a03dff92
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/opae_debug.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OPAE_DEBUG_H_
+#define _OPAE_DEBUG_H_
+
+#ifdef OPAE_HW_DEBUG
+#define opae_log(fmt, args...) printf(fmt, ## args)
+#else
+#define opae_log(fmt, args...) do {} while (0)
+#endif
+
+void opae_manager_dump(struct opae_manager *mgr);
+void opae_bridge_dump(struct opae_bridge *br);
+void opae_accelerator_dump(struct opae_accelerator *acc);
+void opae_adapter_dump(struct opae_adapter *adapter, int verbose);
+
+#endif /* _OPAE_DEBUG_H_ */
diff --git a/drivers/raw/ifpga_rawdev/base/opae_hw_api.c b/drivers/raw/ifpga_rawdev/base/opae_hw_api.c
new file mode 100644
index 00000000..a533dfea
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/opae_hw_api.c
@@ -0,0 +1,381 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "opae_hw_api.h"
+#include "opae_debug.h"
+#include "ifpga_api.h"
+
+/* OPAE Bridge Functions */
+
+/**
+ * opae_bridge_alloc - alloc opae_bridge data structure
+ * @name: bridge name.
+ * @ops: ops of this bridge.
+ * @data: private data of this bridge.
+ *
+ * Return: opae_bridge on success, otherwise NULL.
+ */
+struct opae_bridge *
+opae_bridge_alloc(const char *name, struct opae_bridge_ops *ops, void *data)
+{
+ struct opae_bridge *br = opae_zmalloc(sizeof(*br));
+
+ if (!br)
+ return NULL;
+
+ br->name = name;
+ br->ops = ops;
+ br->data = data;
+
+ opae_log("%s %p\n", __func__, br);
+
+ return br;
+}
+
+/**
+ * opae_bridge_reset - reset opae_bridge
+ * @br: bridge to be reset.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_bridge_reset(struct opae_bridge *br)
+{
+ if (!br)
+ return -EINVAL;
+
+ if (br->ops && br->ops->reset)
+ return br->ops->reset(br);
+
+ opae_log("%s no ops\n", __func__);
+
+ return -ENOENT;
+}
+
+/* Accelerator Functions */
+
+/**
+ * opae_accelerator_alloc - alloc opae_accelerator data structure
+ * @name: accelerator name.
+ * @ops: ops of this accelerator.
+ * @data: private data of this accelerator.
+ *
+ * Return: opae_accelerator on success, otherwise NULL.
+ */
+struct opae_accelerator *
+opae_accelerator_alloc(const char *name, struct opae_accelerator_ops *ops,
+ void *data)
+{
+ struct opae_accelerator *acc = opae_zmalloc(sizeof(*acc));
+
+ if (!acc)
+ return NULL;
+
+ acc->name = name;
+ acc->ops = ops;
+ acc->data = data;
+
+ opae_log("%s %p\n", __func__, acc);
+
+ return acc;
+}
+
+/**
+ * opae_acc_reg_read - read accelerator's register from its reg region.
+ * @acc: accelerator to read.
+ * @region_idx: reg region index.
+ * @offset: reg offset.
+ * @byte: read operation width in bytes, e.g. 4 bytes = 32-bit read.
+ * @data: data to store the value read from the register.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_reg_read(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data)
+{
+ if (!acc || !data)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->read)
+ return acc->ops->read(acc, region_idx, offset, byte, data);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_acc_reg_write - write to accelerator's register from its reg region.
+ * @acc: accelerator to write.
+ * @region_idx: reg region index.
+ * @offset: reg offset.
+ * @byte: write operation width in bytes, e.g. 4 bytes = 32-bit write.
+ * @data: data storing the value to write to the register.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_reg_write(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data)
+{
+ if (!acc || !data)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->write)
+ return acc->ops->write(acc, region_idx, offset, byte, data);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_acc_get_info - get information of an accelerator.
+ * @acc: targeted accelerator
+ * @info: accelerator info data structure to be filled.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_get_info(struct opae_accelerator *acc, struct opae_acc_info *info)
+{
+ if (!acc || !info)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->get_info)
+ return acc->ops->get_info(acc, info);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_acc_get_region_info - get information of an accelerator register region.
+ * @acc: targeted accelerator
+ * @info: accelerator region info data structure to be filled.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_get_region_info(struct opae_accelerator *acc,
+ struct opae_acc_region_info *info)
+{
+ if (!acc || !info)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->get_region_info)
+ return acc->ops->get_region_info(acc, info);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_acc_set_irq - set an accelerator's irq.
+ * @acc: targeted accelerator
+ * @start: start vector number
+ * @count: count of vectors to be set from the start vector
+ * @evtfds: event fds to be notified when corresponding irqs happens
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_set_irq(struct opae_accelerator *acc,
+ u32 start, u32 count, s32 evtfds[])
+{
+ if (!acc || !acc->data)
+ return -EINVAL;
+
+ if (start + count <= start)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->set_irq)
+ return acc->ops->set_irq(acc, start, count, evtfds);
+
+ return -ENOENT;
+}
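+
+/*
+ * Illustrative sketch: wiring a single eventfd to the accelerator's first
+ * interrupt vector via the ops dispatch above. "efd" is assumed to be a file
+ * descriptor previously obtained from eventfd(2).
+ *
+ *	s32 efd = ...;
+ *	int ret = opae_acc_set_irq(acc, 0, 1, &efd);
+ */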
+
+/**
+ * opae_acc_get_uuid - get accelerator's UUID.
+ * @acc: targeted accelerator
+ * @uuid: a pointer to UUID
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_get_uuid(struct opae_accelerator *acc,
+ struct uuid *uuid)
+{
+ if (!acc || !uuid)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->get_uuid)
+ return acc->ops->get_uuid(acc, uuid);
+
+ return -ENOENT;
+}
+
+/* Manager Functions */
+
+/**
+ * opae_manager_alloc - alloc opae_manager data structure
+ * @name: manager name.
+ * @ops: ops of this manager.
+ * @data: private data of this manager.
+ *
+ * Return: opae_manager on success, otherwise NULL.
+ */
+struct opae_manager *
+opae_manager_alloc(const char *name, struct opae_manager_ops *ops, void *data)
+{
+ struct opae_manager *mgr = opae_zmalloc(sizeof(*mgr));
+
+ if (!mgr)
+ return NULL;
+
+ mgr->name = name;
+ mgr->ops = ops;
+ mgr->data = data;
+
+ opae_log("%s %p\n", __func__, mgr);
+
+ return mgr;
+}
+
+/**
+ * opae_manager_flash - flash a reconfiguration image via opae_manager
+ * @mgr: opae_manager for flash.
+ * @id: id of target region (accelerator).
+ * @buf: image data buffer.
+ * @size: buffer size.
+ * @status: status to store flash result.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_manager_flash(struct opae_manager *mgr, int id, void *buf, u32 size,
+ u64 *status)
+{
+ if (!mgr)
+ return -EINVAL;
+
+ if (mgr && mgr->ops && mgr->ops->flash)
+ return mgr->ops->flash(mgr, id, buf, size, status);
+
+ return -ENOENT;
+}
+
+/* Adapter Functions */
+
+/**
+ * opae_adapter_data_alloc - alloc opae_adapter_data data structure
+ * @type: opae_adapter_type.
+ *
+ * Return: opae_adapter_data on success, otherwise NULL.
+ */
+void *opae_adapter_data_alloc(enum opae_adapter_type type)
+{
+ struct opae_adapter_data *data;
+ int size;
+
+ switch (type) {
+ case OPAE_FPGA_PCI:
+ size = sizeof(struct opae_adapter_data_pci);
+ break;
+ case OPAE_FPGA_NET:
+ size = sizeof(struct opae_adapter_data_net);
+ break;
+ default:
+ size = sizeof(struct opae_adapter_data);
+ break;
+ }
+
+ data = opae_zmalloc(size);
+ if (!data)
+ return NULL;
+
+ data->type = type;
+
+ return data;
+}
+
+static struct opae_adapter_ops *match_ops(struct opae_adapter *adapter)
+{
+ struct opae_adapter_data *data;
+
+ if (!adapter || !adapter->data)
+ return NULL;
+
+ data = adapter->data;
+
+ if (data->type == OPAE_FPGA_PCI)
+ return &ifpga_adapter_ops;
+
+ return NULL;
+}
+
+/**
+ * opae_adapter_alloc - alloc opae_adapter data structure
+ * @name: adapter name.
+ * @data: private data of this adapter.
+ *
+ * Return: opae_adapter on success, otherwise NULL.
+ */
+struct opae_adapter *opae_adapter_alloc(const char *name, void *data)
+{
+ struct opae_adapter *adapter = opae_zmalloc(sizeof(*adapter));
+
+ if (!adapter)
+ return NULL;
+
+ TAILQ_INIT(&adapter->acc_list);
+ adapter->data = data;
+ adapter->name = name;
+ adapter->ops = match_ops(adapter);
+
+ return adapter;
+}
+
+/**
+ * opae_adapter_enumerate - enumerate this adapter
+ * @adapter: adapter to enumerate.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_adapter_enumerate(struct opae_adapter *adapter)
+{
+ int ret = -ENOENT;
+
+ if (!adapter)
+ return -EINVAL;
+
+ if (adapter->ops && adapter->ops->enumerate)
+ ret = adapter->ops->enumerate(adapter);
+
+ if (!ret)
+ opae_adapter_dump(adapter, 1);
+
+ return ret;
+}
+
+/**
+ * opae_adapter_destroy - destroy this adapter
+ * @adapter: adapter to destroy.
+ *
+ * destroy things allocated during adapter enumeration.
+ */
+void opae_adapter_destroy(struct opae_adapter *adapter)
+{
+ if (adapter && adapter->ops && adapter->ops->destroy)
+ adapter->ops->destroy(adapter);
+}
+
+/**
+ * opae_adapter_get_acc - find and return accelerator with matched id
+ * @adapter: adapter to find the accelerator.
+ * @acc_id: id (index) of the accelerator.
+ *
+ * Return: the matched accelerator on success, otherwise NULL.
+ */
+struct opae_accelerator *
+opae_adapter_get_acc(struct opae_adapter *adapter, int acc_id)
+{
+ struct opae_accelerator *acc = NULL;
+
+ if (!adapter)
+ return NULL;
+
+ opae_adapter_for_each_acc(adapter, acc)
+ if (acc->index == acc_id)
+ return acc;
+
+ return NULL;
+}
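+
+/*
+ * Illustrative adapter lifecycle sketch (not part of this file): the intended
+ * call order of the APIs above, assuming a PCI-backed device whose BAR
+ * regions have already been filled into the opae_adapter_data_pci fields.
+ *
+ *	struct opae_adapter_data_pci *data =
+ *		opae_adapter_data_alloc(OPAE_FPGA_PCI);
+ *	struct opae_adapter *adapter = opae_adapter_alloc("ifpga", data);
+ *
+ *	if (adapter && !opae_adapter_enumerate(adapter)) {
+ *		struct opae_accelerator *acc = opae_adapter_get_acc(adapter, 0);
+ *		... use the accelerator ...
+ *		opae_adapter_destroy(adapter);
+ *	}
+ *	opae_adapter_free(adapter);
+ *	opae_adapter_data_free(data);
+ */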
diff --git a/drivers/raw/ifpga_rawdev/base/opae_hw_api.h b/drivers/raw/ifpga_rawdev/base/opae_hw_api.h
new file mode 100644
index 00000000..4bbc9df5
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/opae_hw_api.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OPAE_HW_API_H_
+#define _OPAE_HW_API_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <sys/queue.h>
+
+#include "opae_osdep.h"
+
+#ifndef PCI_MAX_RESOURCE
+#define PCI_MAX_RESOURCE 6
+#endif
+
+struct opae_adapter;
+
+enum opae_adapter_type {
+ OPAE_FPGA_PCI,
+ OPAE_FPGA_NET,
+};
+
+/* OPAE Manager Data Structure */
+struct opae_manager_ops;
+
+/*
+ * opae_manager has a pointer to its parent adapter, as it may be able to
+ * manage all components on this FPGA device (adapter). If that is not the
+ * case, do not set this adapter pointer, which limits the opae_manager ops
+ * to the manager itself.
+ */
+struct opae_manager {
+ const char *name;
+ struct opae_adapter *adapter;
+ struct opae_manager_ops *ops;
+ void *data;
+};
+
+/* FIXME: add more management ops, e.g power/thermal and etc */
+struct opae_manager_ops {
+ int (*flash)(struct opae_manager *mgr, int id, void *buffer,
+ u32 size, u64 *status);
+};
+
+/* OPAE Manager APIs */
+struct opae_manager *
+opae_manager_alloc(const char *name, struct opae_manager_ops *ops, void *data);
+#define opae_manager_free(mgr) opae_free(mgr)
+int opae_manager_flash(struct opae_manager *mgr, int acc_id, void *buf,
+ u32 size, u64 *status);
+
+/* OPAE Bridge Data Structure */
+struct opae_bridge_ops;
+
+/*
+ * opae_bridge only has pointer to its downstream accelerator.
+ */
+struct opae_bridge {
+ const char *name;
+ int id;
+ struct opae_accelerator *acc;
+ struct opae_bridge_ops *ops;
+ void *data;
+};
+
+struct opae_bridge_ops {
+ int (*reset)(struct opae_bridge *br);
+};
+
+/* OPAE Bridge APIs */
+struct opae_bridge *
+opae_bridge_alloc(const char *name, struct opae_bridge_ops *ops, void *data);
+int opae_bridge_reset(struct opae_bridge *br);
+#define opae_bridge_free(br) opae_free(br)
+
+/* OPAE Accelerator Data Structure */
+struct opae_accelerator_ops;
+
+/*
+ * opae_accelerator has a pointer to its upstream bridge (port).
+ * In some cases, when the same user is allowed to do PR on its own
+ * accelerator, the manager pointer is also set during enumeration. In other
+ * cases, PR can only be done via the manager in another module / thread /
+ * service / application for better protection.
+ */
+struct opae_accelerator {
+ TAILQ_ENTRY(opae_accelerator) node;
+ const char *name;
+ int index;
+ struct opae_bridge *br;
+ struct opae_manager *mgr;
+ struct opae_accelerator_ops *ops;
+ void *data;
+};
+
+struct opae_acc_info {
+ unsigned int num_regions;
+ unsigned int num_irqs;
+};
+
+struct opae_acc_region_info {
+ u32 flags;
+#define ACC_REGION_READ (1 << 0)
+#define ACC_REGION_WRITE (1 << 1)
+#define ACC_REGION_MMIO (1 << 2)
+ u32 index;
+ u64 phys_addr;
+ u64 len;
+ u8 *addr;
+};
+
+struct opae_accelerator_ops {
+ int (*read)(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data);
+ int (*write)(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data);
+ int (*get_info)(struct opae_accelerator *acc,
+ struct opae_acc_info *info);
+ int (*get_region_info)(struct opae_accelerator *acc,
+ struct opae_acc_region_info *info);
+ int (*set_irq)(struct opae_accelerator *acc,
+ u32 start, u32 count, s32 evtfds[]);
+ int (*get_uuid)(struct opae_accelerator *acc,
+ struct uuid *uuid);
+};
+
+/* OPAE accelerator APIs */
+struct opae_accelerator *
+opae_accelerator_alloc(const char *name, struct opae_accelerator_ops *ops,
+ void *data);
+#define opae_accelerator_free(acc) opae_free(acc)
+int opae_acc_get_info(struct opae_accelerator *acc, struct opae_acc_info *info);
+int opae_acc_get_region_info(struct opae_accelerator *acc,
+ struct opae_acc_region_info *info);
+int opae_acc_set_irq(struct opae_accelerator *acc,
+ u32 start, u32 count, s32 evtfds[]);
+int opae_acc_get_uuid(struct opae_accelerator *acc,
+ struct uuid *uuid);
+
+static inline struct opae_bridge *
+opae_acc_get_br(struct opae_accelerator *acc)
+{
+ return acc ? acc->br : NULL;
+}
+
+static inline struct opae_manager *
+opae_acc_get_mgr(struct opae_accelerator *acc)
+{
+ return acc ? acc->mgr : NULL;
+}
+
+int opae_acc_reg_read(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data);
+int opae_acc_reg_write(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data);
+
+#define opae_acc_reg_read64(acc, region, offset, data) \
+ opae_acc_reg_read(acc, region, offset, 8, data)
+#define opae_acc_reg_write64(acc, region, offset, data) \
+ opae_acc_reg_write(acc, region, offset, 8, data)
+#define opae_acc_reg_read32(acc, region, offset, data) \
+ opae_acc_reg_read(acc, region, offset, 4, data)
+#define opae_acc_reg_write32(acc, region, offset, data) \
+ opae_acc_reg_write(acc, region, offset, 4, data)
+#define opae_acc_reg_read16(acc, region, offset, data) \
+ opae_acc_reg_read(acc, region, offset, 2, data)
+#define opae_acc_reg_write16(acc, region, offset, data) \
+ opae_acc_reg_write(acc, region, offset, 2, data)
+#define opae_acc_reg_read8(acc, region, offset, data) \
+ opae_acc_reg_read(acc, region, offset, 1, data)
+#define opae_acc_reg_write8(acc, region, offset, data) \
+ opae_acc_reg_write(acc, region, offset, 1, data)
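+
+/*
+ * Illustrative sketch: reading a 64-bit CSR from accelerator region 0 with
+ * the width helpers above; "offset" is a placeholder register offset. The
+ * call dispatches to the accelerator's ops->read with byte = 8.
+ *
+ *	u64 csr = 0;
+ *	if (!opae_acc_reg_read64(acc, 0, offset, &csr))
+ *		... use csr ...
+ */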
+
+/* For data stream read/write */
+int opae_acc_data_read(struct opae_accelerator *acc, unsigned int flags,
+ u64 offset, unsigned int byte, void *data);
+int opae_acc_data_write(struct opae_accelerator *acc, unsigned int flags,
+ u64 offset, unsigned int byte, void *data);
+
+/* OPAE Adapter Data Structure */
+struct opae_adapter_data {
+ enum opae_adapter_type type;
+};
+
+struct opae_reg_region {
+ u64 phys_addr;
+ u64 len;
+ u8 *addr;
+};
+
+struct opae_adapter_data_pci {
+ enum opae_adapter_type type;
+ u16 device_id;
+ u16 vendor_id;
+ struct opae_reg_region region[PCI_MAX_RESOURCE];
+ int vfio_dev_fd; /* VFIO device file descriptor */
+};
+
+/* FIXME: OPAE_FPGA_NET type */
+struct opae_adapter_data_net {
+ enum opae_adapter_type type;
+};
+
+struct opae_adapter_ops {
+ int (*enumerate)(struct opae_adapter *adapter);
+ void (*destroy)(struct opae_adapter *adapter);
+};
+
+TAILQ_HEAD(opae_accelerator_list, opae_accelerator);
+
+#define opae_adapter_for_each_acc(adapter, acc) \
+ TAILQ_FOREACH(acc, &adapter->acc_list, node)
+
+struct opae_adapter {
+ const char *name;
+ struct opae_manager *mgr;
+ struct opae_accelerator_list acc_list;
+ struct opae_adapter_ops *ops;
+ void *data;
+};
+
+/* OPAE Adapter APIs */
+void *opae_adapter_data_alloc(enum opae_adapter_type type);
+#define opae_adapter_data_free(data) opae_free(data)
+
+struct opae_adapter *opae_adapter_alloc(const char *name, void *data);
+#define opae_adapter_free(adapter) opae_free(adapter)
+
+int opae_adapter_enumerate(struct opae_adapter *adapter);
+void opae_adapter_destroy(struct opae_adapter *adapter);
+static inline struct opae_manager *
+opae_adapter_get_mgr(struct opae_adapter *adapter)
+{
+ return adapter ? adapter->mgr : NULL;
+}
+
+struct opae_accelerator *
+opae_adapter_get_acc(struct opae_adapter *adapter, int acc_id);
+
+static inline void opae_adapter_add_acc(struct opae_adapter *adapter,
+ struct opae_accelerator *acc)
+{
+ TAILQ_INSERT_TAIL(&adapter->acc_list, acc, node);
+}
+
+static inline void opae_adapter_remove_acc(struct opae_adapter *adapter,
+ struct opae_accelerator *acc)
+{
+ TAILQ_REMOVE(&adapter->acc_list, acc, node);
+}
+#endif /* _OPAE_HW_API_H_ */
diff --git a/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.c b/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.c
new file mode 100644
index 00000000..89c7b492
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.c
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "opae_ifpga_hw_api.h"
+#include "ifpga_api.h"
+
+int opae_manager_ifpga_get_prop(struct opae_manager *mgr,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme;
+
+ if (!mgr || !mgr->data)
+ return -EINVAL;
+
+ fme = mgr->data;
+
+ return ifpga_get_prop(fme->parent, FEATURE_FIU_ID_FME, 0, prop);
+}
+
+int opae_manager_ifpga_set_prop(struct opae_manager *mgr,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme;
+
+ if (!mgr || !mgr->data)
+ return -EINVAL;
+
+ fme = mgr->data;
+
+ return ifpga_set_prop(fme->parent, FEATURE_FIU_ID_FME, 0, prop);
+}
+
+int opae_manager_ifpga_get_info(struct opae_manager *mgr,
+ struct fpga_fme_info *fme_info)
+{
+ struct ifpga_fme_hw *fme;
+
+ if (!mgr || !mgr->data || !fme_info)
+ return -EINVAL;
+
+ fme = mgr->data;
+
+ spinlock_lock(&fme->lock);
+ fme_info->capability = fme->capability;
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+int opae_manager_ifpga_set_err_irq(struct opae_manager *mgr,
+ struct fpga_fme_err_irq_set *err_irq_set)
+{
+ struct ifpga_fme_hw *fme;
+
+ if (!mgr || !mgr->data)
+ return -EINVAL;
+
+ fme = mgr->data;
+
+ return ifpga_set_irq(fme->parent, FEATURE_FIU_ID_FME, 0,
+ IFPGA_FME_FEATURE_ID_GLOBAL_ERR, err_irq_set);
+}
+
+int opae_bridge_ifpga_get_prop(struct opae_bridge *br,
+ struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data)
+ return -EINVAL;
+
+ port = br->data;
+
+ return ifpga_get_prop(port->parent, FEATURE_FIU_ID_PORT,
+ port->port_id, prop);
+}
+
+int opae_bridge_ifpga_set_prop(struct opae_bridge *br,
+ struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data)
+ return -EINVAL;
+
+ port = br->data;
+
+ return ifpga_set_prop(port->parent, FEATURE_FIU_ID_PORT,
+ port->port_id, prop);
+}
+
+int opae_bridge_ifpga_get_info(struct opae_bridge *br,
+ struct fpga_port_info *port_info)
+{
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data || !port_info)
+ return -EINVAL;
+
+ port = br->data;
+
+ spinlock_lock(&port->lock);
+ port_info->capability = port->capability;
+ port_info->num_uafu_irqs = port->num_uafu_irqs;
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+int opae_bridge_ifpga_get_region_info(struct opae_bridge *br,
+ struct fpga_port_region_info *info)
+{
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data || !info)
+ return -EINVAL;
+
+ /* Only support STP region now */
+ if (info->index != PORT_REGION_INDEX_STP)
+ return -EINVAL;
+
+ port = br->data;
+
+ spinlock_lock(&port->lock);
+ info->addr = port->stp_addr;
+ info->size = port->stp_size;
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+int opae_bridge_ifpga_set_err_irq(struct opae_bridge *br,
+ struct fpga_port_err_irq_set *err_irq_set)
+{
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data)
+ return -EINVAL;
+
+ port = br->data;
+
+ return ifpga_set_irq(port->parent, FEATURE_FIU_ID_PORT, port->port_id,
+ IFPGA_PORT_FEATURE_ID_ERROR, err_irq_set);
+}
diff --git a/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.h b/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.h
new file mode 100644
index 00000000..4c2c9904
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.h
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OPAE_IFPGA_HW_API_H_
+#define _OPAE_IFPGA_HW_API_H_
+
+#include "opae_hw_api.h"
+
+/**
+ * struct feature_prop - data structure for feature property
+ * @feature_id: id of this feature.
+ * @prop_id: id of this property under this feature.
+ * @data: property value to set/get.
+ */
+struct feature_prop {
+ u64 feature_id;
+ u64 prop_id;
+ u64 data;
+};
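+
+/*
+ * Illustrative sketch: a property request is a (feature_id, prop_id) pair plus
+ * a data field. For example, reading the port index through the bridge API
+ * declared later in this header, assuming "br" is the port bridge obtained
+ * from opae_acc_get_br():
+ *
+ *	struct feature_prop prop = {
+ *		.feature_id = IFPGA_PORT_FEATURE_ID_HEADER,
+ *		.prop_id = PORT_HDR_PROP_PORTIDX,
+ *	};
+ *	if (!opae_bridge_ifpga_get_prop(br, &prop))
+ *		... prop.data now holds the port index ...
+ */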
+
+#define IFPGA_FIU_ID_FME 0x0
+#define IFPGA_FIU_ID_PORT 0x1
+
+#define IFPGA_FME_FEATURE_ID_HEADER 0x0
+#define IFPGA_FME_FEATURE_ID_THERMAL_MGMT 0x1
+#define IFPGA_FME_FEATURE_ID_POWER_MGMT 0x2
+#define IFPGA_FME_FEATURE_ID_GLOBAL_IPERF 0x3
+#define IFPGA_FME_FEATURE_ID_GLOBAL_ERR 0x4
+#define IFPGA_FME_FEATURE_ID_PR_MGMT 0x5
+#define IFPGA_FME_FEATURE_ID_HSSI 0x6
+#define IFPGA_FME_FEATURE_ID_GLOBAL_DPERF 0x7
+
+#define IFPGA_PORT_FEATURE_ID_HEADER 0x0
+#define IFPGA_PORT_FEATURE_ID_AFU 0xff
+#define IFPGA_PORT_FEATURE_ID_ERROR 0x10
+#define IFPGA_PORT_FEATURE_ID_UMSG 0x11
+#define IFPGA_PORT_FEATURE_ID_UINT 0x12
+#define IFPGA_PORT_FEATURE_ID_STP 0x13
+
+/*
+ * PROP format (TOP + SUB + ID)
+ *
+ * A field value of all ones (0xff) means the field is unused.
+ */
+#define PROP_TOP GENMASK(31, 24)
+#define PROP_TOP_UNUSED 0xff
+#define PROP_SUB GENMASK(23, 16)
+#define PROP_SUB_UNUSED 0xff
+#define PROP_ID GENMASK(15, 0)
+
+#define PROP(_top, _sub, _id) \
+ (SET_FIELD(PROP_TOP, _top) | SET_FIELD(PROP_SUB, _sub) |\
+ SET_FIELD(PROP_ID, _id))
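+
+/*
+ * Worked example (for reference only): PROP(0x1, 0xff, 0x2) places 0x1 in
+ * bits 31:24, the unused marker 0xff in bits 23:16 and 0x2 in bits 15:0,
+ * giving 0x01ff0002, which is the value of FME_IPERF_PROP_CACHE_READ_HIT
+ * defined further below, built from a top group, a sub group and an id.
+ */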
+
+/* FME header feature's properties */
+#define FME_HDR_PROP_REVISION 0x1 /* RDONLY */
+#define FME_HDR_PROP_PORTS_NUM 0x2 /* RDONLY */
+#define FME_HDR_PROP_CACHE_SIZE 0x3 /* RDONLY */
+#define FME_HDR_PROP_VERSION 0x4 /* RDONLY */
+#define FME_HDR_PROP_SOCKET_ID 0x5 /* RDONLY */
+#define FME_HDR_PROP_BITSTREAM_ID 0x6 /* RDONLY */
+#define FME_HDR_PROP_BITSTREAM_METADATA 0x7 /* RDONLY */
+
+/* FME error reporting feature's properties */
+/* FME error reporting properties format */
+#define ERR_PROP(_top, _id) PROP(_top, 0xff, _id)
+#define ERR_PROP_TOP_UNUSED PROP_TOP_UNUSED
+#define ERR_PROP_TOP_FME_ERR 0x1
+#define ERR_PROP_ROOT(_id) ERR_PROP(0xff, _id)
+#define ERR_PROP_FME_ERR(_id) ERR_PROP(ERR_PROP_TOP_FME_ERR, _id)
+
+#define FME_ERR_PROP_ERRORS ERR_PROP_FME_ERR(0x1)
+#define FME_ERR_PROP_FIRST_ERROR ERR_PROP_FME_ERR(0x2)
+#define FME_ERR_PROP_NEXT_ERROR ERR_PROP_FME_ERR(0x3)
+#define FME_ERR_PROP_CLEAR ERR_PROP_FME_ERR(0x4) /* WO */
+#define FME_ERR_PROP_REVISION ERR_PROP_ROOT(0x5)
+#define FME_ERR_PROP_PCIE0_ERRORS ERR_PROP_ROOT(0x6) /* RW */
+#define FME_ERR_PROP_PCIE1_ERRORS ERR_PROP_ROOT(0x7) /* RW */
+#define FME_ERR_PROP_NONFATAL_ERRORS ERR_PROP_ROOT(0x8)
+#define FME_ERR_PROP_CATFATAL_ERRORS ERR_PROP_ROOT(0x9)
+#define FME_ERR_PROP_INJECT_ERRORS ERR_PROP_ROOT(0xa) /* RW */
+
+/* FME thermal feature's properties */
+#define FME_THERMAL_PROP_THRESHOLD1 0x1 /* RW */
+#define FME_THERMAL_PROP_THRESHOLD2 0x2 /* RW */
+#define FME_THERMAL_PROP_THRESHOLD_TRIP 0x3 /* RDONLY */
+#define FME_THERMAL_PROP_THRESHOLD1_REACHED 0x4 /* RDONLY */
+#define FME_THERMAL_PROP_THRESHOLD2_REACHED 0x5 /* RDONLY */
+#define FME_THERMAL_PROP_THRESHOLD1_POLICY 0x6 /* RW */
+#define FME_THERMAL_PROP_TEMPERATURE 0x7 /* RDONLY */
+#define FME_THERMAL_PROP_REVISION 0x8 /* RDONLY */
+
+/* FME power feature's properties */
+#define FME_PWR_PROP_CONSUMED 0x1 /* RDONLY */
+#define FME_PWR_PROP_THRESHOLD1 0x2 /* RW */
+#define FME_PWR_PROP_THRESHOLD2 0x3 /* RW */
+#define FME_PWR_PROP_THRESHOLD1_STATUS 0x4 /* RDONLY */
+#define FME_PWR_PROP_THRESHOLD2_STATUS 0x5 /* RDONLY */
+#define FME_PWR_PROP_RTL 0x6 /* RDONLY */
+#define FME_PWR_PROP_XEON_LIMIT 0x7 /* RDONLY */
+#define FME_PWR_PROP_FPGA_LIMIT 0x8 /* RDONLY */
+#define FME_PWR_PROP_REVISION 0x9 /* RDONLY */
+
+/* FME iperf/dperf PROP format */
+#define PERF_PROP_TOP_CACHE 0x1
+#define PERF_PROP_TOP_VTD 0x2
+#define PERF_PROP_TOP_FAB 0x3
+#define PERF_PROP_TOP_UNUSED PROP_TOP_UNUSED
+#define PERF_PROP_SUB_UNUSED PROP_SUB_UNUSED
+
+#define PERF_PROP_ROOT(_id) PROP(0xff, 0xff, _id)
+#define PERF_PROP_CACHE(_id) PROP(PERF_PROP_TOP_CACHE, 0xff, _id)
+#define PERF_PROP_VTD(_sub, _id) PROP(PERF_PROP_TOP_VTD, _sub, _id)
+#define PERF_PROP_VTD_ROOT(_id) PROP(PERF_PROP_TOP_VTD, 0xff, _id)
+#define PERF_PROP_FAB(_sub, _id) PROP(PERF_PROP_TOP_FAB, _sub, _id)
+#define PERF_PROP_FAB_ROOT(_id) PROP(PERF_PROP_TOP_FAB, 0xff, _id)
+
+/* FME iperf feature's properties */
+#define FME_IPERF_PROP_CLOCK PERF_PROP_ROOT(0x1)
+#define FME_IPERF_PROP_REVISION PERF_PROP_ROOT(0x2)
+
+/* iperf CACHE properties */
+#define FME_IPERF_PROP_CACHE_FREEZE PERF_PROP_CACHE(0x1) /* RW */
+#define FME_IPERF_PROP_CACHE_READ_HIT PERF_PROP_CACHE(0x2)
+#define FME_IPERF_PROP_CACHE_READ_MISS PERF_PROP_CACHE(0x3)
+#define FME_IPERF_PROP_CACHE_WRITE_HIT PERF_PROP_CACHE(0x4)
+#define FME_IPERF_PROP_CACHE_WRITE_MISS PERF_PROP_CACHE(0x5)
+#define FME_IPERF_PROP_CACHE_HOLD_REQUEST PERF_PROP_CACHE(0x6)
+#define FME_IPERF_PROP_CACHE_TX_REQ_STALL PERF_PROP_CACHE(0x7)
+#define FME_IPERF_PROP_CACHE_RX_REQ_STALL PERF_PROP_CACHE(0x8)
+#define FME_IPERF_PROP_CACHE_RX_EVICTION PERF_PROP_CACHE(0x9)
+#define FME_IPERF_PROP_CACHE_DATA_WRITE_PORT_CONTENTION PERF_PROP_CACHE(0xa)
+#define FME_IPERF_PROP_CACHE_TAG_WRITE_PORT_CONTENTION PERF_PROP_CACHE(0xb)
+/* iperf VTD properties */
+#define FME_IPERF_PROP_VTD_FREEZE PERF_PROP_VTD_ROOT(0x1) /* RW */
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_4K_HIT PERF_PROP_VTD_ROOT(0x2)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_2M_HIT PERF_PROP_VTD_ROOT(0x3)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_1G_HIT PERF_PROP_VTD_ROOT(0x4)
+#define FME_IPERF_PROP_VTD_SIP_SLPWC_L3_HIT PERF_PROP_VTD_ROOT(0x5)
+#define FME_IPERF_PROP_VTD_SIP_SLPWC_L4_HIT PERF_PROP_VTD_ROOT(0x6)
+#define FME_IPERF_PROP_VTD_SIP_RCC_HIT PERF_PROP_VTD_ROOT(0x7)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_4K_MISS PERF_PROP_VTD_ROOT(0x8)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_2M_MISS PERF_PROP_VTD_ROOT(0x9)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_1G_MISS PERF_PROP_VTD_ROOT(0xa)
+#define FME_IPERF_PROP_VTD_SIP_SLPWC_L3_MISS PERF_PROP_VTD_ROOT(0xb)
+#define FME_IPERF_PROP_VTD_SIP_SLPWC_L4_MISS PERF_PROP_VTD_ROOT(0xc)
+#define FME_IPERF_PROP_VTD_SIP_RCC_MISS PERF_PROP_VTD_ROOT(0xd)
+#define FME_IPERF_PROP_VTD_PORT_READ_TRANSACTION(n) PERF_PROP_VTD(n, 0xe)
+#define FME_IPERF_PROP_VTD_PORT_WRITE_TRANSACTION(n) PERF_PROP_VTD(n, 0xf)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_READ_HIT(n) PERF_PROP_VTD(n, 0x10)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_WRITE_HIT(n) PERF_PROP_VTD(n, 0x11)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_4K_FILL(n) PERF_PROP_VTD(n, 0x12)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_2M_FILL(n) PERF_PROP_VTD(n, 0x13)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_1G_FILL(n) PERF_PROP_VTD(n, 0x14)
+/* iperf FAB properties */
+#define FME_IPERF_PROP_FAB_FREEZE PERF_PROP_FAB_ROOT(0x1) /* RW */
+#define FME_IPERF_PROP_FAB_PCIE0_READ PERF_PROP_FAB_ROOT(0x2)
+#define FME_IPERF_PROP_FAB_PORT_PCIE0_READ(n) PERF_PROP_FAB(n, 0x2)
+#define FME_IPERF_PROP_FAB_PCIE0_WRITE PERF_PROP_FAB_ROOT(0x3)
+#define FME_IPERF_PROP_FAB_PORT_PCIE0_WRITE(n) PERF_PROP_FAB(n, 0x3)
+#define FME_IPERF_PROP_FAB_PCIE1_READ PERF_PROP_FAB_ROOT(0x4)
+#define FME_IPERF_PROP_FAB_PORT_PCIE1_READ(n) PERF_PROP_FAB(n, 0x4)
+#define FME_IPERF_PROP_FAB_PCIE1_WRITE PERF_PROP_FAB_ROOT(0x5)
+#define FME_IPERF_PROP_FAB_PORT_PCIE1_WRITE(n) PERF_PROP_FAB(n, 0x5)
+#define FME_IPERF_PROP_FAB_UPI_READ PERF_PROP_FAB_ROOT(0x6)
+#define FME_IPERF_PROP_FAB_PORT_UPI_READ(n) PERF_PROP_FAB(n, 0x6)
+#define FME_IPERF_PROP_FAB_UPI_WRITE PERF_PROP_FAB_ROOT(0x7)
+#define FME_IPERF_PROP_FAB_PORT_UPI_WRITE(n) PERF_PROP_FAB(n, 0x7)
+#define FME_IPERF_PROP_FAB_MMIO_READ PERF_PROP_FAB_ROOT(0x8)
+#define FME_IPERF_PROP_FAB_PORT_MMIO_READ(n) PERF_PROP_FAB(n, 0x8)
+#define FME_IPERF_PROP_FAB_MMIO_WRITE PERF_PROP_FAB_ROOT(0x9)
+#define FME_IPERF_PROP_FAB_PORT_MMIO_WRITE(n) PERF_PROP_FAB(n, 0x9)
+#define FME_IPERF_PROP_FAB_ENABLE PERF_PROP_FAB_ROOT(0xa) /* RW */
+#define FME_IPERF_PROP_FAB_PORT_ENABLE(n) PERF_PROP_FAB(n, 0xa) /* RW */
+
+/* FME dperf properties */
+#define FME_DPERF_PROP_CLOCK PERF_PROP_ROOT(0x1)
+#define FME_DPERF_PROP_REVISION PERF_PROP_ROOT(0x2)
+
+/* dperf FAB properties */
+#define FME_DPERF_PROP_FAB_FREEZE PERF_PROP_FAB_ROOT(0x1) /* RW */
+#define FME_DPERF_PROP_FAB_PCIE0_READ PERF_PROP_FAB_ROOT(0x2)
+#define FME_DPERF_PROP_FAB_PORT_PCIE0_READ(n) PERF_PROP_FAB(n, 0x2)
+#define FME_DPERF_PROP_FAB_PCIE0_WRITE PERF_PROP_FAB_ROOT(0x3)
+#define FME_DPERF_PROP_FAB_PORT_PCIE0_WRITE(n) PERF_PROP_FAB(n, 0x3)
+#define FME_DPERF_PROP_FAB_MMIO_READ PERF_PROP_FAB_ROOT(0x4)
+#define FME_DPERF_PROP_FAB_PORT_MMIO_READ(n) PERF_PROP_FAB(n, 0x4)
+#define FME_DPERF_PROP_FAB_MMIO_WRITE PERF_PROP_FAB_ROOT(0x5)
+#define FME_DPERF_PROP_FAB_PORT_MMIO_WRITE(n) PERF_PROP_FAB(n, 0x5)
+#define FME_DPERF_PROP_FAB_ENABLE PERF_PROP_FAB_ROOT(0x6) /* RW */
+#define FME_DPERF_PROP_FAB_PORT_ENABLE(n) PERF_PROP_FAB(n, 0x6) /* RW */
+
+/* PORT hdr feature's properties */
+#define PORT_HDR_PROP_REVISION 0x1 /* RDONLY */
+#define PORT_HDR_PROP_PORTIDX 0x2 /* RDONLY */
+#define PORT_HDR_PROP_LATENCY_TOLERANCE 0x3 /* RDONLY */
+#define PORT_HDR_PROP_AP1_EVENT 0x4 /* RW */
+#define PORT_HDR_PROP_AP2_EVENT 0x5 /* RW */
+#define PORT_HDR_PROP_POWER_STATE 0x6 /* RDONLY */
+#define PORT_HDR_PROP_USERCLK_FREQCMD 0x7 /* RW */
+#define PORT_HDR_PROP_USERCLK_FREQCNTRCMD 0x8 /* RW */
+#define PORT_HDR_PROP_USERCLK_FREQSTS 0x9 /* RDONLY */
+#define PORT_HDR_PROP_USERCLK_CNTRSTS 0xa /* RDONLY */
+
+/* PORT error feature's properties */
+#define PORT_ERR_PROP_REVISION 0x1 /* RDONLY */
+#define PORT_ERR_PROP_ERRORS 0x2 /* RDONLY */
+#define PORT_ERR_PROP_FIRST_ERROR 0x3 /* RDONLY */
+#define PORT_ERR_PROP_FIRST_MALFORMED_REQ_LSB 0x4 /* RDONLY */
+#define PORT_ERR_PROP_FIRST_MALFORMED_REQ_MSB 0x5 /* RDONLY */
+#define PORT_ERR_PROP_CLEAR 0x6 /* WRONLY */
+
+int opae_manager_ifpga_get_prop(struct opae_manager *mgr,
+ struct feature_prop *prop);
+int opae_manager_ifpga_set_prop(struct opae_manager *mgr,
+ struct feature_prop *prop);
+int opae_bridge_ifpga_get_prop(struct opae_bridge *br,
+ struct feature_prop *prop);
+int opae_bridge_ifpga_set_prop(struct opae_bridge *br,
+ struct feature_prop *prop);
+
+/*
+ * Retrieve information about the fpga fme.
+ * Driver fills the info in provided struct fpga_fme_info.
+ */
+struct fpga_fme_info {
+ u32 capability; /* The capability of FME device */
+#define FPGA_FME_CAP_ERR_IRQ (1 << 0) /* Support fme error interrupt */
+};
+
+int opae_manager_ifpga_get_info(struct opae_manager *mgr,
+ struct fpga_fme_info *fme_info);
+
+/* Set eventfd information for ifpga FME error interrupt */
+struct fpga_fme_err_irq_set {
+ s32 evtfd; /* Eventfd handler */
+};
+
+int opae_manager_ifpga_set_err_irq(struct opae_manager *mgr,
+ struct fpga_fme_err_irq_set *err_irq_set);
+
+/*
+ * Retrieve information about the fpga port.
+ * Driver fills the info in provided struct fpga_port_info.
+ */
+struct fpga_port_info {
+ u32 capability; /* The capability of port device */
+#define FPGA_PORT_CAP_ERR_IRQ (1 << 0) /* Support port error interrupt */
+#define FPGA_PORT_CAP_UAFU_IRQ (1 << 1) /* Support uafu error interrupt */
+ u32 num_umsgs; /* The number of allocated umsgs */
+ u32 num_uafu_irqs; /* The number of uafu interrupts */
+};
+
+int opae_bridge_ifpga_get_info(struct opae_bridge *br,
+ struct fpga_port_info *port_info);
+/*
+ * Retrieve region information about the fpga port.
+ * Driver needs to fill the index of struct fpga_port_region_info.
+ */
+struct fpga_port_region_info {
+ u32 index;
+#define PORT_REGION_INDEX_STP (1 << 1) /* Signal Tap Region */
+ u64 size; /* Region Size */
+ u8 *addr; /* Base address of the region */
+};
+
+int opae_bridge_ifpga_get_region_info(struct opae_bridge *br,
+ struct fpga_port_region_info *info);
+
+/* Set eventfd information for ifpga port error interrupt */
+struct fpga_port_err_irq_set {
+ s32 evtfd; /* Eventfd handler */
+};
+
+int opae_bridge_ifpga_set_err_irq(struct opae_bridge *br,
+ struct fpga_port_err_irq_set *err_irq_set);
+
+#endif /* _OPAE_IFPGA_HW_API_H_ */
diff --git a/drivers/raw/ifpga_rawdev/base/opae_osdep.h b/drivers/raw/ifpga_rawdev/base/opae_osdep.h
new file mode 100644
index 00000000..90f54f77
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/opae_osdep.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OPAE_OSDEP_H
+#define _OPAE_OSDEP_H
+
+#include <string.h>
+#include <stdbool.h>
+
+#ifdef RTE_LIBRTE_EAL
+#include "osdep_rte/osdep_generic.h"
+#else
+#include "osdep_raw/osdep_generic.h"
+#endif
+
+#define __iomem
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef uint64_t u64;
+typedef uint64_t dma_addr_t;
+
+struct uuid {
+ u8 b[16];
+};
+
+#ifndef LINUX_MACROS
+#ifndef BITS_PER_LONG
+#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
+#endif
+#ifndef BIT
+#define BIT(a) (1UL << (a))
+#endif /* BIT */
+#ifndef BIT_ULL
+#define BIT_ULL(a) (1ULL << (a))
+#endif /* BIT_ULL */
+#ifndef GENMASK
+#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+#endif /* GENMASK */
+#ifndef GENMASK_ULL
+#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
+#endif /* GENMASK_ULL */
+#endif /* LINUX_MACROS */
+
+#define SET_FIELD(m, v) (((v) << (__builtin_ffsll(m) - 1)) & (m))
+#define GET_FIELD(m, v) (((v) & (m)) >> (__builtin_ffsll(m) - 1))
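+
+/*
+ * Worked example (for reference only): with m = GENMASK(7, 4) = 0xf0,
+ * GET_FIELD(0xf0, 0xab) extracts bits 7:4 and yields 0xa, while
+ * SET_FIELD(0xf0, 0xa) places 0xa back into those bits, yielding 0xa0.
+ */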
+
+#define dev_err(x, args...) dev_printf(ERR, args)
+#define dev_info(x, args...) dev_printf(INFO, args)
+#define dev_warn(x, args...) dev_printf(WARNING, args)
+
+#ifdef OPAE_DEBUG
+#define dev_debug(x, args...) dev_printf(DEBUG, args)
+#else
+#define dev_debug(x, args...) do { } while (0)
+#endif
+
+#define pr_err(y, args...) dev_err(0, y, ##args)
+#define pr_warn(y, args...) dev_warn(0, y, ##args)
+#define pr_info(y, args...) dev_info(0, y, ##args)
+
+#ifndef WARN_ON
+#define WARN_ON(x) do { \
+ int ret = !!(x); \
+ if (unlikely(ret)) \
+ pr_warn("WARN_ON: \"" #x "\" at %s:%d\n", __func__, __LINE__); \
+} while (0)
+#endif
+
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define udelay(x) opae_udelay(x)
+#define msleep(x) opae_udelay(1000 * (x))
+#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
+
+#endif
diff --git a/drivers/raw/ifpga_rawdev/base/osdep_raw/osdep_generic.h b/drivers/raw/ifpga_rawdev/base/osdep_raw/osdep_generic.h
new file mode 100644
index 00000000..895a1d82
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/osdep_raw/osdep_generic.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OSDEP_RAW_GENERIC_H
+#define _OSDEP_RAW_GENERIC_H
+
+#define compiler_barrier() (asm volatile ("" : : : "memory"))
+
+#define io_wmb() compiler_barrier()
+#define io_rmb() compiler_barrier()
+
+static inline uint8_t opae_readb(const volatile void *addr)
+{
+ uint8_t val;
+
+ val = *(const volatile uint8_t *)addr;
+ io_rmb();
+ return val;
+}
+
+static inline uint16_t opae_readw(const volatile void *addr)
+{
+ uint16_t val;
+
+ val = *(const volatile uint16_t *)addr;
+ io_rmb();
+ return val;
+}
+
+static inline uint32_t opae_readl(const volatile void *addr)
+{
+ uint32_t val;
+
+ val = *(const volatile uint32_t *)addr;
+ io_rmb();
+ return val;
+}
+
+static inline uint64_t opae_readq(const volatile void *addr)
+{
+ uint64_t val;
+
+ val = *(const volatile uint64_t *)addr;
+ io_rmb();
+ return val;
+}
+
+static inline void opae_writeb(uint8_t value, volatile void *addr)
+{
+ io_wmb();
+ *(volatile uint8_t *)addr = value;
+}
+
+static inline void opae_writew(uint16_t value, volatile void *addr)
+{
+ io_wmb();
+ *(volatile uint16_t *)addr = value;
+}
+
+static inline void opae_writel(uint32_t value, volatile void *addr)
+{
+ io_wmb();
+ *(volatile uint32_t *)addr = value;
+}
+
+static inline void opae_writeq(uint64_t value, volatile void *addr)
+{
+ io_wmb();
+ *(volatile uint64_t *)addr = value;
+}
+
+#define opae_free(addr) free(addr)
+
+#endif
diff --git a/drivers/raw/ifpga_rawdev/base/osdep_rte/osdep_generic.h b/drivers/raw/ifpga_rawdev/base/osdep_rte/osdep_generic.h
new file mode 100644
index 00000000..76902e28
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/osdep_rte/osdep_generic.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OSDEP_RTE_GENERIC_H
+#define _OSDEP_RTE_GENERIC_H
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+#include <rte_io.h>
+#include <rte_malloc.h>
+
+#define dev_printf(level, fmt, args...) \
+ RTE_LOG(level, PMD, "osdep_rte: " fmt, ## args)
+
+#define osdep_panic(...) rte_panic(__VA_ARGS__)
+
+#define opae_udelay(x) rte_delay_us(x)
+
+#define opae_readb(addr) rte_read8(addr)
+#define opae_readw(addr) rte_read16(addr)
+#define opae_readl(addr) rte_read32(addr)
+#define opae_readq(addr) rte_read64(addr)
+#define opae_writeb(value, addr) rte_write8(value, addr)
+#define opae_writew(value, addr) rte_write16(value, addr)
+#define opae_writel(value, addr) rte_write32(value, addr)
+#define opae_writeq(value, addr) rte_write64(value, addr)
+
+#define opae_malloc(size) rte_malloc(NULL, size, 0)
+#define opae_zmalloc(size) rte_zmalloc(NULL, size, 0)
+#define opae_free(addr) rte_free(addr)
+
+#define ARRAY_SIZE(arr) RTE_DIM(arr)
+
+#define min(a, b) RTE_MIN(a, b)
+#define max(a, b) RTE_MAX(a, b)
+
+#define spinlock_t rte_spinlock_t
+#define spinlock_init(x) rte_spinlock_init(x)
+#define spinlock_lock(x) rte_spinlock_lock(x)
+#define spinlock_unlock(x) rte_spinlock_unlock(x)
+
+#endif
diff --git a/drivers/raw/ifpga_rawdev/ifpga_rawdev.c b/drivers/raw/ifpga_rawdev/ifpga_rawdev.c
new file mode 100644
index 00000000..030ed1b6
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/ifpga_rawdev.c
@@ -0,0 +1,619 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <string.h>
+#include <dirent.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <fcntl.h>
+#include <rte_log.h>
+#include <rte_bus.h>
+#include <rte_eal_memconfig.h>
+#include <rte_malloc.h>
+#include <rte_devargs.h>
+#include <rte_memcpy.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_kvargs.h>
+#include <rte_alarm.h>
+
+#include <rte_errno.h>
+#include <rte_per_lcore.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_common.h>
+#include <rte_bus_vdev.h>
+
+#include "base/opae_hw_api.h"
+#include "rte_rawdev.h"
+#include "rte_rawdev_pmd.h"
+#include "rte_bus_ifpga.h"
+#include "ifpga_common.h"
+#include "ifpga_logs.h"
+#include "ifpga_rawdev.h"
+
+int ifpga_rawdev_logtype;
+
+#define PCI_VENDOR_ID_INTEL 0x8086
+/* PCI Device ID */
+#define PCIE_DEVICE_ID_PF_INT_5_X 0xBCBD
+#define PCIE_DEVICE_ID_PF_INT_6_X 0xBCC0
+#define PCIE_DEVICE_ID_PF_DSC_1_X 0x09C4
+/* VF Device */
+#define PCIE_DEVICE_ID_VF_INT_5_X 0xBCBF
+#define PCIE_DEVICE_ID_VF_INT_6_X 0xBCC1
+#define PCIE_DEVICE_ID_VF_DSC_1_X 0x09C5
+#define RTE_MAX_RAW_DEVICE 10
+
+static const struct rte_pci_id pci_ifpga_map[] = {
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_5_X) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_6_X) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_6_X) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static int
+ifpga_fill_afu_dev(struct opae_accelerator *acc,
+ struct rte_afu_device *afu_dev)
+{
+ struct rte_mem_resource *res = afu_dev->mem_resource;
+ struct opae_acc_region_info region_info;
+ struct opae_acc_info info;
+ unsigned long i;
+ int ret;
+
+ ret = opae_acc_get_info(acc, &info);
+ if (ret)
+ return ret;
+
+ if (info.num_regions > PCI_MAX_RESOURCE)
+ return -EFAULT;
+
+ afu_dev->num_region = info.num_regions;
+
+ for (i = 0; i < info.num_regions; i++) {
+ region_info.index = i;
+ ret = opae_acc_get_region_info(acc, &region_info);
+ if (ret)
+ return ret;
+
+ if ((region_info.flags & ACC_REGION_MMIO) &&
+ (region_info.flags & ACC_REGION_READ) &&
+ (region_info.flags & ACC_REGION_WRITE)) {
+ res[i].phys_addr = region_info.phys_addr;
+ res[i].len = region_info.len;
+ res[i].addr = region_info.addr;
+ } else
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void
+ifpga_rawdev_info_get(struct rte_rawdev *dev,
+ rte_rawdev_obj_t dev_info)
+{
+ struct opae_adapter *adapter;
+ struct opae_accelerator *acc;
+ struct rte_afu_device *afu_dev;
+
+ IFPGA_RAWDEV_PMD_FUNC_TRACE();
+
+ if (!dev_info) {
+ IFPGA_RAWDEV_PMD_ERR("Invalid request");
+ return;
+ }
+
+ adapter = ifpga_rawdev_get_priv(dev);
+ if (!adapter)
+ return;
+
+ afu_dev = dev_info;
+ afu_dev->rawdev = dev;
+
+ /* find opae_accelerator and fill info into afu_device */
+ opae_adapter_for_each_acc(adapter, acc) {
+ if (acc->index != afu_dev->id.port)
+ continue;
+
+ if (ifpga_fill_afu_dev(acc, afu_dev)) {
+ IFPGA_RAWDEV_PMD_ERR("cannot get info\n");
+ return;
+ }
+ }
+}
+
+static int
+ifpga_rawdev_configure(const struct rte_rawdev *dev,
+ rte_rawdev_obj_t config)
+{
+ IFPGA_RAWDEV_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ return config ? 0 : 1;
+}
+
+static int
+ifpga_rawdev_start(struct rte_rawdev *dev)
+{
+ int ret = 0;
+ struct opae_adapter *adapter;
+
+ IFPGA_RAWDEV_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ adapter = ifpga_rawdev_get_priv(dev);
+ if (!adapter)
+ return -ENODEV;
+
+ return ret;
+}
+
+static void
+ifpga_rawdev_stop(struct rte_rawdev *dev)
+{
+ dev->started = 0;
+}
+
+static int
+ifpga_rawdev_close(struct rte_rawdev *dev)
+{
+ return dev ? 0 : 1;
+}
+
+static int
+ifpga_rawdev_reset(struct rte_rawdev *dev)
+{
+ return dev ? 0 : 1;
+}
+
+static int
+fpga_pr(struct rte_rawdev *raw_dev, u32 port_id, u64 *buffer, u32 size,
+ u64 *status)
+{
+
+ struct opae_adapter *adapter;
+ struct opae_manager *mgr;
+ struct opae_accelerator *acc;
+ struct opae_bridge *br;
+ int ret;
+
+ adapter = ifpga_rawdev_get_priv(raw_dev);
+ if (!adapter)
+ return -ENODEV;
+
+ mgr = opae_adapter_get_mgr(adapter);
+ if (!mgr)
+ return -ENODEV;
+
+ acc = opae_adapter_get_acc(adapter, port_id);
+ if (!acc)
+ return -ENODEV;
+
+ br = opae_acc_get_br(acc);
+ if (!br)
+ return -ENODEV;
+
+ ret = opae_manager_flash(mgr, port_id, buffer, size, status);
+ if (ret) {
+ IFPGA_RAWDEV_PMD_ERR("%s pr error %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = opae_bridge_reset(br);
+ if (ret) {
+ IFPGA_RAWDEV_PMD_ERR("%s reset port:%d error %d\n",
+ __func__, port_id, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
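+/*
+ * Read the bitstream file into a DPDK-allocated buffer and hand it to
+ * fpga_pr() for the given port.
+ */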
+static int
+rte_fpga_do_pr(struct rte_rawdev *rawdev, int port_id,
+ const char *file_name)
+{
+ struct stat file_stat;
+ int file_fd;
+ int ret = 0;
+ ssize_t buffer_size;
+ void *buffer;
+ u64 pr_error;
+
+ if (!file_name)
+ return -EINVAL;
+
+ file_fd = open(file_name, O_RDONLY);
+ if (file_fd < 0) {
+ IFPGA_RAWDEV_PMD_ERR("%s: open file error: %s\n",
+ __func__, file_name);
+ IFPGA_RAWDEV_PMD_ERR("Message : %s\n", strerror(errno));
+ return -EINVAL;
+ }
+ ret = stat(file_name, &file_stat);
+ if (ret) {
+ IFPGA_RAWDEV_PMD_ERR("stat on bitstream file failed: %s\n",
+ file_name);
+ ret = -EINVAL;
+ goto close_fd;
+ }
+ buffer_size = file_stat.st_size;
+ IFPGA_RAWDEV_PMD_INFO("bitstream file size: %zd\n", buffer_size);
+ buffer = rte_malloc(NULL, buffer_size, 0);
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto close_fd;
+ }
+
+ /* read the raw data */
+ if (buffer_size != read(file_fd, buffer, buffer_size)) {
+ ret = -EINVAL;
+ goto free_buffer;
+ }
+
+ /* do PR now */
+ ret = fpga_pr(rawdev, port_id, buffer, buffer_size, &pr_error);
+ IFPGA_RAWDEV_PMD_INFO("downloading to device port %d... %s\n",
+ port_id, ret ? "failed" : "success");
+ if (ret) {
+ ret = -EINVAL;
+ goto free_buffer;
+ }
+
+free_buffer:
+ if (buffer)
+ rte_free(buffer);
+close_fd:
+ close(file_fd);
+ return ret;
+}
+
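+/*
+ * firmware_load op: optionally reprogram the port with the bitstream named
+ * in rte_afu_pr_conf, then report the resulting AFU UUID back to the caller.
+ */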
+static int
+ifpga_rawdev_pr(struct rte_rawdev *dev,
+ rte_rawdev_obj_t pr_conf)
+{
+ struct opae_adapter *adapter;
+ struct rte_afu_pr_conf *afu_pr_conf;
+ int ret;
+ struct uuid uuid;
+ struct opae_accelerator *acc;
+
+ IFPGA_RAWDEV_PMD_FUNC_TRACE();
+
+ adapter = ifpga_rawdev_get_priv(dev);
+ if (!adapter)
+ return -ENODEV;
+
+ if (!pr_conf)
+ return -EINVAL;
+
+ afu_pr_conf = pr_conf;
+
+ if (afu_pr_conf->pr_enable) {
+ ret = rte_fpga_do_pr(dev,
+ afu_pr_conf->afu_id.port,
+ afu_pr_conf->bs_path);
+ if (ret) {
+ IFPGA_RAWDEV_PMD_ERR("do pr error %d\n", ret);
+ return ret;
+ }
+ }
+
+ acc = opae_adapter_get_acc(adapter, afu_pr_conf->afu_id.port);
+ if (!acc)
+ return -ENODEV;
+
+ ret = opae_acc_get_uuid(acc, &uuid);
+ if (ret)
+ return ret;
+
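+ /* return the 128-bit AFU UUID as two 64-bit halves */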
+ memcpy(&afu_pr_conf->afu_id.uuid.uuid_low, uuid.b, sizeof(u64));
+ memcpy(&afu_pr_conf->afu_id.uuid.uuid_high, uuid.b + 8, sizeof(u64));
+
+ IFPGA_RAWDEV_PMD_INFO("%s: uuid_l=0x%lx, uuid_h=0x%lx\n", __func__,
+ (unsigned long)afu_pr_conf->afu_id.uuid.uuid_low,
+ (unsigned long)afu_pr_conf->afu_id.uuid.uuid_high);
+
+ return 0;
+}
+
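+/*
+ * Only info_get, configure, start/stop/close/reset and firmware_load (PR)
+ * are implemented; queue, buffer and xstats ops are not used by this driver.
+ */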
+static const struct rte_rawdev_ops ifpga_rawdev_ops = {
+ .dev_info_get = ifpga_rawdev_info_get,
+ .dev_configure = ifpga_rawdev_configure,
+ .dev_start = ifpga_rawdev_start,
+ .dev_stop = ifpga_rawdev_stop,
+ .dev_close = ifpga_rawdev_close,
+ .dev_reset = ifpga_rawdev_reset,
+
+ .queue_def_conf = NULL,
+ .queue_setup = NULL,
+ .queue_release = NULL,
+
+ .attr_get = NULL,
+ .attr_set = NULL,
+
+ .enqueue_bufs = NULL,
+ .dequeue_bufs = NULL,
+
+ .dump = NULL,
+
+ .xstats_get = NULL,
+ .xstats_get_names = NULL,
+ .xstats_get_by_name = NULL,
+ .xstats_reset = NULL,
+
+ .firmware_status_get = NULL,
+ .firmware_version_get = NULL,
+ .firmware_load = ifpga_rawdev_pr,
+ .firmware_unload = NULL,
+
+ .dev_selftest = NULL,
+};
+
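+/*
+ * Create a rawdev named "IFPGA:bus:dev.func" for the PCI device, copy its
+ * BAR layout into opae_adapter_data_pci and enumerate the OPAE adapter.
+ */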
+static int
+ifpga_rawdev_create(struct rte_pci_device *pci_dev,
+ int socket_id)
+{
+ int ret = 0;
+ struct rte_rawdev *rawdev = NULL;
+ struct opae_adapter *adapter = NULL;
+ struct opae_manager *mgr = NULL;
+ struct opae_adapter_data_pci *data = NULL;
+ char name[RTE_RAWDEV_NAME_MAX_LEN];
+ int i;
+
+ if (!pci_dev) {
+ IFPGA_RAWDEV_PMD_ERR("Invalid pci_dev of the device!");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ memset(name, 0, sizeof(name));
+ snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "IFPGA:%x:%02x.%x",
+ pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
+
+ IFPGA_RAWDEV_PMD_INFO("Init %s on NUMA node %d", name, rte_socket_id());
+
+ /* Allocate device structure */
+ rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct opae_adapter),
+ socket_id);
+ if (rawdev == NULL) {
+ IFPGA_RAWDEV_PMD_ERR("Unable to allocate rawdevice");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /* alloc OPAE_FPGA_PCI data to register to OPAE hardware level API */
+ data = opae_adapter_data_alloc(OPAE_FPGA_PCI);
+ if (!data) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ /* init opae_adapter_data_pci for device specific information */
+ for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+ data->region[i].phys_addr = pci_dev->mem_resource[i].phys_addr;
+ data->region[i].len = pci_dev->mem_resource[i].len;
+ data->region[i].addr = pci_dev->mem_resource[i].addr;
+ }
+ data->device_id = pci_dev->id.device_id;
+ data->vendor_id = pci_dev->id.vendor_id;
+
+ /* create an opae_adapter based on the device data above */
+ adapter = opae_adapter_alloc(pci_dev->device.name, data);
+ if (!adapter) {
+ ret = -ENOMEM;
+ goto free_adapter_data;
+ }
+
+ rawdev->dev_ops = &ifpga_rawdev_ops;
+ rawdev->device = &pci_dev->device;
+ rawdev->driver_name = pci_dev->device.driver->name;
+
+ rawdev->dev_private = adapter;
+
+ /* must enumerate the adapter before using it */
+ ret = opae_adapter_enumerate(adapter);
+ if (ret)
+ goto free_adapter;
+
+ /* get opae_manager to rawdev */
+ mgr = opae_adapter_get_mgr(adapter);
+ if (mgr) {
+ /* PF function */
+ IFPGA_RAWDEV_PMD_INFO("this is a PF function");
+ }
+
+ return ret;
+
+free_adapter:
+ if (adapter)
+ opae_adapter_free(adapter);
+free_adapter_data:
+ if (data)
+ opae_adapter_data_free(data);
+cleanup:
+ if (rawdev)
+ rte_rawdev_pmd_release(rawdev);
+
+ return ret;
+}
+
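+/*
+ * Tear down the rawdev created for this PCI device and free the OPAE
+ * adapter and its data.
+ */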
+static int
+ifpga_rawdev_destroy(struct rte_pci_device *pci_dev)
+{
+ int ret;
+ struct rte_rawdev *rawdev;
+ char name[RTE_RAWDEV_NAME_MAX_LEN];
+ struct opae_adapter *adapter;
+
+ if (!pci_dev) {
+ IFPGA_RAWDEV_PMD_ERR("Invalid pci_dev of the device!");
+ ret = -EINVAL;
+ return ret;
+ }
+
+ memset(name, 0, sizeof(name));
+ snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "IFPGA:%x:%02x.%x",
+ pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
+
+ IFPGA_RAWDEV_PMD_INFO("Closing %s on NUMA node %d",
+ name, rte_socket_id());
+
+ rawdev = rte_rawdev_pmd_get_named_dev(name);
+ if (!rawdev) {
+ IFPGA_RAWDEV_PMD_ERR("Invalid device name (%s)", name);
+ return -EINVAL;
+ }
+
+ adapter = ifpga_rawdev_get_priv(rawdev);
+ if (!adapter)
+ return -ENODEV;
+
+ opae_adapter_data_free(adapter->data);
+ opae_adapter_free(adapter);
+
+ /* rte_rawdev_close is called by pmd_release */
+ ret = rte_rawdev_pmd_release(rawdev);
+ if (ret)
+ IFPGA_RAWDEV_PMD_DEBUG("Device cleanup failed");
+
+ return ret;
+}
+
+static int
+ifpga_rawdev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ IFPGA_RAWDEV_PMD_FUNC_TRACE();
+ return ifpga_rawdev_create(pci_dev, rte_socket_id());
+}
+
+static int
+ifpga_rawdev_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return ifpga_rawdev_destroy(pci_dev);
+}
+
+static struct rte_pci_driver rte_ifpga_rawdev_pmd = {
+ .id_table = pci_ifpga_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = ifpga_rawdev_pci_probe,
+ .remove = ifpga_rawdev_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(ifpga_rawdev_pci_driver, rte_ifpga_rawdev_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(ifpga_rawdev_pci_driver, rte_ifpga_rawdev_pmd);
+RTE_PMD_REGISTER_KMOD_DEP(ifpga_rawdev_pci_driver, "* igb_uio | uio_pci_generic | vfio-pci");
+
+RTE_INIT(ifpga_rawdev_init_log);
+static void
+ifpga_rawdev_init_log(void)
+{
+ ifpga_rawdev_logtype = rte_log_register("driver.raw.init");
+ if (ifpga_rawdev_logtype >= 0)
+ rte_log_set_level(ifpga_rawdev_logtype, RTE_LOG_NOTICE);
+}
+
+static const char * const valid_args[] = {
+#define IFPGA_ARG_NAME "ifpga"
+ IFPGA_ARG_NAME,
+#define IFPGA_ARG_PORT "port"
+ IFPGA_ARG_PORT,
+#define IFPGA_AFU_BTS "afu_bts"
+ IFPGA_AFU_BTS,
+ NULL
+};
+
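+/*
+ * The ifpga_rawdev_cfg vdev takes ifpga=<string>,port=<int>[,afu_bts=<path>]
+ * devargs and hot-plugs the corresponding AFU onto the ifpga bus.
+ */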
+static int
+ifpga_cfg_probe(struct rte_vdev_device *dev)
+{
+ struct rte_devargs *devargs;
+ struct rte_kvargs *kvlist = NULL;
+ int port;
+ char *name = NULL;
+ char dev_name[RTE_RAWDEV_NAME_MAX_LEN];
+
+ devargs = dev->device.devargs;
+
+ kvlist = rte_kvargs_parse(devargs->args, valid_args);
+ if (!kvlist) {
+ IFPGA_RAWDEV_PMD_ERR("error when parsing parameters");
+ goto end;
+ }
+
+ if (rte_kvargs_count(kvlist, IFPGA_ARG_NAME) == 1) {
+ if (rte_kvargs_process(kvlist, IFPGA_ARG_NAME,
+ &rte_ifpga_get_string_arg, &name) < 0) {
+ IFPGA_RAWDEV_PMD_ERR("error to parse %s",
+ IFPGA_ARG_NAME);
+ goto end;
+ }
+ } else {
+ IFPGA_RAWDEV_PMD_ERR("arg %s is mandatory for ifpga bus",
+ IFPGA_ARG_NAME);
+ goto end;
+ }
+
+ if (rte_kvargs_count(kvlist, IFPGA_ARG_PORT) == 1) {
+ if (rte_kvargs_process(kvlist,
+ IFPGA_ARG_PORT,
+ &rte_ifpga_get_integer32_arg,
+ &port) < 0) {
+ IFPGA_RAWDEV_PMD_ERR("error to parse %s",
+ IFPGA_ARG_PORT);
+ goto end;
+ }
+ } else {
+ IFPGA_RAWDEV_PMD_ERR("arg %s is mandatory for ifpga bus",
+ IFPGA_ARG_PORT);
+ goto end;
+ }
+
+ memset(dev_name, 0, sizeof(dev_name));
+ snprintf(dev_name, RTE_RAWDEV_NAME_MAX_LEN, "%d|%s",
+ port, name);
+
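+ /* hand the new AFU device over to the ifpga bus via hotplug */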
+ rte_eal_hotplug_add(RTE_STR(IFPGA_BUS_NAME),
+ dev_name, devargs->args);
+end:
+ if (kvlist)
+ rte_kvargs_free(kvlist);
+ if (name)
+ free(name);
+
+ return 0;
+}
+
+static int
+ifpga_cfg_remove(struct rte_vdev_device *vdev)
+{
+ IFPGA_RAWDEV_PMD_INFO("Remove ifpga_cfg %p",
+ vdev);
+
+ return 0;
+}
+
+static struct rte_vdev_driver ifpga_cfg_driver = {
+ .probe = ifpga_cfg_probe,
+ .remove = ifpga_cfg_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(ifpga_rawdev_cfg, ifpga_cfg_driver);
+RTE_PMD_REGISTER_ALIAS(ifpga_rawdev_cfg, ifpga_cfg);
+RTE_PMD_REGISTER_PARAM_STRING(ifpga_rawdev_cfg,
+ "ifpga=<string> "
+ "port=<int> "
+ "afu_bts=<path>");
+
diff --git a/drivers/raw/ifpga_rawdev/ifpga_rawdev.h b/drivers/raw/ifpga_rawdev/ifpga_rawdev.h
new file mode 100644
index 00000000..c7759b8b
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/ifpga_rawdev.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_RAWDEV_H_
+#define _IFPGA_RAWDEV_H_
+
+extern int ifpga_rawdev_logtype;
+
+#define IFPGA_RAWDEV_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, ifpga_rawdev_logtype, "ifpga: " fmt, \
+ ##args)
+
+#define IFPGA_RAWDEV_PMD_FUNC_TRACE() IFPGA_RAWDEV_PMD_LOG(DEBUG, ">>")
+
+#define IFPGA_RAWDEV_PMD_DEBUG(fmt, args...) \
+ IFPGA_RAWDEV_PMD_LOG(DEBUG, fmt, ## args)
+#define IFPGA_RAWDEV_PMD_INFO(fmt, args...) \
+ IFPGA_RAWDEV_PMD_LOG(INFO, fmt, ## args)
+#define IFPGA_RAWDEV_PMD_ERR(fmt, args...) \
+ IFPGA_RAWDEV_PMD_LOG(ERR, fmt, ## args)
+#define IFPGA_RAWDEV_PMD_WARN(fmt, args...) \
+ IFPGA_RAWDEV_PMD_LOG(WARNING, fmt, ## args)
+
+enum ifpga_rawdev_device_state {
+ IFPGA_IDLE,
+ IFPGA_READY,
+ IFPGA_ERROR
+};
+
+static inline struct opae_adapter *
+ifpga_rawdev_get_priv(const struct rte_rawdev *rawdev)
+{
+ return rawdev->dev_private;
+}
+
+#endif /* _IFPGA_RAWDEV_H_ */
diff --git a/drivers/raw/ifpga_rawdev/meson.build b/drivers/raw/ifpga_rawdev/meson.build
new file mode 100644
index 00000000..67256872
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/meson.build
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+version = 1
+
+subdir('base')
+objs = [base_objs]
+
+deps += ['rawdev', 'pci', 'bus_pci', 'kvargs',
+ 'bus_vdev', 'bus_ifpga']
+sources = files('ifpga_rawdev.c')
+
+includes += include_directories('base')
+
+allow_experimental_apis = true
diff --git a/drivers/raw/ifpga_rawdev/rte_pmd_ifpga_rawdev_version.map b/drivers/raw/ifpga_rawdev/rte_pmd_ifpga_rawdev_version.map
new file mode 100644
index 00000000..9b9ab1a4
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/rte_pmd_ifpga_rawdev_version.map
@@ -0,0 +1,4 @@
+DPDK_18.05 {
+
+ local: *;
+};
diff --git a/drivers/raw/meson.build b/drivers/raw/meson.build
new file mode 100644
index 00000000..a61cdcce
--- /dev/null
+++ b/drivers/raw/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+drivers = ['skeleton_rawdev', 'dpaa2_cmdif', 'dpaa2_qdma', 'ifpga_rawdev']
+std_deps = ['rawdev']
+config_flag_fmt = 'RTE_LIBRTE_PMD_@0@_RAWDEV'
+driver_name_fmt = 'rte_pmd_@0@'
diff --git a/drivers/raw/skeleton_rawdev/meson.build b/drivers/raw/skeleton_rawdev/meson.build
new file mode 100644
index 00000000..7cb2d3fb
--- /dev/null
+++ b/drivers/raw/skeleton_rawdev/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+deps += ['rawdev', 'kvargs', 'mbuf', 'bus_vdev']
+sources = files('skeleton_rawdev.c',
+ 'skeleton_rawdev_test.c')
+
+allow_experimental_apis = true
diff --git a/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c b/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c
index 795f24bc..3eb5c3a7 100644
--- a/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c
+++ b/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c
@@ -288,6 +288,7 @@ test_rawdev_attr_set_get(void)
"Attribute (Test2) not set correctly (%" PRIu64 ")",
ret_value);
+ free(dummy_value);
return TEST_SUCCESS;
}
@@ -379,8 +380,6 @@ test_rawdev_enqdeq(void)
cleanup:
if (buffers[0].buf_addr)
free(buffers[0].buf_addr);
- if (deq_buffers)
- free(deq_buffers);
return TEST_FAILED;
}